FROM node:20.16-alpine@sha256:eb8101caae9ac02229bd64c024919fe3d4504ff7f329da79ca60a04db08cef52 AS base
ARG SCOPE
ARG APP_PATH
ARG RELEASE_VERSION
ARG S3_BUCKET
ARG S3_PATH
ARG ASSET_PREFIX
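#
# Example build invocation (illustrative values only; SCOPE/APP_PATH/S3 settings
# depend on the monorepo and deployment, and secret IDs must match the mounts below):
#   docker build \
#     --build-arg SCOPE=web \
#     --build-arg APP_PATH=web \
#     --build-arg RELEASE_VERSION=1.2.3 \
#     --build-arg S3_BUCKET=my-assets-bucket \
#     --build-arg S3_PATH=web \
#     --build-arg ASSET_PREFIX=https://cdn.example.com/web \
#     --secret id=aws-access-key-id,env=AWS_ACCESS_KEY_ID \
#     --secret id=aws-secret-access-key,env=AWS_SECRET_ACCESS_KEY \
#     --secret id=aws-session-token,env=AWS_SESSION_TOKEN \
#     --secret id=aws-region,env=AWS_REGION \
#     -t my-app .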
## Add curl for health checks
RUN apk add --no-cache curl
## Add turbo and pnpm to all follow-up build stages
RUN corepack enable && corepack prepare [email protected] --activate
# Enable `pnpm add --global` on Alpine Linux by setting
# home location environment variable to a location already in $PATH
# https://github.com/pnpm/pnpm/issues/784#issuecomment-1518582235
ENV PNPM_HOME=/usr/local/bin
RUN pnpm add -g [email protected]
#----------------------------------------
# Docker build step that prunes down to
# the active project.
#----------------------------------------
FROM base AS setup
ARG SCOPE
ARG APP_PATH
ARG RELEASE_VERSION
ARG S3_BUCKET
ARG S3_PATH
ARG ASSET_PREFIX
# Set working directory
WORKDIR /app
COPY . .
# Prune the monorepo down to an optimized folder structure containing just the $SCOPE app and its dependencies.
RUN turbo prune $SCOPE --docker
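# `turbo prune --docker` writes the pruned workspace to /app/out:
#   out/json/          - package.json files only (used to cache dependency installs)
#   out/full/          - the full source of the pruned workspace
#   out/pnpm-lock.yaml - a lockfile pruned to the selected scope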
#----------------------------------------
# Docker build step that:
# 1. Installs all the dependencies
# 2. Builds the application
# 3. Exports it as a built application
#----------------------------------------
# Add the lockfile and package.json files of the isolated subworkspace
FROM base AS builder
ARG SCOPE
ARG APP_PATH
ARG RELEASE_VERSION
ARG ASSET_PREFIX
ARG S3_BUCKET
ARG S3_PATH
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
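# aws-cli is used further down to upload the built static assets to S3 when S3_BUCKET is set.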
RUN apk add --no-cache libc6-compat aws-cli
RUN apk update
WORKDIR /app
# Copy the pruned lockfile and workspace config first (they change less often)
COPY --from=setup /app/out/pnpm-workspace.yaml ./pnpm-workspace.yaml
COPY --from=setup /app/out/pnpm-lock.yaml ./pnpm-lock.yaml
# Copy the pruned package.json files and install dependencies
COPY --from=setup /app/out/json/ ./
RUN pnpm install --filter=${SCOPE}... --frozen-lockfile
# Build the project and its dependencies
COPY --from=setup /app/out/full/ ./
ENV ASSET_PREFIX=${ASSET_PREFIX}
ENV RELEASE_VERSION=${RELEASE_VERSION}
ENV SHOWDEV=''
RUN pnpm run build --filter=${SCOPE}...
# If an S3 bucket was passed, upload the Next.js static assets to S3
RUN --mount=type=secret,id=aws-access-key-id,target=/run/secrets/aws-access-key-id \
--mount=type=secret,id=aws-secret-access-key,target=/run/secrets/aws-secret-key \
--mount=type=secret,id=aws-session-token,target=/run/secrets/aws-session-token \
--mount=type=secret,id=aws-region,target=/run/secrets/aws-region \
if [ -n "$S3_BUCKET" ] ; then \
AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws-access-key-id) \
AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws-secret-key) \
AWS_SESSION_TOKEN=$(cat /run/secrets/aws-session-token) \
AWS_REGION=$(cat /run/secrets/aws-region) \
aws s3 cp \
--recursive \
--acl public-read \
--metadata-directive REPLACE \
--cache-control max-age=31536000 \
--include "*" \
--exclude "BUILD_ID" \
/app/clients/${APP_PATH}/.next/static s3://${S3_BUCKET}/${S3_PATH}/_next/static \
; fi
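# Likewise, upload the app's public/ assets to S3 if a bucket was passed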
RUN --mount=type=secret,id=aws-access-key-id,target=/run/secrets/aws-access-key-id \
--mount=type=secret,id=aws-secret-access-key,target=/run/secrets/aws-secret-key \
--mount=type=secret,id=aws-session-token,target=/run/secrets/aws-session-token \
--mount=type=secret,id=aws-region,target=/run/secrets/aws-region \
if [ -n "$S3_BUCKET" ] ; then \
AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws-access-key-id) \
AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws-secret-key) \
AWS_SESSION_TOKEN=$(cat /run/secrets/aws-session-token) \
AWS_REGION=$(cat /run/secrets/aws-region) \
aws s3 cp \
--recursive \
--acl public-read \
--metadata-directive REPLACE \
--cache-control max-age=31536000 \
--include "*" \
--exclude "BUILD_ID" \
/app/clients/${APP_PATH}/public s3://${S3_BUCKET}/${S3_PATH}/public \
; fi
#----------------------------------------
# Docker build step that:
# 1. Sets up our actual runner
#----------------------------------------
FROM base AS runners
ARG SCOPE
ARG APP_PATH
ARG RELEASE_VERSION
ARG ASSET_PREFIX
WORKDIR /app
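# Create an unprivileged user and run the app as that user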
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nodejs
RUN chown -R nodejs:nodejs /app
USER nodejs
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder --chown=nodejs:nodejs /app/clients/${APP_PATH}/.next/standalone /app
# These are only needed when static assets aren't uploaded to S3
# COPY --from=builder --chown=nodejs:nodejs /app/clients/${APP_PATH}/.next/static ./_next/static
# We could serve this from the Assets CDN but it requires more updates https://nextjs.org/docs/pages/api-reference/next-config-js/assetPrefix
COPY --from=builder --chown=nodejs:nodejs /app/clients/${APP_PATH}/public /app/clients/${APP_PATH}/public
ENV NEXT_TELEMETRY_DISABLED=1
ENV NODE_ENV=production
ENV PORT=3000
ENV RELEASE_VERSION=${RELEASE_VERSION}
ENV ASSET_PREFIX=${ASSET_PREFIX}
# Build ARGs like APP_PATH aren't available in CMD at runtime, so bake the start command into a script
RUN echo "node clients/${APP_PATH}/server.js" > start.sh \
&& chmod u+x start.sh
EXPOSE ${PORT}
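# The Next.js standalone server reads HOSTNAME and PORT; bind to all interfaces
# so the published container port is reachable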
ENV HOSTNAME=0.0.0.0
CMD ["sh", "-c", "./start.sh"]