# docker-compose.yml
version: '3.8'
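
# `x-app` defines a shared service template; the services below pull it in with
# the YAML merge key (`<<: *app`) and override only the keys that differ.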
x-app: &app
  build:
    context: .
    target: hyku-web
    args:
      - HYKU_BULKRAX_ENABLED=true
  # command: sh -l -c "bundle && bundle exec puma -v -b tcp://0.0.0.0:3000"
  image: ghcr.io/scientist-softserv/britishlibrary:${TAG:-latest}
  env_file:
    - .env
  # NOTE: all common env variables moved to .env
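  # A minimal .env sketch covering only variables this file itself references
  # (values are illustrative placeholders, not the project's real settings):
  #   TAG=latest
  #   DATABASE_NAME=hyku
  #   DATABASE_USER=postgres
  #   DATABASE_PASSWORD=password
  #   SOLR_ADMIN_USER=admin
  #   SOLR_ADMIN_PASSWORD=admin
  #   JAVA_OPTS=-Xmx2g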
volumes:
- node_modules:/app/samvera/hyrax-webapp/node_modules:cached
- uploads:/app/samvera/hyrax-webapp/public/uploads:cached
- assets:/app/samvera/hyrax-webapp/public/assets:cached
- cache:/app/samvera/hyrax-webapp/tmp/cache:cached
- .:/app/samvera/hyrax-webapp
networks:
internal:
volumes:
assets:
cache:
db:
fcrepo:
node_modules:
redis:
solr:
uploads:
zk:
zoo:
networks:
internal:
services:
zoo:
image: zookeeper:3.6.2
ports:
- 2181:2181
- 7001:7000
environment:
- ZOO_MY_ID=1
- ZOO_4LW_COMMANDS_WHITELIST=mntr,srvr,ruok,conf
- ZOO_SERVER_ID=1
- ZOO_SERVERS=server.1=zoo:2888:3888;2181
volumes:
- zoo:/data
- zk:/datalog
networks:
internal:
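    # Health is probed with ZooKeeper's `ruok` four-letter-word command, which is
    # allowed by ZOO_4LW_COMMANDS_WHITELIST above; a healthy server replies `imok`.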
    healthcheck:
      test: ["CMD-SHELL", "echo 'ruok' | nc -w 2 -q 2 localhost 2181 | grep imok || exit 1"]
      interval: "10s"
      timeout: "8s"
  solr:
    image: hyku/solr:8
    build:
      context: solr
      dockerfile: Dockerfile
    env_file:
      - .env
    environment:
      - OOM=script
      - VIRTUAL_PORT=8983
      - VIRTUAL_HOST=solr.bl.test
    depends_on:
      zoo:
        condition: service_healthy
    user: root
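    # Fix ownership of the data volume, upload security.json to ZooKeeper so
    # SolrCloud boots with its auth config in place, then drop privileges and
    # run Solr in the foreground as the solr user.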
    command: bash -c "
      chown -R 8983:8983 /var/solr
      && ./bin/solr zk cp file:/var/security.json zk:/security.json
      && runuser -u solr -- solr-foreground"
    expose:
      - 8983
    volumes:
      - solr:/var/solr
    networks:
      internal:
    healthcheck:
      test: curl -sf http://$$SOLR_ADMIN_USER:$$SOLR_ADMIN_PASSWORD@solr:8983/solr/admin/cores?action=STATUS || exit 1
      start_period: 3s
      interval: 5s
      timeout: 5s
      retries: 6
  fcrepo:
    image: ghcr.io/samvera/fcrepo4:4.7.5
    volumes:
      - fcrepo:/data:cached
    env_file:
      - .env
    environment:
      - VIRTUAL_PORT=8080
      - VIRTUAL_HOST=fcrepo.bl.test
      - JAVA_OPTS=${JAVA_OPTS} -Dfcrepo.modeshape.configuration="classpath:/config/file-simple/repository.json" -Dfcrepo.object.directory="/data/objects" -Dfcrepo.binary.directory="/data/binaries"
    expose:
      - 8080
    networks:
      internal:
  db:
    image: postgres:11.1
    env_file:
      - .env
    environment:
      - POSTGRES_DB=${DATABASE_NAME}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_USER=${DATABASE_USER}
      - VIRTUAL_PORT=5432
      - VIRTUAL_HOST=db.bl.test
    volumes:
      - db:/var/lib/postgresql/data
    networks:
      internal:
  # Used exclusively for building and caching the base image to reduce build times
  base:
    <<: *app
    image: ghcr.io/scientist-softserv/britishlibrary/base:${TAG:-latest}
    build:
      context: .
      target: hyku-base
  web:
    <<: *app
    environment:
      - VIRTUAL_PORT=3000
      - VIRTUAL_HOST=.bl.test
    ################################################################################
    ## Note on commands: by default the commands don't run bundle, which keeps
    ## boot times down. During active development, however, you may be adjusting
    ## the Gemfile and Gemfile.lock, so you'll want to run bundle regularly.
    ##
    ## Uncomment the following line during active development to run bundle and
    ## then boot the web server.
    ##
    # command: sh -l -c "bundle && bundle exec puma -v -b tcp://0.0.0.0:3000"
    ##
    ## Similar to the above, except we bundle and then tell the container to
    ## wait. You'll then need to bash into the web container and start the web
    ## server yourself (e.g. with `bundle exec puma -v -b tcp://0.0.0.0:3000`).
    ## This lets you add byebug to your code, bash into the web container, and
    ## interact with the breakpoints.
    ##
    # command: sh -l -c "bundle && tail -f /dev/null"
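    ##
    ## For example, with the waiting variant above and a running stack, you
    ## could boot the server by hand (a sketch, not project tooling):
    ##   docker compose exec web sh
    ##   bundle exec puma -v -b tcp://0.0.0.0:3000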
    depends_on:
      db:
        condition: service_started
      solr:
        condition: service_started
      fcrepo:
        condition: service_started
      redis:
        condition: service_started
      zoo:
        condition: service_started
      check_volumes:
        condition: service_started
      chrome:
        condition: service_started
      worker:
        condition: service_started
      initialize_app:
        condition: service_completed_successfully
    expose:
      - 3000
  worker:
    <<: *app
    ################################################################################
    ## Note on commands: by default the commands don't run bundle, which keeps
    ## boot times down. During active development, however, you may be adjusting
    ## the Gemfile and Gemfile.lock, so you'll want to run bundle regularly.
    ##
    ## The following line starts clamd, runs bundle, and then starts sidekiq.
    command: sh -l -c "clamd && bundle && bundle exec sidekiq"
    ##
    ## Similar to the above, except we bundle and then tell the container to
    ## wait. You'll then need to bash into the worker container and start
    ## sidekiq yourself (e.g. with `bundle exec sidekiq`). This lets you add
    ## byebug to your code, bash into the worker container, and interact with
    ## the breakpoints.
    # command: sh -l -c "bundle && tail -f /dev/null"
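    ##
    ## As with the web container, you could then start sidekiq by hand
    ## (a sketch, assuming the waiting variant above is uncommented):
    ##   docker compose exec worker sh
    ##   bundle exec sidekiq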
    build:
      context: .
      target: hyku-worker
      args:
        - HYKU_BULKRAX_ENABLED=true
      cache_from:
        - ghcr.io/scientist-softserv/britishlibrary:${TAG:-latest}
        - ghcr.io/scientist-softserv/britishlibrary/worker:${TAG:-latest}
    image: ghcr.io/scientist-softserv/britishlibrary/worker:${TAG:-latest}
    depends_on:
      check_volumes:
        condition: service_completed_successfully
      initialize_app:
        condition: service_completed_successfully
      db:
        condition: service_started
      solr:
        condition: service_started
      fcrepo:
        condition: service_started
      redis:
        condition: service_started
      zoo:
        condition: service_started
  # Do not recurse through all of tmp; derivatives will make booting very
  # slow and eventually time out as data grows.
  check_volumes:
    <<: *app
    user: root
    entrypoint: ["sh", "-x", "-c"]
    command:
      - >
        chown -R app:app /app/samvera/hyrax-webapp/public/uploads &&
        chown -R app:app /app/samvera/hyrax-webapp/public/assets &&
        chown -R app:app /app/samvera/hyrax-webapp/tmp/cache
  initialize_app:
    <<: *app
    environment:
      - CONFDIR=/app/samvera/hyrax-webapp/solr/config
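    # Uploads the Solr configset to ZooKeeper, assigns it to the default and
    # hydra-test collections, then runs database migrations and seeds.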
    entrypoint: ["sh", "-c"]
    command:
      - >
        solrcloud-upload-configset.sh /app/samvera/hyrax-webapp/solr/config &&
        solrcloud-assign-configset.sh &&
        SOLR_COLLECTION_NAME=hydra-test solrcloud-assign-configset.sh &&
        db-migrate-seed.sh
    depends_on:
      db:
        condition: service_started
      solr:
        condition: service_healthy
      fcrepo:
        condition: service_started
      redis:
        condition: service_started
  redis:
    image: redis:5
    command: redis-server
    volumes:
      - redis:/data
    networks:
      internal:
  chrome:
    # The VNC/noVNC password is 'secret'
    image: seleniarm/standalone-chromium:latest
    logging:
      driver: none
    volumes:
      - /dev/shm:/dev/shm
    shm_size: 3G
    networks:
      internal:
    environment:
      - JAVA_OPTS=-Dwebdriver.chrome.whitelistedIps=
      - VIRTUAL_PORT=7900
      - VIRTUAL_HOST=chrome.hyku.test
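
# To bring up the full development stack (a sketch; `web` pulls in every other
# service via depends_on and waits for initialize_app to finish first):
#   docker compose up web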