version: '3'
# NOTE about healthchecks.
#
# Healthchecks, long-form "depends_on" and "service_healthy" are deliberately not used. Instead, dependencies
# and health checks are handled by the run-*.sh scripts that the containers run.
#
# As of April 2022, podman appears to be buggy when healthchecks are defined for containers: it can fail
# to start containers that have them. See
# - https://github.com/containers/podman/discussions/12674 and
# - https://bugzilla.redhat.com/show_bug.cgi?id=2024229 (strangely, even though the upstream fixes appear to
#   be merged as of April 2022, the problem still exists).
#
# As of April 2022, upstream podman-compose doesn't support "service_healthy" status
# (see https://github.com/containers/podman-compose/pull/453 ).
# Even if it did, this code needs to run on RHEL8, which ships an old podman-compose (0.1.7 as of April 2022).
#
# If healthchecks were used, the following Makefile target for compose up would be required for startup to
# work consistently enough. Obviously, maintaining this would be untenable.
#
# [BEGIN COMPOSE UP SHOWCASE]
# #***********************************
# # podman-compose up
# #***********************************
# # NOTE 1: docker-compose has a "depends_on" feature with a possible state "service_healthy" that starts
# # dependent containers only once "healthcheck" determines the prerequisite container is healthy. Currently
# # (April 2022), podman-compose implements the "service_healthy" state as if it were "service_started", but
# # "healthcheck" is implemented well enough to be callable through "podman healthcheck run". Therefore,
# # to ensure podman-compose compatibility, there are additional healthchecks here in this Makefile; they are
# # not needed with docker-compose, nor, potentially, with a future fully-implemented podman-compose. With
# # docker-compose, it's enough to start all containers with a single "up -d" call.
# # NOTE 2: The WORKAROUND_* variables exist because podman containers sometimes enter a state where the
# # first one or two start attempts fail with the error "Failed to start transient timer unit". See
# # https://github.com/containers/podman/discussions/12674 and
# # https://bugzilla.redhat.com/show_bug.cgi?id=2024229 (strangely, even though the upstream fixes appear to
# # be merged as of April 2022, the problem still exists).
# # NOTE 3: The "cnt" loops restart a container if it seems stuck in an unhealthy state. I don't know why
# # that sometimes happens: "podman logs osidb-data" looks absolutely fine and it works fine 95% of the
# # time. :-( It seems unrelated to the bug in NOTE 2, except that an additional start is performed after
# # each restart, because sometimes both bugs manifest at the same time and the container must be started
# # twice for it to actually start.
# WORKAROUND_A={ echo ">Met with a possible podman bug. Workaround for osidb-data. See NOTE 2 in Makefile." ; sleep 2 ; $(podman) start osidb-data || true ; sleep 10 ; $(podman) start osidb-data || true ; }
# WORKAROUND_B={ echo ">Met with a possible podman bug. Workaround for osidb-service. See NOTE 2 in Makefile." ; sleep 2 ; $(podman) start osidb-data osidb-service || true ; sleep 10 ; $(podman) start osidb-data osidb-service || true ; }
# WORKAROUND_C={ echo ">Met with a possible podman bug. Workaround. See NOTE 2 in Makefile." ; sleep 2 ; $(podman) start osidb-data osidb-service || true ; sleep 10 ; $(podman) start osidb-data osidb-service || true ; $(podmancompose) -f docker-compose.yml -f docker-compose.test.yml up -d ; }
# compose-up:
# @echo ">compose up"
# @$(podmancompose) -f docker-compose.yml -f docker-compose.test.yml up -d osidb-data redis || $(WORKAROUND_A) || true
# @cnt=1; until { $(podman) healthcheck run osidb-data >/dev/null 2>&1 ; } ; do ((cnt++)); echo ">waiting for osidb-data" ; sleep 2 ; if (( cnt > 30 )) ; then echo ">Container osidb-data seems stuck. Let's restart it. Sorry, IDK why that sometimes happens. It seems to happen less if 'make stop-local' is run first." ; $(podman) restart osidb-data || true ; sleep 10 ; $(podman) start osidb-data || true ; cnt=1 ; fi ; done
# @$(podmancompose) -f docker-compose.yml -f docker-compose.test.yml up -d osidb-service testldap || $(WORKAROUND_B) || true
# @cnt=1; until { $(podman) healthcheck run osidb-service >/dev/null 2>&1 ; } ; do ((cnt++)); echo ">waiting for osidb-service" ; sleep 2 ; if (( cnt > 30 )) ; then echo ">Container osidb-service seems stuck. Let's restart it. Sorry, IDK why that sometimes happens. It seems to happen less if 'make stop-local' is run first." ; $(podman) restart osidb-service || true ; sleep 10 ; $(podman) start osidb-service || true ; cnt=1 ; fi ; done
# @$(podmancompose) -f docker-compose.yml -f docker-compose.test.yml up -d celery celery_beat flower
# @$(podmancompose) -f docker-compose.yml -f docker-compose.test.yml up -d || $(WORKAROUND_C)
# [END COMPOSE UP SHOWCASE]
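#
# For reference, a minimal sketch of the kind of readiness wait a run-*.sh script can perform instead of a
# compose-level healthcheck (the actual scripts live under ./scripts/ and may differ; this assumes postgres
# client tools are available in the image, and the final exec line is hypothetical):
#
#   #!/usr/bin/env bash
#   # Block until the osidb-data postgres container accepts connections, then start the actual service.
#   until pg_isready -h osidb-data -p 5432 -U osidb_admin_user ; do
#       echo ">waiting for osidb-data" ; sleep 2
#   done
#   exec ./manage.py runserver 0.0.0.0:8000  # hypothetical; the real entrypoint differs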
services:
osidb-service:
container_name: osidb-service
build:
context: .
args:
RH_CERT_URL: ${RH_CERT_URL}
PYPI_MIRROR: ${PIP_INDEX_URL}
image: osidb-service
stdin_open: true
tty: true
ports:
- "8000:8000"
environment:
BBSYNC_SYNC_TO_BZ: ${BBSYNC_SYNC_TO_BZ}
BBSYNC_SYNC_FLAWS_TO_BZ: ${BBSYNC_SYNC_FLAWS_TO_BZ}
BBSYNC_SYNC_FLAWS_TO_BZ_ASYNCHRONOUSLY: ${BBSYNC_SYNC_FLAWS_TO_BZ_ASYNCHRONOUSLY}
BBSYNC_SYNC_TRACKERS_TO_BZ: ${BBSYNC_SYNC_TRACKERS_TO_BZ}
BZIMPORT_BZ_API_KEY: ${BZIMPORT_BZ_API_KEY:?Variable BZIMPORT_BZ_API_KEY must be set.}
BZIMPORT_BZ_URL: ${BZIMPORT_BZ_URL}
DJANGO_SETTINGS_MODULE: "config.settings_local"
ET_URL: ${ET_URL}
HTTPS_PROXY: ${HTTPS_PROXY}
HTTPS_TASKMAN_PROXY: ${HTTPS_TASKMAN_PROXY}
JIRA_AUTH_TOKEN: ${JIRA_AUTH_TOKEN:?Variable JIRA_AUTH_TOKEN must be set.}
JIRA_MAX_CONNECTION_AGE: ${JIRA_MAX_CONNECTION_AGE}
JIRA_STORY_ISSUE_TYPE_ID: ${JIRA_STORY_ISSUE_TYPE_ID}
JIRA_TASKMAN_AUTO_SYNC_FLAW: ${JIRA_TASKMAN_AUTO_SYNC_FLAW}
JIRA_TASKMAN_PROJECT_ID: ${JIRA_TASKMAN_PROJECT_ID}
JIRA_TASKMAN_PROJECT_KEY: ${JIRA_TASKMAN_PROJECT_KEY}
JIRA_TASKMAN_URL: ${JIRA_TASKMAN_URL}
JIRA_URL: ${JIRA_URL}
OSIDB_CORS_ALLOWED_ORIGINS: ${OSIDB_CORS_ALLOWED_ORIGINS}
OSIDB_CORS_ALLOW_HEADERS: ${OSIDB_CORS_ALLOW_HEADERS}
OSIDB_DEBUG: ${OSIDB_DEBUG}
PRODUCT_DEF_URL: ${PRODUCT_DEF_URL}
PRODUCT_DEF_BRANCH: ${PRODUCT_DEF_BRANCH}
PS_CONSTANTS_URL: ${PS_CONSTANTS_URL}
SNIPPET_CREATION: ${SNIPPET_CREATION}
SNIPPET_CREATION_START: ${SNIPPET_CREATION_START}
TRACKERS_SYNC_TO_JIRA: ${TRACKERS_SYNC_TO_JIRA}
command: ./scripts/setup-osidb-service.sh
volumes:
- ${PWD}:/opt/app-root/src:z
depends_on: ["osidb-data"]
# See "NOTE about healthchecks":
# depends_on:
# osidb-data:
# condition: service_healthy
# healthcheck:
# test: ["CMD-SHELL", "curl -f http://localhost:8000/osidb/healthy || exit 1"]
# interval: "60s"
# timeout: "3s"
# retries: 3
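    # The same endpoint can be probed by hand once the service is up (using the 8000:8000 mapping above):
    #   curl -f http://localhost:8000/osidb/healthy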
osidb-data:
image: docker.io/library/postgres:13
    shm_size: 1gb # Needed to support more concurrent connections
hostname: osidb-data
container_name: osidb-data
environment:
# Default connection parameter values
PGDATABASE: osidb
PGPASSWORD: passw0rd
PGUSER: osidb_admin_user
POSTGRES_DB: osidb
POSTGRES_PASSWORD: passw0rd
POSTGRES_USER: osidb_admin_user
volumes:
- pg-data:/var/lib/postgresql/data
- ${PWD}/etc/pg/postgresql.conf:/etc/postgresql/postgresql.conf:z
- ${PWD}/etc/pg/local-dev-app-user.sql:/docker-entrypoint-initdb.d/local-dev-app-user.sql:z
- ${PWD}/etc/pg/local-dev-manage-user.sql:/docker-entrypoint-initdb.d/local-dev-manage-user.sql:z
- ${PWD}/etc/pg/local-server.crt:/var/lib/postgresql/local-server.crt:z
- ${PWD}/etc/pg/local-server.key:/var/lib/postgresql/local-server.key:z
ports:
- "5432"
command: >
-c ssl=on
-c ssl_cert_file=/var/lib/postgresql/local-server.crt
-c ssl_key_file=/var/lib/postgresql/local-server.key
-c config_file=/etc/postgresql/postgresql.conf
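    # The database port is published on a random host port. To connect from the host for debugging
    # (credentials are the local-dev defaults above; host port 34567 is only an example):
    #   podman port osidb-data 5432   # prints the mapped host port, e.g. 0.0.0.0:34567
    #   psql "host=localhost port=34567 dbname=osidb user=osidb_admin_user sslmode=require"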
celery:
image: osidb-service
command: ./scripts/run-celery.sh
deploy:
mode: replicated
replicas: ${OSIDB_CELERY_WORKERS_NO:-2}
volumes:
- ${PWD}:/opt/app-root/src:z
environment:
BZIMPORT_BZ_API_KEY: ${BZIMPORT_BZ_API_KEY:?Variable BZIMPORT_BZ_API_KEY must be set.}
BZIMPORT_BZ_URL: ${BZIMPORT_BZ_URL}
DJANGO_SETTINGS_MODULE: "config.settings_local"
ET_URL: ${ET_URL}
JIRA_AUTH_TOKEN: ${JIRA_AUTH_TOKEN:?Variable JIRA_AUTH_TOKEN must be set.}
JIRA_URL: ${JIRA_URL}
OSIDB_DEBUG: ${OSIDB_DEBUG}
PRODUCT_DEF_URL: ${PRODUCT_DEF_URL}
PRODUCT_DEF_BRANCH: ${PRODUCT_DEF_BRANCH}
PS_CONSTANTS_URL: ${PS_CONSTANTS_URL}
FLAW_COLLECTOR_ENABLED: ${FLAW_COLLECTOR_ENABLED}
BZ_TRACKER_COLLECTOR_ENABLED: ${BZ_TRACKER_COLLECTOR_ENABLED}
BZ_METADATA_COLLECTOR_ENABLED: ${BZ_METADATA_COLLECTOR_ENABLED}
ERRATA_COLLECTOR_ENABLED: ${ERRATA_COLLECTOR_ENABLED}
JIRA_TASK_COLLECTOR_ENABLED: ${JIRA_TASK_COLLECTOR_ENABLED}
JIRA_TRACKER_COLLECTOR_ENABLED: ${JIRA_TRACKER_COLLECTOR_ENABLED}
JIRA_METADATA_COLLECTOR_ENABLED: ${JIRA_METADATA_COLLECTOR_ENABLED}
# CVEORG_COLLECTOR_ENABLED: ${CVEORG_COLLECTOR_ENABLED}
NVD_COLLECTOR_ENABLED: ${NVD_COLLECTOR_ENABLED}
OSV_COLLECTOR_ENABLED: ${OSV_COLLECTOR_ENABLED}
depends_on: ["osidb-data", "osidb-service", "redis"]
# See "NOTE about healthchecks":
# depends_on:
# osidb-data:
# condition: service_healthy
# osidb-service:
# condition: service_healthy
# redis:
# condition: service_started
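    # The worker replica count comes from OSIDB_CELERY_WORKERS_NO (default 2). For example, to run four
    # workers (support for "deploy.replicas" varies between compose implementations):
    #   OSIDB_CELERY_WORKERS_NO=4 docker-compose up -d celery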
celery_beat:
container_name: celery_beat
hostname: celery_beat
image: osidb-service
command: ./scripts/run-celery-beat.sh
volumes:
- ${PWD}:/opt/app-root/src:z
environment:
BZIMPORT_BZ_API_KEY: ${BZIMPORT_BZ_API_KEY:?Variable BZIMPORT_BZ_API_KEY must be set.}
BZIMPORT_BZ_URL: ${BZIMPORT_BZ_URL}
DJANGO_SETTINGS_MODULE: "config.settings_local"
ET_URL: ${ET_URL}
JIRA_AUTH_TOKEN: ${JIRA_AUTH_TOKEN:?Variable JIRA_AUTH_TOKEN must be set.}
JIRA_URL: ${JIRA_URL}
OSIDB_DEBUG: ${OSIDB_DEBUG}
PRODUCT_DEF_URL: ${PRODUCT_DEF_URL}
PRODUCT_DEF_BRANCH: ${PRODUCT_DEF_BRANCH}
PS_CONSTANTS_URL: ${PS_CONSTANTS_URL}
FLAW_COLLECTOR_ENABLED: ${FLAW_COLLECTOR_ENABLED}
BZ_TRACKER_COLLECTOR_ENABLED: ${BZ_TRACKER_COLLECTOR_ENABLED}
BZ_METADATA_COLLECTOR_ENABLED: ${BZ_METADATA_COLLECTOR_ENABLED}
ERRATA_COLLECTOR_ENABLED: ${ERRATA_COLLECTOR_ENABLED}
JIRA_TASK_COLLECTOR_ENABLED: ${JIRA_TASK_COLLECTOR_ENABLED}
JIRA_TRACKER_COLLECTOR_ENABLED: ${JIRA_TRACKER_COLLECTOR_ENABLED}
JIRA_METADATA_COLLECTOR_ENABLED: ${JIRA_METADATA_COLLECTOR_ENABLED}
# CVEORG_COLLECTOR_ENABLED: ${CVEORG_COLLECTOR_ENABLED}
NVD_COLLECTOR_ENABLED: ${NVD_COLLECTOR_ENABLED}
OSV_COLLECTOR_ENABLED: ${OSV_COLLECTOR_ENABLED}
depends_on: ["osidb-data", "osidb-service", "redis"]
# See "NOTE about healthchecks":
# depends_on:
# osidb-data:
# condition: service_healthy
# osidb-service:
# condition: service_healthy
# redis:
# condition: service_started
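    # celery_beat is the periodic-task scheduler; only one beat instance should run at a time (multiple
    # schedulers would enqueue duplicate periodic tasks), hence the fixed container_name and no replicas.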
flower:
container_name: flower
hostname: flower
image: osidb-service
command: ./scripts/run-flower.sh
ports:
- "5555:5555"
volumes:
- ${PWD}:/opt/app-root/src:z
environment:
DJANGO_SETTINGS_MODULE: "config.settings_local"
ET_URL: ${ET_URL}
OSIDB_DEBUG: ${OSIDB_DEBUG}
PRODUCT_DEF_URL: ${PRODUCT_DEF_URL}
PRODUCT_DEF_BRANCH: ${PRODUCT_DEF_BRANCH}
PS_CONSTANTS_URL: ${PS_CONSTANTS_URL}
depends_on: ["redis", "osidb-data"]
redis:
container_name: redis
hostname: redis
image: docker.io/library/redis:6
ports:
- "6379"
volumes:
pg-data: