# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".
# The "job" block is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job
#
job "example" {
# The "region" parameter specifies the region in which to execute the job.
# If omitted, this inherits the default region name of "global".
# region = "global"
#
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This accepts wildcards and defaults
# allowing placement on all datacenters.
datacenters = ["*"]
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
#
# For more information, please see the online documentation at:
#
# https://www.nomadproject.io/docs/schedulers
#
type = "service"
# The "constraint" block defines additional constraints for placing this job,
# in addition to any resource or driver constraints. This block may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
# For more information and examples on the "constraint" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint
#
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# The "update" block specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
# "update" block may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
# group level, the blocks are merged with the group's taking precedence.
#
# For more information and examples on the "update" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update
#
update {
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# at a time.
max_parallel = 1
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
auto_revert = false
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
#
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
canary = 0
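# As an illustration only (these values are not part of this job), a canary
# deployment for a group with count = 3 could look like:
#
# canary       = 1
# auto_promote = true
#
# One allocation of the new version is started alongside the existing ones,
# and "auto_promote" promotes it automatically once it is healthy. Setting
# "canary" equal to the group count (3 here) yields a blue/green deployment
# instead.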
}
# The migrate block specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
# For more information on the "migrate" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate
#
migrate {
# Specifies the number of allocations that can be migrated at the same
# time. This number must be less than the total count for the group, as
# (count - max_parallel) allocations will be left running during migrations.
max_parallel = 1
# Specifies the mechanism by which allocation health is determined. The
# potential values are "checks" or "task_states".
health_check = "checks"
# Specifies the minimum time the allocation must be in the healthy state
# before it is marked as healthy and unblocks further allocations from being
# migrated. This is specified using a label suffix like "30s" or "15m".
min_healthy_time = "10s"
# Specifies the deadline by which the allocation must be marked as healthy,
# after which it is automatically transitioned to unhealthy. This is
# specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
# The "group" block defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
# For more information and examples on the "group" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group
#
group "cache" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = 1
# The "network" block specifies the network configuration for the allocation
# including requesting port bindings.
#
# For more information and examples on the "network" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/network
#
network {
port "db" {
to = 6379
}
}
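# For reference, a port can also be pinned to a fixed host port with the
# "static" parameter. The commented block below is illustrative only and is
# not part of this job:
#
# network {
#   port "db" {
#     static = 6379
#   }
# }
#
# Without "static", Nomad assigns a dynamic host port and, via "to" as
# configured above, maps it to port 6379 inside the task.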
# The "service" block instructs Nomad to register this task as a service
# in the service discovery engine, which is currently Nomad or Consul. This
# will make the service discoverable after Nomad has placed it on a host and
# port.
#
# For more information and examples on the "service" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/service
#
service {
name = "redis-cache"
tags = ["global", "cache"]
port = "db"
provider = "nomad"
# The "check" block instructs Nomad to create a Consul health check for
# this service. A sample check is provided here for your convenience;
# uncomment it to enable it. The "check" block is documented in the
# "service" block documentation.
# check {
# name = "alive"
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
}
# The "restart" block configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
# For more information and examples on the "restart" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart
#
restart {
# The number of restart attempts allowed within the specified interval.
attempts = 2
interval = "30m"
# The "delay" parameter specifies the duration to wait before restarting
# a task after it has failed.
delay = "15s"
# The "mode" parameter controls what happens when a task has restarted
# "attempts" times within the interval. "delay" mode delays the next
# restart until the next interval. "fail" mode does not restart the task
# if "attempts" has been hit within the interval.
mode = "fail"
}
# The "ephemeral_disk" block instructs Nomad to utilize an ephemeral disk
# instead of a hard disk requirement. Clients using this block should
# not specify disk requirements in the resources block of the task. All
# tasks in this group will share the same ephemeral disk.
#
# For more information and examples on the "ephemeral_disk" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk
#
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
#
# Setting migrate to true results in the allocation directory of a
# sticky allocation being migrated.
# migrate = true
#
# The "size" parameter specifies the size in MB of shared ephemeral disk
# between tasks in the group.
size = 300
}
# The "affinity" block enables operators to express placement preferences
# based on node attributes or metadata.
#
# For more information and examples on the "affinity" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity
#
# affinity {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# value specifies the desired attribute value. In this example Nomad
# will prefer placement in the "us-west1" datacenter.
# value = "us-west1"
# weight can be used to indicate relative preference
# when the job has more than one affinity. It defaults to 50 if not set.
# weight = 100
# }
# The "spread" block allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
# For more information and examples on the "spread" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread
#
# spread {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# targets can be used to define desired percentages of allocations
# for each targeted attribute value.
#
# target "us-east1" {
# percent = 60
# }
# target "us-west1" {
# percent = 40
# }
# }
# The "task" block creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
# For more information and examples on the "task" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task
#
task "redis" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "docker"
# The "config" block specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
image = "redis:7"
ports = ["db"]
# The "auth_soft_fail" configuration instructs Nomad to try public
# repositories if the task fails to authenticate when pulling images
# and the Docker driver has an "auth" configuration block.
auth_soft_fail = true
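# A commented, purely illustrative "auth" block; the registry credentials
# below are placeholders and not part of this job:
#
# auth {
#   username = "registry-user"
#   password = "registry-password"
# }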
}
# The "artifact" block instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
# task. It is possible to specify the "artifact" block multiple times to
# download multiple artifacts.
#
# For more information and examples on the "artifact" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact
#
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# The "logs" block instructs the Nomad client on how many log files and
# the maximum size of those log files to retain. Logging is enabled by
# default, but the "logs" block allows for finer-grained control over
# the log rotation and storage configuration.
#
# For more information and examples on the "logs" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs
#
# logs {
# max_files = 10
# max_file_size = 15
# }
# The "identity" block instructs Nomad to expose the task's workload
# identity token as an environment variable and in the file
# secrets/nomad_token.
identity {
env = true
file = true
}
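# With "env = true" the token is exposed to the task as the NOMAD_TOKEN
# environment variable; with "file = true" it is written to
# secrets/nomad_token in the task's allocation directory. A task script
# could read it, for example (illustrative only), with:
#
#   token="$(cat "${NOMAD_SECRETS_DIR}/nomad_token")"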
# The "resources" block describes the requirements a task needs to
# execute. Resource requirements include memory, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
# For more information and examples on the "resources" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources
#
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
}
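# Resources can also be expressed in other ways, for example by reserving
# whole CPU cores or, if memory oversubscription is enabled on the cluster,
# by setting a soft memory limit with a higher hard cap. The commented block
# below is illustrative only and not part of this job:
#
# resources {
#   cores      = 1
#   memory     = 256
#   memory_max = 512
# }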
# The "template" block instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
#
# For more information and examples on the "template" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/template
#
# template {
# data = "---\nkey: {{ key \"service/my-key\" }}"
# destination = "local/file.yml"
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# The "template" block can also be used to create environment variables
# for tasks that prefer those to config files. The task will be restarted
# when data pulled from Consul or Vault changes.
#
# template {
# data = "KEY={{ key \"service/my-key\" }}"
# destination = "local/file.env"
# env = true
# }
# The "vault" block instructs the Nomad client to acquire a token from
# a HashiCorp Vault server. The Nomad servers must be configured and
# authorized to communicate with Vault. By default, Nomad will inject
# the token into the job via an environment variable and make the token
# available to the "template" block. The Nomad client handles the renewal
# and revocation of the Vault token.
#
# For more information and examples on the "vault" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/vault
#
# vault {
# policies = ["cdn", "frontend"]
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# Controls the timeout between signalling a task that it will be killed
# and killing the task. If not set, a default is used.
# kill_timeout = "20s"
}
}
}