-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yaml
130 lines (123 loc) · 4.37 KB
/
docker-compose.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
# Compose stack for the API discovery tool:
# Prometheus (metrics) + OTel collector (log/metric ingest) + Grafana (UI)
# + ClickHouse (log storage) + log-analyzer (API discovery batch job).
# NOTE(review): the `version` key is obsolete in Compose v2 (ignored with a
# warning) but still required by legacy docker-compose v1 — kept for compatibility.
version: '3'

volumes:
  prometheus_ladt:
  grafana_ladt:
  clickhouse_ladt:

services:
  prometheus:
    image: prom/prometheus:v2.54.1
    restart: unless-stopped
    # Give Prometheus time to flush its TSDB on shutdown.
    stop_grace_period: 5m
    volumes:
      - ./services/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_ladt:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--web.enable-lifecycle'
      - '--enable-feature=otlp-write-receiver'
      - '--storage.tsdb.retention.time=1y'
    networks:
      - api_discovery_tool

  otel-collector:
    image: ghcr.io/f5devcentral/application-study-tool/otel_custom_collector:v0.7.0
    restart: unless-stopped
    volumes:
      - ./services/otel_collector:/etc/otel-collector-config
    command: ["--config=/etc/otel-collector-config/config.yaml"]
    env_file:
      - ".env"
    environment:
      CLICKHOUSE_PASSWORD: ${OTEL_COLLECTOR_CLICKHOUSE_PASSWORD}
    # Quoted to avoid the YAML 1.1 sexagesimal-integer trap on digit:digit values.
    # Host syslog ports (514/udp, 6514/tcp) map to the collector's listener on 54526.
    ports:
      - "514:54526/udp"
      - "6514:54526"
    networks:
      - api_discovery_tool
    depends_on:
      clickhouse:
        condition: service_healthy

  grafana:
    image: grafana/grafana:11.2.0
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - grafana_ladt:/var/lib/grafana
      - ./services/grafana/provisioning/:/etc/grafana/provisioning
    env_file: ".env"
    environment:
      CLICKHOUSE_PASSWORD: ${GRAFANA_CLICKHOUSE_PASSWORD}
    networks:
      - api_discovery_tool

  clickhouse:
    image: clickhouse/clickhouse-server:24.8.4
    # Consistency fix: every other long-running service restarts; clickhouse is a
    # hard dependency (service_healthy) of otel-collector and log-analyzer.
    restart: unless-stopped
    user: "101:101"
    cap_add:
      - SYS_NICE
      - NET_ADMIN
      - IPC_LOCK
    hostname: clickhouse
    volumes:
      - ./services/clickhouse/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
      - ./services/clickhouse/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
      - ./services/clickhouse/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
      - clickhouse_ladt:/var/lib/clickhouse
    env_file: ".env"
    environment:
      GRAFANA_CLICKHOUSE_PASSWORD: ${GRAFANA_CLICKHOUSE_PASSWORD}
      ANALYZER_CLICKHOUSE_PASSWORD: ${ANALYZER_CLICKHOUSE_PASSWORD}
      OTEL_COLLECTOR_CLICKHOUSE_PASSWORD: ${OTEL_COLLECTOR_CLICKHOUSE_PASSWORD}
    networks:
      - api_discovery_tool
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
      # Explicit timing so dependents (service_healthy) don't rely on implicit defaults.
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

  log-analyzer:
    image: ghcr.io/f5devcentral/ast-api-discovery/api_discovery_log_analyzer:v0.2.0
    restart: unless-stopped
    # All values quoted: Compose environment entries are strings; quoting avoids
    # YAML int/bool coercion surprises.
    environment:
      # Default will run at 1 hour interval or every access logs fetch limit
      ACCESS_LOGS_FETCH_LIMIT: "3000000"
      # Check to see if log limit or max interval reached every X seconds
      ACCESS_LOGS_COUNT_CHECK_INTERVAL_SECONDS: "30"
      # Run at least every 2 hours
      DATA_ANALYSIS_MAX_INTERVAL_SECONDS: "7200"
      # Run at most every 5 minutes
      DATA_ANALYSIS_MIN_INTERVAL_SECONDS: "300"
      # If ACCESS_LOGS_FETCH_LIMIT logs have arrived since last analysis and it's been
      # more than DATA_ANALYSIS_MIN_INTERVAL_SECONDS, run the analysis
      DATA_ANALYSIS_AT_FETCH_LIMIT: "True"
      # If there are no prior analysis runs, look back at most X seconds
      DATA_ANALYSIS_MAX_FETCH_INTERVAL_SECONDS: "7200"
      # This tells the discovery not to replace the first 2 path segments with $DYN$
      # e.g. /api/users will not be eligible, but the 12345 in /api/users/12345 will
      PRUNE_MIN_LEVEL_FOR_PRUNING: "3"
      # Tell discovery not to prune segments with fewer children than:
      PRUNE_NB_CHILDREN_LOW_NB_CHILDREN: "10"
      # Tell discovery to prune segments with more children than:
      PRUNE_NB_CHILDREN_HIGH_NB_CHILDREN: "50"
      CLICKHOUSE_USE_TLS: "False"  # TODO: Default True
      CLICKHOUSE_HOST: clickhouse
      CLICKHOUSE_PASSWORD: ${ANALYZER_CLICKHOUSE_PASSWORD}
    networks:
      - api_discovery_tool
    depends_on:
      clickhouse:
        condition: service_healthy

  # Optional load generator for testing — disabled by default.
  # loadgenerator:
  #   image: locustio/locust
  #   container_name: load-generator
  #   deploy:
  #     resources:
  #       limits:
  #         memory: 120M
  #   volumes:
  #     - ./:/mnt/locust
  #   restart: unless-stopped
  #   ports:
  #     - "8089:8089"
  #   command: --loglevel DEBUG -f /mnt/locust/locustfile.py --host https://nginx.f5kc.com

networks:
  api_discovery_tool: