# docker-compose.yml (forked from windmill-labs/windmill)
version: "3.7"
services:
  db:
    deploy:
      # To use an external database, set replicas to 0 and set DATABASE_URL to the external database url in the .env file
      replicas: 1
    image: postgres:16
    shm_size: 1g
    restart: unless-stopped
    volumes:
      - db_data:/var/lib/postgresql/data
    expose:
      - 5432
    ports:
      - 5432:5432
    environment:
      POSTGRES_PASSWORD: changeme
      POSTGRES_DB: windmill
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
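  # A hedged example of the external-database setup mentioned above, as it could
  # appear in the .env file; the host, credentials and database name below are
  # placeholders, not values taken from this repository:
  #   DATABASE_URL=postgres://postgres:changeme@my-external-postgres:5432/windmill?sslmode=disable
  # With replicas set to 0 above, this db service is not started and Windmill
  # connects to the external instance instead.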
  windmill_server:
    image: ${WM_IMAGE}
    pull_policy: always
    deploy:
      replicas: 1
    restart: unless-stopped
    expose:
      - 8000
      - 2525
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - MODE=server
    depends_on:
      db:
        condition: service_healthy
    volumes:
      - worker_logs:/tmp/windmill/logs
  windmill_worker:
    image: ${WM_IMAGE}
    pull_policy: always
    deploy:
      replicas: 3
      resources:
        limits:
          cpus: "1"
          memory: 2048M
    restart: unless-stopped
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - MODE=worker
      - WORKER_GROUP=default
    depends_on:
      db:
        condition: service_healthy
    # To debug jobs by inspecting their working directories, set KEEP_JOB_DIR=true and mount /tmp/windmill (see the example below)
    volumes:
      # Mount the Docker socket so that workers can run Docker containers
      - /var/run/docker.sock:/var/run/docker.sock
      - worker_dependency_cache:/tmp/windmill/cache
      - worker_logs:/tmp/windmill/logs
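  # A hedged sketch of the debugging setup referenced above: merging the
  # following into the windmill_worker service keeps each job's working
  # directory on the host for inspection. The host path ./windmill_jobs is a
  # placeholder, not a path defined by this repository.
  #   environment:
  #     - KEEP_JOB_DIR=true
  #   volumes:
  #     - ./windmill_jobs:/tmp/windmill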
  ## This worker is specialized for "native" jobs. Native jobs run in-process and are therefore much more lightweight than other jobs.
  windmill_worker_native:
    # Use ghcr.io/windmill-labs/windmill-ee:main for the ee
    image: ${WM_IMAGE}
    pull_policy: always
    deploy:
      replicas: 1
      resources:
        limits:
          cpus: "1"
          memory: 2048M
    restart: unless-stopped
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - MODE=worker
      - WORKER_GROUP=native
      - NUM_WORKERS=8
      - SLEEP_QUEUE=200
    depends_on:
      db:
        condition: service_healthy
    volumes:
      - worker_logs:/tmp/windmill/logs
  # This worker is specialized for reports or scraping jobs. It is assigned the "reports" worker group, which has an init script that installs Chromium and can be targeted with the "chromium" worker tag (an example init script follows below).
  # windmill_worker_reports:
  #   image: ${WM_IMAGE}
  #   pull_policy: always
  #   deploy:
  #     replicas: 1
  #     resources:
  #       limits:
  #         cpus: "1"
  #         memory: 2048M
  #   restart: unless-stopped
  #   environment:
  #     - DATABASE_URL=${DATABASE_URL}
  #     - MODE=worker
  #     - WORKER_GROUP=reports
  #   depends_on:
  #     db:
  #       condition: service_healthy
  #   # To debug jobs by inspecting their working directories, set KEEP_JOB_DIR=true and mount /tmp/windmill
  #   volumes:
  #     # Mount the Docker socket so that workers can run Docker containers
  #     - /var/run/docker.sock:/var/run/docker.sock
  #     - worker_dependency_cache:/tmp/windmill/cache
  #     - worker_logs:/tmp/windmill/logs
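  # A hedged example of an init script for the "reports" worker group; init
  # scripts are configured in Windmill's worker group settings rather than in
  # this compose file, and the exact package name is an assumption about the
  # worker base image:
  #   apt-get update && apt-get install -y chromium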
  # The indexer powers full-text job and log search, an EE feature.
  windmill_indexer:
    image: ${WM_IMAGE}
    pull_policy: always
    deploy:
      replicas: 0 # set to 1 to enable full-text job and log search
    restart: unless-stopped
    expose:
      - 8001
    environment:
      - PORT=8001
      - DATABASE_URL=${DATABASE_URL}
      - MODE=indexer
      - TANTIVY_MAX_INDEXED_JOB_LOG_SIZE__MB=1 # job logs bigger than this will be truncated before indexing
      - TANTIVY_S3_BACKUP_PERIOD__S=3600 # how often to back up the index to object storage
      - TANTIVY_INDEX_WRITER_MEMORY_BUDGET__MB=100 # a higher budget means higher indexing throughput
      - TANTIVY_REFRESH_INDEX_PERIOD__S=300 # how often to start indexing new jobs
      - TANTIVY_DOC_COMMIT_MAX_BATCH_SIZE=100000 # how many documents to batch in one commit
      - TANTIVY_SHOW_MEMORY_EVERY=10000 # log memory usage and progress every this many indexed documents
    depends_on:
      db:
        condition: service_healthy
    volumes:
      - windmill_index:/tmp/windmill/search
      - worker_logs:/tmp/windmill/logs
  lsp:
    image: ghcr.io/windmill-labs/windmill-lsp:latest
    pull_policy: always
    restart: unless-stopped
    expose:
      - 3001
    volumes:
      - lsp_cache:/root/.cache
  multiplayer:
    image: ghcr.io/windmill-labs/windmill-multiplayer:latest
    deploy:
      replicas: 0 # Set to 1 to enable multiplayer, which is only available in the Enterprise Edition
    restart: unless-stopped
    expose:
      - 3002
  caddy:
    image: ghcr.io/windmill-labs/caddy-l4:latest
    restart: unless-stopped
    # Configure the mounted Caddyfile and the exposed ports below, or use another reverse proxy if needed (a minimal Caddyfile sketch follows after this service)
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      # - ./certs:/certs # Provide custom certificate files like cert.pem and key.pem to enable HTTPS - see the corresponding section in the Caddyfile
    ports:
      # To change the exposed port, simply change 80:80 to <desired_port>:80. No other changes are needed.
      - 80:80
      - 25:25
      # - 443:443 # Uncomment to enable HTTPS handling by Caddy
    environment:
      - BASE_URL=":80"
      # - BASE_URL=":443" # Uncomment and comment the line above to enable HTTPS via custom certificate and key files
      # - BASE_URL=mydomain.com # Uncomment and comment the line above to enable HTTPS handling by Caddy
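  # A minimal sketch of what the mounted Caddyfile could contain, assuming the
  # service names defined in this file; check the Caddyfile shipped with the
  # upstream repository for the authoritative version:
  #   {$BASE_URL} {
  #     reverse_proxy /ws/* http://lsp:3001
  #     reverse_proxy /* http://windmill_server:8000
  #   }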
volumes:
  db_data: null
  worker_dependency_cache: null
  worker_logs: null
  windmill_index: null
  lsp_cache: null