hosts_example.yml
---
all:
vars:
ansible_connection: ssh
ansible_user: ec2-user
ansible_become: true
ansible_ssh_private_key_file: /tmp/certs/ssh_priv.pem
#### Setting Proxy Environment variables ####
## To set proxy env vars for the duration of the playbook run, uncomment the below block and set as necessary
# proxy_env:
# http_proxy: http://proxy.example.com:8080
# https_proxy: http://proxy.example.com:8080
## Note: You must use Hostnames or IPs to define your no_proxy server addresses, CIDR ranges are not supported.
# no_proxy: localhost,127.0.0.1,internal.example.com
#### SASL Authentication Configuration ####
## By default there will be no SASL Authentication
## For SASL/PLAIN uncomment this line:
# sasl_protocol: plain
## For SASL/SCRAM uncomment this line:
# sasl_protocol: scram
## For SASL/GSSAPI uncomment this line and see Kerberos Configuration properties below
# sasl_protocol: kerberos
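## For SASL/PLAIN you can also define the users to be created. A minimal sketch,
## assuming the sasl_plain_users variable documented in CP-Ansible's VARIABLES.md
## (verify the variable name for your version); values are illustrative:
# sasl_plain_users:
#   admin:
#     principal: admin
#     password: admin-secret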
#### Zookeeper SASL Authentication ####
## Zookeeper can have Kerberos (GSSAPI) or Digest-MD5 SASL Authentication
## By default, when sasl_protocol = kerberos, zookeeper will also use SASL Kerberos. It can be overridden with the variable below.
## When a mechanism is selected, zookeeper.set.acl=true is added to kafka's server.properties. Note: this property is not added when using mTLS; set it manually with Custom Properties
# zookeeper_sasl_protocol: <none/kerberos/digest>
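## For example, to keep kafka on kerberos but use Digest-MD5 for zookeeper:
# zookeeper_sasl_protocol: digest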
#### Kerberos Configuration ####
## Applicable when sasl_protocol is kerberos
## REQUIRED: Under each host, set the keytab file path and principal name; see below
# kerberos_configure: <Boolean for ansible to install kerberos packages and configure this file: /etc/krb5.conf, defaults to true>
# kerberos:
# realm: <KDC server realm eg. confluent.example.com>
# kdc_hostname: <hostname of machine with KDC running eg. ip-172-31-45-82.us-east-2.compute.internal>
# admin_hostname: <hostname of machine with KDC running eg. ip-172-31-45-82.us-east-2.compute.internal>
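## For example, a filled-in block using the sample values above:
# kerberos_configure: true
# kerberos:
#   realm: confluent.example.com
#   kdc_hostname: ip-172-31-45-82.us-east-2.compute.internal
#   admin_hostname: ip-172-31-45-82.us-east-2.compute.internal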
#### TLS Configuration ####
## By default, data will NOT be encrypted. To turn on TLS encryption, uncomment this line
# ssl_enabled: true
## By default, the components will be configured with One-Way TLS. To turn on TLS mutual auth, uncomment this line:
# ssl_mutual_auth_enabled: true
## By default, the certs for this configuration will be self-signed. To deploy custom certificates, there are two options:
## Option 1: Custom Certs
## You will need to provide the path to the Certificate Authority cert used to sign each host's certs,
## as well as the signed certificate path and the key for that certificate for each host.
## These will need to be set for the correct host
# ssl_custom_certs: true
# ssl_custom_certs_remote_src: true # set to true if the key, crt, and ca files are already on the hosts; the file paths must still be set
# ssl_ca_cert_filepath: "/tmp/certs/ca.crt" # Can be a bundle of ca certs to be included in truststore
# ssl_signed_cert_filepath: "/tmp/certs/{{inventory_hostname}}-signed.crt" # Can be a full chain of certs
# ssl_key_filepath: "/tmp/certs/{{inventory_hostname}}-key.pem"
# ssl_key_password: <password for key for each host, it will be passed in the form -passin pass:{{ssl_key_password}} >
# regenerate_keystore_and_truststore: true # Set to true to update certs on hosts. If keystores/truststores exist, they won't be updated without this variable.
## Option 2: Custom Keystores and Truststores
## CP-Ansible can move keystores/truststores to their corresponding hosts and configure the components to use them. Set these vars:
# ssl_provided_keystore_and_truststore: true
# ssl_keystore_filepath: "/tmp/certs/{{inventory_hostname}}-keystore.jks"
# ssl_keystore_key_password: mystorepassword
# ssl_keystore_store_password: mystorepassword
# ssl_keystore_alias: <alias for host specific certificate, only required if multiple certs in provided keystore>
# ssl_truststore_filepath: "/tmp/certs/truststore.jks"
# ssl_truststore_password: truststorepass
# ssl_truststore_ca_cert_alias: <alias to the ca certificate in the truststore eg. CARoot>
#### Zookeeper TLS Configuration ####
## Zookeeper can also have TLS Encryption and mTLS Authentication
## Zookeeper's TLS settings are inherited from the ssl_enabled settings.
## If you have ssl_enabled, but want zookeeper without TLS, uncomment these lines
# zookeeper_ssl_enabled: false
# zookeeper_ssl_mutual_auth_enabled: false
#### Certificate Regeneration ####
## When using self-signed certificates, each playbook run will regenerate the CA. To turn this off, uncomment this line:
# regenerate_ca: false
## By default, the playbook will recreate the keystores and truststores on each run.
## To prevent this, uncomment this line:
# regenerate_keystore_and_truststore: false
#### Monitoring Configuration ####
## Jolokia is disabled by default. When enabled, the Jolokia jar is pulled from the internet and enabled on all components
## To enable, uncomment this line:
# jolokia_enabled: true
## To copy from control host instead of downloading from remote URL:
# jolokia_url_remote: false
## During setup, the hosts will download the jolokia agent jar from Maven. To change where that jar is downloaded from, set this var:
# jolokia_jar_url: http://<internal-server>/jolokia-jvm-1.6.2-agent.jar
## JMX Exporter is disabled by default. When enabled, the JMX Exporter jar will be pulled from the internet and enabled on the broker and zookeeper *only*.
## To enable, uncomment this line:
# jmxexporter_enabled: true
## To change where that jar is downloaded from, set this var:
# jmxexporter_jar_url: http://<internal-server>/jmx_prometheus_javaagent-0.12.0.jar
## To copy from control host instead of downloading from remote URL:
# jmxexporter_url_remote: false
#### Custom Yum Repo File (Rhel/Centos) ####
## If you are using your own yum repo server to host the packages, in the case of an air-gapped environment,
## use the below variables to distribute a custom .repo file to the hosts and skip our repo setup.
## Note: your repo server must host all Confluent packages
# repository_configuration: custom
# custom_yum_repofile_filepath: /tmp/my-repo.repo
#### Custom Apt Repo File (Ubuntu/Debian) ####
## If you are using your own apt repo server to host the packages, in the case of an air-gapped environment,
## use the below variables to distribute a custom source list file to the hosts and skip our repo setup.
## Note: your repo server must host all Confluent packages
# repository_configuration: custom
# custom_apt_repo_filepath: "/tmp/my-source.list"
#### Confluent Server vs Confluent Kafka ####
## Confluent Server will be installed by default; to install confluent-kafka instead, uncomment the below:
# confluent_server_enabled: false
#### Schema Validation ####
## Schema Validation is disabled by default in the kafka configuration. To enable it, uncomment the line below.
## Schema Validation only works with confluent_server_enabled: true
# kafka_broker_schema_validation_enabled: true
#### FIPS Security ####
## To enable FIPS for added security, uncomment the below line.
## FIPS only works with ssl_enabled: true and confluent_server_enabled: true
# fips_enabled: true
#### Configuring Multiple Listeners ####
## CP-Ansible will configure two listeners on the broker: a broker listener for inter-broker communication and an internal listener for the components and other clients.
## If you only need one listener, uncomment this line:
# kafka_broker_configure_multiple_listeners: false
## By default both of these listeners will follow whatever you set for ssl_enabled and sasl_protocol.
## To configure different security settings on the internal and external listeners set the following variables:
# kafka_broker_custom_listeners:
# broker:
# name: BROKER
# port: 9091
# ssl_enabled: false
# ssl_mutual_auth_enabled: false
# sasl_protocol: none
# internal:
# name: INTERNAL
# port: 9092
# ssl_enabled: true
# ssl_mutual_auth_enabled: false
# sasl_protocol: scram
## You can also add additional listeners; make sure the name and port are unique
# client_listener:
# name: CLIENT
# port: 9093
# ssl_enabled: true
# ssl_mutual_auth_enabled: true
# sasl_protocol: scram
#### Creating Connectors ####
## To manage the connector configs from Ansible, set the following list of connector objects:
## one per connector; each must have `name` and `config` properties
## make sure to provide numeric values as strings
# kafka_connect_connectors:
# - name: sample-connector
# config:
# connector.class: "org.apache.kafka.connect.tools.VerifiableSinkConnector"
# tasks.max: "1"
# file: "path/to/file.txt"
# topics: "test_topic"
#### Configuring Role Based Access Control ####
## To have CP-Ansible configure Components for RBAC and create necessary role bindings, set these mandatory variables:
## Note: Confluent components will be configured to connect to the "internal" listener automatically
## DO NOT UPDATE the internal listener
## Note: It is recommended to create an additional listener for external clients, but the interbroker listener would also work
## Note: An authentication mode must be selected on every listener; for example, a listener with ssl_mutual_auth_enabled=false and sasl_protocol=none (no authentication) is not supported.
# rbac_enabled: true
##
## LDAP Users
## Note: The users below must already exist in your LDAP environment. See kafka_broker vars for LDAP connection details.
# mds_super_user: <Your mds super user which has the ability to bootstrap RBAC roles and permissions>
# mds_super_user_password: <ldap password>
# kafka_broker_ldap_user: <Your Embedded Rest Proxy username in LDAP>
# kafka_broker_ldap_password: <Your Embedded Rest Proxy user's LDAP password>
# schema_registry_ldap_user: <Your Schema Registry username in LDAP>
# schema_registry_ldap_password: <Your schema registry user's LDAP password>
# kafka_connect_ldap_user: <Your Connect username in LDAP>
# kafka_connect_ldap_password: <Your Connect user's password in LDAP>
# ksql_ldap_user: <Your KSQL username in LDAP>
# ksql_ldap_password: <Your KSQL user's password in LDAP>
# kafka_rest_ldap_user: <Your REST Proxy's username in LDAP>
# kafka_rest_ldap_password: <Your REST Proxy's password in LDAP>
# control_center_ldap_user: <Your Control Center username in LDAP>
# control_center_ldap_password: <Your Control Center password in LDAP>
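## For example (illustrative values only; the users must already exist in LDAP):
# mds_super_user: mds
# mds_super_user_password: my-mds-password
# schema_registry_ldap_user: sr-user
# schema_registry_ldap_password: sr-password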
## Below are optional variables
# create_mds_certs: false # To provide your own MDS Certs set this variable and the next two
# token_services_public_pem_file: /path/to/public.pem
# token_services_private_pem_file: /path/to/tokenKeypair.pem
# mds_acls_enabled: false # To turn off MDS-based ACLs; they are on by default when RBAC is on
# kafka_broker_rest_ssl_enabled: true/false # Defaults to whatever the ssl_enabled var is set to
## To allow the playbooks to configure additional users as system admins on the platform, set the list below:
# rbac_component_additional_system_admins:
# - user1
# - user2
##
#### Configuring Role Based Access Control with a remote MDS ####
## To have CP-Ansible configure Brokers and Components for RBAC with the MDS on a remote Kafka cluster, set these mandatory variables in addition to those listed above:
# rbac_enabled: true
# external_mds_enabled: true
## The URL for the MDS REST API on your Kafka Cluster hosting MDS
# mds_bootstrap_server_urls: http(s)://<mds-broker-hostname>:8090,http(s)://<mds-broker-hostname>:8090
## The URL and Port for the listener on your Kafka Cluster hosting the MDS that you wish to connect to
# mds_broker_bootstrap_servers: <mds-broker-hostname>:<port>,<mds-broker-hostname>:<port>
## Configure the security settings to match the same listener as defined in the mds_broker_bootstrap_servers
# mds_broker_listener:
# ssl_enabled: true <set to false if the remote MDS does not use TLS>
# ssl_mutual_auth_enabled: true <set to false if the remote MDS does not use mTLS>
# sasl_protocol: none <set the protocol for the remote MDS; options are: kerberos, plain, scram>
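## For example (illustrative hostnames and ports; match the listener on your MDS cluster):
# mds_bootstrap_server_urls: https://mds1.example.com:8090,https://mds2.example.com:8090
# mds_broker_bootstrap_servers: mds1.example.com:9092,mds2.example.com:9092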
##
## By default the Confluent CLI will be installed on each host *when rbac is enabled*. To stop this download, set:
# confluent_cli_download_enabled: false
## The CLI will be downloaded from Confluent's webservers; to customize the location of the binary, set:
# confluent_cli_custom_download_url: <URL to custom webserver hosting for confluent cli>
#### Configuring Telemetry ####
## Set the below required variables
# telemetry_enabled: false
# telemetry_api_key: XXXXXX
# telemetry_api_secret: YYYYYYYYY
## To set custom properties for each service
## Find property options in the Confluent Documentation
# zookeeper_custom_properties:
# initLimit: 6
# syncLimit: 3
# kafka_broker_custom_properties:
# num.io.threads: 15
# schema_registry_custom_properties:
# key: val
# control_center_custom_properties:
# key: val
# kafka_connect_custom_properties:
# key: val
# kafka_rest_custom_properties:
# key: val
# ksql_custom_properties:
# key: val
zookeeper:
## To set variables on all zookeeper hosts, use the vars block here
# vars:
# ## To configure Zookeeper to run as a custom user, uncomment below
# zookeeper_user: custom-user
# zookeeper_group: custom-group
#
# ## To copy files to zookeeper hosts, set this list below
# zookeeper_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
hosts:
ip-172-31-34-246.us-east-2.compute.internal:
## By default the first host will get zookeeper id=1, second gets id=2. Set zookeeper_id to customize
# zookeeper_id: 2
## For kerberos sasl protocol, EACH host will need these two variables:
# zookeeper_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/zookeeper-ip-172-31-34-246.us-east-2.compute.internal.keytab>
# zookeeper_kerberos_principal: <The principal configured in kdc server, eg. zookeeper/ip-172-31-34-246.us-east-2.compute.internal@REALM>
ip-172-31-37-15.us-east-2.compute.internal:
# zookeeper_id: 3
ip-172-31-34-231.us-east-2.compute.internal:
# zookeeper_id: 1
kafka_broker:
## To set variables on all kafka_broker hosts, use the vars block here
# vars:
# ## To configure Kafka to run as a custom user, uncomment below
# kafka_broker_user: custom-user
# kafka_broker_group: custom-group
#
# # To update the data log location, use custom properties.
# # By default the log directory is /var/lib/kafka/data.
#
# kafka_broker_custom_properties:
# log.dirs: dir1,dir2
#
# ## To enable Self-Balancing Kafka Brokers, uncomment the below lines
# kafka_broker_custom_properties:
# confluent.balancer.enable: "true"
#
# ## To configure LDAP for RBAC enablement, you will need to provide the appropriate properties to connect to your LDAP server
# ## using the kafka_broker_custom_properties: variable under the vars block. The following is an example:
# ## Note: If connecting to a remote MDS, do not set these parameters. LDAP is handled by the remote MDS.
# kafka_broker_custom_properties:
# ldap.java.naming.factory.initial: com.sun.jndi.ldap.LdapCtxFactory
# ldap.com.sun.jndi.ldap.read.timeout: 3000
# ldap.java.naming.provider.url: ldap://ldap1:389
# ldap.java.naming.security.principal: uid=mds,OU=rbac,DC=example,DC=com
# ldap.java.naming.security.credentials: password
# ldap.java.naming.security.authentication: simple
# ldap.user.search.base: OU=rbac,DC=example,DC=com
# ldap.group.search.base: OU=rbac,DC=example,DC=com
# ldap.user.name.attribute: uid
# ldap.user.memberof.attribute.pattern: CN=(.*),OU=rbac,DC=example,DC=com
# ldap.group.name.attribute: cn
# ldap.group.member.attribute.pattern: CN=(.*),OU=rbac,DC=example,DC=com
# ldap.user.object.class: account
#
# ## To copy files to kafka broker hosts, set this list below
# kafka_broker_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
hosts:
ip-172-31-34-246.us-east-2.compute.internal:
## By default the first host will get broker id=1, second gets id=2. Set broker_id to customize
# broker_id: 3
## Properties can also be applied on a host-by-host basis.
## In the below example we configure a Multi-Region Cluster by setting the following properties on each host:
# kafka_broker_custom_properties:
# broker.rack: us-east-1d
# replica.selector.class: org.apache.kafka.common.replica.RackAwareReplicaSelector
## For kerberos sasl protocol, EACH host will need these two variables:
# kafka_broker_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/ip-172-31-34-246.us-east-2.compute.internal>
# kafka_broker_kerberos_principal: <The principal configured in kdc server, eg. kafka/ip-172-31-34-246.us-east-2.compute.internal@REALM>
ip-172-31-37-15.us-east-2.compute.internal:
# broker_id: 2
# kafka_broker_custom_properties:
# broker.rack: us-east-1a
# replica.selector.class: org.apache.kafka.common.replica.RackAwareReplicaSelector
ip-172-31-34-231.us-east-2.compute.internal:
# broker_id: 1
# kafka_broker_custom_properties:
# broker.rack: us-east-1b
# replica.selector.class: org.apache.kafka.common.replica.RackAwareReplicaSelector
schema_registry:
## To set variables on all schema_registry hosts, use the vars block here
# vars:
# ## To configure Schema Registry to run as a custom user, uncomment below
# schema_registry_user: custom-user
# schema_registry_group: custom-group
#
# ## To copy files to schema_registry hosts, set this list below
# schema_registry_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
hosts:
ip-172-31-34-246.us-east-2.compute.internal:
## For kerberos sasl protocol, EACH host will need these two variables:
# schema_registry_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/schemaregistry-ip-172-31-34-246.us-east-2.compute.internal>
# schema_registry_kerberos_principal: <The principal configured in kdc server, eg. schemaregistry/ip-172-31-34-246.us-east-2.compute.internal@REALM>
kafka_rest:
## To set variables on all kafka_rest hosts, use the vars block here
# vars:
# ## To configure Rest Proxy to run as a custom user, uncomment below
# kafka_rest_user: custom-user
# kafka_rest_group: custom-group
#
# ## To copy files to kafka_rest hosts, set this list below
# kafka_rest_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
hosts:
ip-172-31-34-246.us-east-2.compute.internal:
## For kerberos sasl protocol, EACH host will need these two variables:
# kafka_rest_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/restproxy-ip-172-31-34-246.us-east-2.compute.internal>
# kafka_rest_kerberos_principal: <The principal configured in kdc server, eg. restproxy/ip-172-31-34-246.us-east-2.compute.internal@REALM>
ksql:
## To set variables on all ksql hosts, use the vars block here
# vars:
# ## To configure KSQL to run as a custom user, uncomment below
# ksql_user: custom-user
# ksql_group: custom-group
#
# ## To copy files to ksql hosts, set this list below
# ksql_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
hosts:
ip-172-31-37-15.us-east-2.compute.internal:
## For kerberos sasl protocol, EACH host will need these two variables:
# ksql_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/ksql-ip-172-31-37-15.us-east-2.compute.internal>
# ksql_kerberos_principal: <The principal configured in kdc server, eg. ksql/ip-172-31-37-15.us-east-2.compute.internal@REALM>
#### To configure multiple ksql clusters, make use of child groups and follow the example below
## Note: There can only be one ksql group, so comment out the above section if configuring multiple ksql clusters
## Decide on a name for each ksql cluster (that is not 'ksql') and use that as the ansible group name; this is how the cluster will be named in c3
# ksql1:
# vars:
# # The below is a mandatory variable that must be unique for each ksql cluster.
# # The service id should end in an underscore by convention
# ksql_service_id: ksql1_
# hosts:
# ip-172-31-34-15.us-east-2.compute.internal:
# ip-172-31-37-16.us-east-2.compute.internal:
#
# ksql2:
# vars:
# ksql_service_id: ksql2_
# hosts:
# ip-172-31-34-17.us-east-2.compute.internal:
# ip-172-31-37-18.us-east-2.compute.internal:
#
# ksql:
# children:
# ksql1:
# ksql2:
kafka_connect:
## To set variables on all kafka_connect hosts, use the vars block here
# vars:
# ## To configure Connect to run as a custom user, uncomment below
# kafka_connect_user: custom-user
# kafka_connect_group: custom-group
#
# ## To copy files to kafka_connect hosts, set this list below
# kafka_connect_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
#
# ### Connectors and the Confluent Hub
# # Adding Connector Paths.
# # NOTE: This variable is mapped to the `plugin.path` Kafka Connect property.
# kafka_connect_plugins_path:
# - /usr/share/java
# - /my/connectors/dir
#
# ## Installing Connectors From Confluent Hub
# kafka_connect_confluent_hub_plugins:
# - jcustenborder/kafka-connect-spooldir:2.0.43
#
# ## Installing Connectors from Archive files local to Ansible host
# kafka_connect_plugins:
# - local/path/to/connect_archive.zip
#
# ## Installing Connectors from Archive files on a remote server (e.g. Nexus)
# kafka_connect_plugins_remote:
# - http://myhost.com/connect_archive.zip
#
# ### RBAC with Connect Secret Registry
# # By default the secret registry will be enabled when RBAC is on. To customize the key, set this var:
# kafka_connect_secret_registry_key: <your own key>
# # To disable the feature:
# kafka_connect_secret_registry_enabled: false
hosts:
ip-172-31-34-246.us-east-2.compute.internal:
## For kerberos sasl protocol, EACH host will need these two variables:
# kafka_connect_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/connect-ip-172-31-34-246.us-east-2.compute.internal>
# kafka_connect_kerberos_principal: <The principal configured in kdc server, eg. connect/ip-172-31-34-246.us-east-2.compute.internal@REALM>
#### To configure multiple connect clusters, make use of child groups and follow the example below
## Note: There can only be one kafka_connect group, so comment out the above section if configuring multiple connect clusters
## Decide on a name for each connect cluster (that is not 'kafka_connect') and use that as ansible group name
# syslog:
# vars:
# # Decide on a group id for each connect cluster. This is a mandatory variable, and must be unique for each cluster
# # The group id will be the name of the connect cluster within c3
# kafka_connect_group_id: connect-syslog
# hosts:
# ip-172-31-34-246.us-east-2.compute.internal:
#
# elastic:
# vars:
# kafka_connect_group_id: connect-elastic
# hosts:
# ip-172-31-34-246.us-east-2.compute.internal:
#
# kafka_connect:
# children:
# syslog:
# elastic:
control_center:
## To set variables on all control_center hosts, use the vars block here
# vars:
# ## To configure Control Center to run as a custom user, uncomment below
# control_center_user: custom-user
# control_center_group: custom-group
#
# ## To copy files to control_center hosts, set this list below
# control_center_copy_files:
# - source_path: /path/to/file.txt
# destination_path: /tmp/file.txt
hosts:
ip-172-31-37-15.us-east-2.compute.internal:
## For kerberos sasl protocol, EACH host will need these two variables:
# control_center_kerberos_keytab_path: <The path on ansible host to keytab file, eg. /tmp/keytabs/controlcenter-ip-172-31-37-15.us-east-2.compute.internal>
# control_center_kerberos_principal: <The principal configured in kdc server, eg. controlcenter/ip-172-31-37-15.us-east-2.compute.internal@REALM>
# ## If you are configuring multiple connect or ksql clusters, the below variables are mandatory.
# # The group names must match the group names as they are in your inventory
# vars:
# ksql_cluster_ansible_group_names:
# - ksql1
# - ksql2
# kafka_connect_cluster_ansible_group_names:
# - syslog
# - elastic