diff --git a/integration-tests/docker/Dockerfile b/integration-tests/docker/Dockerfile
index 29f4ef9b8ef9..02cedcc461ac 100644
--- a/integration-tests/docker/Dockerfile
+++ b/integration-tests/docker/Dockerfile
@@ -22,6 +22,13 @@ RUN wget -q -O - http://www.us.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeep
RUN cp /usr/local/zookeeper-3.4.6/conf/zoo_sample.cfg /usr/local/zookeeper-3.4.6/conf/zoo.cfg
RUN ln -s /usr/local/zookeeper-3.4.6 /usr/local/zookeeper
+# Kafka
+RUN wget -q -O - http://www.us.apache.org/dist/kafka/0.8.2.0/kafka_2.10-0.8.2.0.tgz | tar -xzf - -C /usr/local
+RUN ln -s /usr/local/kafka_2.10-0.8.2.0 /usr/local/kafka
+# unless advertised.host.name is set to docker ip, publishing data fails
+ADD docker_ip docker_ip
+RUN perl -pi -e "s/#advertised.port=.*/advertised.port=9092/; s/#advertised.host.*/advertised.host.name=$(cat docker_ip)/" /usr/local/kafka/config/server.properties
+
# git
RUN apt-get install -y git
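
Note: the perl one-liner above only helps if docker_ip held the right address at build time. A hedged sanity check, assuming the druid-zookeeper-kafka container from run_cluster.sh is running (172.17.0.2 below is only a stand-in for the actual docker ip):

    # the substituted values should show the build-time docker ip
    docker exec druid-zookeeper-kafka \
      grep -E '^advertised\.(host\.name|port)=' /usr/local/kafka/config/server.properties
    # advertised.host.name=172.17.0.2
    # advertised.port=9092
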
diff --git a/integration-tests/docker/broker.conf b/integration-tests/docker/broker.conf
index 63b6af12e3c7..332796a66e34 100644
--- a/integration-tests/docker/broker.conf
+++ b/integration-tests/docker/broker.conf
@@ -11,7 +11,7 @@ command=java
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Ddruid.host=%(ENV_HOST_IP)s
- -Ddruid.zk.service.host=druid-zookeeper
+ -Ddruid.zk.service.host=druid-zookeeper-kafka
-Ddruid.processing.buffer.sizeBytes=75000000
-Ddruid.server.http.numThreads=100
-Ddruid.processing.numThreads=1
diff --git a/integration-tests/docker/coordinator.conf b/integration-tests/docker/coordinator.conf
index 60cf6b97362d..c8fa1c8f770d 100644
--- a/integration-tests/docker/coordinator.conf
+++ b/integration-tests/docker/coordinator.conf
@@ -13,7 +13,7 @@ command=java
-Ddruid.metadata.storage.connector.connectURI=jdbc:mysql://druid-metadata-storage/druid
-Ddruid.metadata.storage.connector.user=druid
-Ddruid.metadata.storage.connector.password=diurd
- -Ddruid.zk.service.host=druid-zookeeper
+ -Ddruid.zk.service.host=druid-zookeeper-kafka
-Ddruid.coordinator.startDelay=PT5S
-cp /usr/local/druid/lib/*
io.druid.cli.Main server coordinator
diff --git a/integration-tests/docker/historical.conf b/integration-tests/docker/historical.conf
index ab02bbc664a8..85e32e725001 100644
--- a/integration-tests/docker/historical.conf
+++ b/integration-tests/docker/historical.conf
@@ -11,7 +11,7 @@ command=java
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Ddruid.host=%(ENV_HOST_IP)s
- -Ddruid.zk.service.host=druid-zookeeper
+ -Ddruid.zk.service.host=druid-zookeeper-kafka
-Ddruid.s3.accessKey=AKIAIMKECRUYKDQGR6YQ
-Ddruid.s3.secretKey=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
-Ddruid.processing.buffer.sizeBytes=75000000
diff --git a/integration-tests/docker/kafka.conf b/integration-tests/docker/kafka.conf
new file mode 100644
index 000000000000..861aa36b32cf
--- /dev/null
+++ b/integration-tests/docker/kafka.conf
@@ -0,0 +1,5 @@
+[program:kafka]
+command=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
+user=daemon
+priority=0
+stdout_logfile=/shared/logs/kafka.log
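
Note: if the broker does not come up, the stdout_logfile above is the first place to look. Two hedged checks from the docker host, assuming the $SHARED_DIR mount from run_cluster.sh, the DOCKER_IP variable the script expects, and nc being available on the host:

    # kafka's stdout, written to /shared/logs/kafka.log in the container, lands here on the host
    tail -n 20 ${HOME}/shared/logs/kafka.log
    # once the broker is up it registers an ephemeral znode under /brokers/ids;
    # "dump" is a standard zookeeper four-letter command
    echo dump | nc $DOCKER_IP 2181 | grep /brokers/ids
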
diff --git a/integration-tests/docker/middlemanager.conf b/integration-tests/docker/middlemanager.conf
index cf1436182e76..c32e8a580b36 100644
--- a/integration-tests/docker/middlemanager.conf
+++ b/integration-tests/docker/middlemanager.conf
@@ -9,7 +9,7 @@ command=java
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Ddruid.host=%(ENV_HOST_IP)s
- -Ddruid.zk.service.host=druid-zookeeper
+ -Ddruid.zk.service.host=druid-zookeeper-kafka
-Ddruid.indexer.logs.directory=/shared/tasklogs
-Ddruid.storage.storageDirectory=/shared/storage
-Ddruid.indexer.runner.javaOpts=-server -Xmx256m -Xms256m -XX:NewSize=128m -XX:MaxNewSize=128m -XX:+UseConcMarkSweepGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps
diff --git a/integration-tests/docker/overlord.conf b/integration-tests/docker/overlord.conf
index a0d436c5a025..1c646b82fbf7 100644
--- a/integration-tests/docker/overlord.conf
+++ b/integration-tests/docker/overlord.conf
@@ -13,7 +13,7 @@ command=java
-Ddruid.metadata.storage.connector.connectURI=jdbc:mysql://druid-metadata-storage/druid
-Ddruid.metadata.storage.connector.user=druid
-Ddruid.metadata.storage.connector.password=diurd
- -Ddruid.zk.service.host=druid-zookeeper
+ -Ddruid.zk.service.host=druid-zookeeper-kafka
-Ddruid.indexer.storage.type=metadata
-Ddruid.indexer.logs.directory=/shared/tasklogs
-Ddruid.indexer.runner.type=remote
diff --git a/integration-tests/docker/router.conf b/integration-tests/docker/router.conf
index ddd8121f6c9d..17d5146bdf7b 100644
--- a/integration-tests/docker/router.conf
+++ b/integration-tests/docker/router.conf
@@ -8,7 +8,7 @@ command=java
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Ddruid.host=%(ENV_HOST_IP)s
- -Ddruid.zk.service.host=druid-zookeeper
+ -Ddruid.zk.service.host=druid-zookeeper-kafka
-Ddruid.computation.buffer.size=75000000
-Ddruid.server.http.numThreads=100
-Ddruid.processing.numThreads=1
diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml
index 3bbbf9f199f1..6fe8d368b73a 100644
--- a/integration-tests/pom.xml
+++ b/integration-tests/pom.xml
@@ -29,6 +29,11 @@
        <version>0.9.0-SNAPSHOT</version>
+    <properties>
+        <apache.kafka.version>0.8.2.1</apache.kafka.version>
+        <zkclient.version>0.4</zkclient.version>
+    </properties>
+
            <groupId>io.druid</groupId>
@@ -40,6 +45,11 @@
            <artifactId>druid-s3-extensions</artifactId>
            <version>${project.parent.version}</version>
+        <dependency>
+            <groupId>io.druid.extensions</groupId>
+            <artifactId>druid-kafka-eight</artifactId>
+            <version>${project.parent.version}</version>
+        </dependency>
            <groupId>io.druid.extensions</groupId>
            <artifactId>druid-histogram</artifactId>
@@ -71,6 +81,29 @@
            <artifactId>easymock</artifactId>
            <scope>test</scope>
+        <dependency>
+            <groupId>com.101tec</groupId>
+            <artifactId>zkclient</artifactId>
+            <version>${zkclient.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.apache.zookeeper</groupId>
+                    <artifactId>zookeeper</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.10</artifactId>
+            <version>${apache.kafka.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
diff --git a/integration-tests/run_cluster.sh b/integration-tests/run_cluster.sh
index ea543915e8d8..1cccc9228521 100755
--- a/integration-tests/run_cluster.sh
+++ b/integration-tests/run_cluster.sh
@@ -1,5 +1,5 @@
# cleanup
-for node in druid-historical druid-coordinator druid-overlord druid-router druid-broker druid-middlemanager druid-zookeeper druid-metadata-storage;
+for node in druid-historical druid-coordinator druid-overlord druid-router druid-broker druid-middlemanager druid-zookeeper-kafka druid-metadata-storage;
do
docker stop $node
docker rm $node
@@ -12,6 +12,9 @@ SHARED_DIR=${HOME}/shared
SUPERVISORDIR=/usr/lib/druid/conf
RESOURCEDIR=$DIR/src/test/resources
+# so docker IP addr will be known during docker build
+echo $DOCKER_IP > $DOCKERDIR/docker_ip
+
# Make directories if they don't exist
mkdir -p $SHARED_DIR/logs
mkdir -p $SHARED_DIR/tasklogs
@@ -24,26 +27,26 @@ mvn dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib
# Build Druid Cluster Image
docker build -t druid/cluster $SHARED_DIR/docker
-# Start zookeeper
-docker run -d --name druid-zookeeper -p 2181:2181 -v $SHARED_DIR:/shared -v $DOCKERDIR/zookeeper.conf:$SUPERVISORDIR/zookeeper.conf druid/cluster
+# Start zookeeper and kafka
+docker run -d --name druid-zookeeper-kafka -p 2181:2181 -p 9092:9092 -v $SHARED_DIR:/shared -v $DOCKERDIR/zookeeper.conf:$SUPERVISORDIR/zookeeper.conf -v $DOCKERDIR/kafka.conf:$SUPERVISORDIR/kafka.conf druid/cluster
# Start MYSQL
docker run -d --name druid-metadata-storage -v $SHARED_DIR:/shared -v $DOCKERDIR/metadata-storage.conf:$SUPERVISORDIR/metadata-storage.conf druid/cluster
# Start Overlord
-docker run -d --name druid-overlord -p 8090:8090 -v $SHARED_DIR:/shared -v $DOCKERDIR/overlord.conf:$SUPERVISORDIR/overlord.conf --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper:druid-zookeeper druid/cluster
+docker run -d --name druid-overlord -p 8090:8090 -v $SHARED_DIR:/shared -v $DOCKERDIR/overlord.conf:$SUPERVISORDIR/overlord.conf --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
# Start Coordinator
-docker run -d --name druid-coordinator -p 8081:8081 -v $SHARED_DIR:/shared -v $DOCKERDIR/coordinator.conf:$SUPERVISORDIR/coordinator.conf --link druid-overlord:druid-overlord --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper:druid-zookeeper druid/cluster
+docker run -d --name druid-coordinator -p 8081:8081 -v $SHARED_DIR:/shared -v $DOCKERDIR/coordinator.conf:$SUPERVISORDIR/coordinator.conf --link druid-overlord:druid-overlord --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
# Start Historical
-docker run -d --name druid-historical -v $SHARED_DIR:/shared -v $DOCKERDIR/historical.conf:$SUPERVISORDIR/historical.conf --link druid-zookeeper:druid-zookeeper druid/cluster
+docker run -d --name druid-historical -v $SHARED_DIR:/shared -v $DOCKERDIR/historical.conf:$SUPERVISORDIR/historical.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
# Start Middlemanager
-docker run -d --name druid-middlemanager -p 8100:8100 -p 8101:8101 -p 8102:8102 -p 8103:8103 -p 8104:8104 -p 8105:8105 -v $RESOURCEDIR:/resources -v $SHARED_DIR:/shared -v $DOCKERDIR/middlemanager.conf:$SUPERVISORDIR/middlemanager.conf --link druid-zookeeper:druid-zookeeper --link druid-overlord:druid-overlord druid/cluster
+docker run -d --name druid-middlemanager -p 8100:8100 -p 8101:8101 -p 8102:8102 -p 8103:8103 -p 8104:8104 -p 8105:8105 -v $RESOURCEDIR:/resources -v $SHARED_DIR:/shared -v $DOCKERDIR/middlemanager.conf:$SUPERVISORDIR/middlemanager.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-overlord:druid-overlord druid/cluster
# Start Broker
-docker run -d --name druid-broker -p 8082:8082 -v $SHARED_DIR:/shared -v $DOCKERDIR/broker.conf:$SUPERVISORDIR/broker.conf --link druid-zookeeper:druid-zookeeper --link druid-middlemanager:druid-middlemanager --link druid-historical:druid-historical druid/cluster
+docker run -d --name druid-broker -p 8082:8082 -v $SHARED_DIR:/shared -v $DOCKERDIR/broker.conf:$SUPERVISORDIR/broker.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-middlemanager:druid-middlemanager --link druid-historical:druid-historical druid/cluster
# Start Router
-docker run -d --name druid-router -p 8888:8888 -v $SHARED_DIR:/shared -v $DOCKERDIR/router.conf:$SUPERVISORDIR/router.conf --link druid-zookeeper:druid-zookeeper --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
+docker run -d --name druid-router -p 8888:8888 -v $SHARED_DIR:/shared -v $DOCKERDIR/router.conf:$SUPERVISORDIR/router.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
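
Note: a hedged smoke test of the combined zookeeper/kafka container once the script finishes. kafka-topics.sh ships with the 0.8.2 distribution installed by the Dockerfile, docker exec is assumed to be available, and the topic name is arbitrary:

    # create and list a throwaway topic through the broker's own tooling
    docker exec druid-zookeeper-kafka /usr/local/kafka/bin/kafka-topics.sh \
      --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic smoke_test
    docker exec druid-zookeeper-kafka /usr/local/kafka/bin/kafka-topics.sh \
      --list --zookeeper localhost:2181
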
diff --git a/integration-tests/src/main/java/io/druid/testing/ConfigFileConfigProvider.java b/integration-tests/src/main/java/io/druid/testing/ConfigFileConfigProvider.java
index 6975952ee3b3..b181f855ece0 100644
--- a/integration-tests/src/main/java/io/druid/testing/ConfigFileConfigProvider.java
+++ b/integration-tests/src/main/java/io/druid/testing/ConfigFileConfigProvider.java
@@ -42,6 +42,7 @@ public class ConfigFileConfigProvider implements IntegrationTestingConfigProvider
private String indexerHost = "";
private String middleManagerHost = "";
private String zookeeperHosts = ""; // comma-separated list of host:port
+ private String kafkaHost = "";
private Map<String, String> props = null;
@JsonCreator
@@ -62,20 +63,26 @@ private void loadProperties(String configFile)
catch (IOException ex) {
throw new RuntimeException(ex);
}
- routerHost = props.get("router_host") + ":" + props.get("router_port");
+ // there might not be a router; we want routerHost to be null in that case
+ routerHost = props.get("router_host");
+ if (null != routerHost) {
+ routerHost += ":" + props.get("router_port");
+ }
brokerHost = props.get("broker_host") + ":" + props.get("broker_port");
historicalHost = props.get("historical_host") + ":" + props.get("historical_port");
coordinatorHost = props.get("coordinator_host") + ":" + props.get("coordinator_port");
indexerHost = props.get("indexer_host") + ":" + props.get("indexer_port");
middleManagerHost = props.get("middlemanager_host");
zookeeperHosts = props.get("zookeeper_hosts");
+ kafkaHost = props.get("kafka_host") + ":" + props.get ("kafka_port");
LOG.info ("router: [%s]", routerHost);
- LOG.info ("broker [%s]: ", brokerHost);
+ LOG.info ("broker: [%s]", brokerHost);
LOG.info ("coordinator: [%s]", coordinatorHost);
LOG.info ("overlord: [%s]", indexerHost);
LOG.info ("middle manager: [%s]", middleManagerHost);
LOG.info ("zookeepers: [%s]", zookeeperHosts);
+ LOG.info ("kafka: [%s]", kafkaHost);
}
@Override
@@ -125,6 +132,12 @@ public String getZookeeperHosts()
return zookeeperHosts;
}
+ @Override
+ public String getKafkaHost()
+ {
+ return kafkaHost;
+ }
+
@Override
public String getProperty(String keyword)
{
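
Note: for context (not part of this patch), a hypothetical config file for ConfigFileConfigProvider could look like the sketch below. The keys mirror the props.get() calls above; the file name and values are made up, the JSON-map format is an assumption, and router_host is deliberately omitted to exercise the new null-router path:

    {
      "broker_host": "localhost",      "broker_port": "8082",
      "historical_host": "localhost",  "historical_port": "8083",
      "coordinator_host": "localhost", "coordinator_port": "8081",
      "indexer_host": "localhost",     "indexer_port": "8090",
      "middlemanager_host": "localhost",
      "zookeeper_hosts": "localhost:2181",
      "kafka_host": "localhost",       "kafka_port": "9092"
    }
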
diff --git a/integration-tests/src/main/java/io/druid/testing/DockerConfigProvider.java b/integration-tests/src/main/java/io/druid/testing/DockerConfigProvider.java
index 1068c0a8f60c..eea79aa1715a 100644
--- a/integration-tests/src/main/java/io/druid/testing/DockerConfigProvider.java
+++ b/integration-tests/src/main/java/io/druid/testing/DockerConfigProvider.java
@@ -76,6 +76,12 @@ public String getZookeeperHosts()
return dockerIp + ":2181";
}
+ @Override
+ public String getKafkaHost()
+ {
+ return dockerIp + ":9092";
+ }
+
@Override
public String getProperty(String prop)
{
diff --git a/integration-tests/src/main/java/io/druid/testing/IntegrationTestingConfig.java b/integration-tests/src/main/java/io/druid/testing/IntegrationTestingConfig.java
index 162ca9736e02..ace8d8fb866a 100644
--- a/integration-tests/src/main/java/io/druid/testing/IntegrationTestingConfig.java
+++ b/integration-tests/src/main/java/io/druid/testing/IntegrationTestingConfig.java
@@ -35,5 +35,7 @@ public interface IntegrationTestingConfig
public String getZookeeperHosts();
+ public String getKafkaHost();
+
public String getProperty(String prop);
}
diff --git a/integration-tests/src/main/java/io/druid/testing/IntegrationTestingCuratorConfig.java b/integration-tests/src/main/java/io/druid/testing/IntegrationTestingCuratorConfig.java
new file mode 100644
index 000000000000..752e8a332a58
--- /dev/null
+++ b/integration-tests/src/main/java/io/druid/testing/IntegrationTestingCuratorConfig.java
@@ -0,0 +1,45 @@
+/*
+* Licensed to Metamarkets Group Inc. (Metamarkets) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. Metamarkets licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied. See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*/
+
+package io.druid.testing;
+
+import com.google.inject.Inject;
+import io.druid.curator.CuratorConfig;
+
+/**
+ * We will use this instead of druid server's CuratorConfig, because CuratorConfig in
+ * a test cluster environment sees zookeeper at localhost even if zookeeper is elsewhere.
+ * We'll take the zookeeper host from the configuration file instead.
+ */
+public class IntegrationTestingCuratorConfig extends CuratorConfig
+{
+ private IntegrationTestingConfig config;
+
+ @Inject
+ public IntegrationTestingCuratorConfig (IntegrationTestingConfig config)
+ {
+ this.config = config;
+ }
+
+ @Override
+ public String getZkHosts()
+ {
+ return config.getZookeeperHosts();
+ }
+}
diff --git a/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java b/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java
index 69c15c095a5f..a4880c4b5743 100644
--- a/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java
+++ b/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java
@@ -115,7 +115,7 @@ public String submitTask(String task)
public TaskStatus.Status getTaskStatus(String taskID)
{
try {
- StatusResponseHolder response = makeRequest(
+ StatusResponseHolder response = makeRequest( HttpMethod.GET,
String.format(
"%stask/%s/status",
getIndexerURL(),
@@ -156,7 +156,7 @@ public List getPendingTasks()
private List getTasks(String identifier)
{
try {
- StatusResponseHolder response = makeRequest(
+ StatusResponseHolder response = makeRequest( HttpMethod.GET,
String.format("%s%s", getIndexerURL(), identifier)
);
LOG.info("Tasks %s response %s", identifier, response.getContent());
@@ -171,6 +171,26 @@ private List getTasks(String identifier)
}
}
+ public Map shutDownTask(String taskID)
+ {
+ try {
+ StatusResponseHolder response = makeRequest( HttpMethod.POST,
+ String.format("%stask/%s/shutdown", getIndexerURL(),
+ URLEncoder.encode(taskID, "UTF-8")
+ )
+ );
+ LOG.info("Shutdown Task %s response %s", taskID, response.getContent());
+ return jsonMapper.readValue(
+ response.getContent(), new TypeReference