@@ -33,6 +33,7 @@ import org.apache.kafka.streams.KafkaStreams
import org.apache.kafka.streams.StreamsConfig
import org.apache.kafka.streams.Topology
import org.apache.kafka.streams.processor.WallclockTimestampExtractor
+import java.io.InputStream
import java.lang.StringBuilder
import java.net.URL
import java.util.Properties
@@ -140,16 +141,24 @@ class Stream(override val topologyContext: TopologyContext) : FloodplainSourceCo
        )
    }

+    fun renderAndSchedule(connectorURL: URL?, settings: InputStream, force: Boolean = false): KafkaStreams {
+        val prop = Properties()
+        prop.load(settings)
+        val propMap = mutableMapOf<String, Any>()
+        prop.forEach { (k, v) -> propMap.put(k as String, v) }
+        return renderAndSchedule(connectorURL, prop[StreamsConfig.BOOTSTRAP_SERVERS_CONFIG] as String, force, propMap)
+    }
+
    /**
     * Will create an executable definition of the stream
     * (@see render), then will start the topology by starting a streams
     * instance pointing at the kafka cluster at kafkaHosts, using the supplied clientId.
     * Finally, it will POST the supplied
     */
-    fun renderAndSchedule(connectorURL: URL?, kafkaHosts: String, force: Boolean = false): KafkaStreams {
+    fun renderAndSchedule(connectorURL: URL?, kafkaHosts: String, force: Boolean = false, settings: Map<String, Any>? = null): KafkaStreams {
        val topologyConstructor = TopologyConstructor()
        val (topology, sources, sinks) = render(topologyConstructor)
-        topologyConstructor.createTopicsAsNeeded(topologyContext, kafkaHosts)
+        topologyConstructor.createTopicsAsNeeded(settings ?: mapOf(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG to kafkaHosts))
        sources.forEach { (name, json) ->
            connectorURL?.let {
                startConstructor(name, topologyContext, it, json, force)
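
The new InputStream overload is a thin convenience wrapper: it loads a java.util.Properties file, copies the entries into a map, and delegates to the map-based overload, taking the broker list from the file's bootstrap.servers entry. A minimal usage sketch; the file name, connector URL, and the pre-built Stream instance are illustrative, not part of this change:

    import java.io.FileInputStream
    import java.net.URL

    fun schedule(stream: Stream) {
        // The properties file must define bootstrap.servers: the overload reads
        // that key with an unchecked cast, so a missing entry fails at runtime.
        FileInputStream("streams.properties").use { settings ->
            stream.renderAndSchedule(URL("http://connect:8083/connectors"), settings)
        }
    }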
@@ -161,7 +170,11 @@ class Stream(override val topologyContext: TopologyContext) : FloodplainSourceCo
            }
        }
        val appId = topologyContext.topicName("@applicationId")
-        val streams = runTopology(topology, appId, kafkaHosts, "storagePath")
+        val extra: MutableMap<String, Any> = mutableMapOf()
+        if (settings != null) {
+            extra.putAll(settings)
+        }
+        val streams = runTopology(topology, appId, kafkaHosts, "storagePath", extra)
        logger.info { "Topology running!" }
        return streams
    }
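
With the optional settings parameter, one map now drives both topic creation (createTopicsAsNeeded) and, further down, the streams configuration itself. A sketch of calling the extended overload directly, e.g. to pass security settings; the broker address and protocol value are placeholders, and CommonClientConfigs comes from the standard Kafka clients library, not from this change:

    import org.apache.kafka.clients.CommonClientConfigs
    import org.apache.kafka.streams.KafkaStreams
    import org.apache.kafka.streams.StreamsConfig

    fun scheduleSecured(stream: Stream): KafkaStreams {
        val settings: Map<String, Any> = mapOf(
            StreamsConfig.BOOTSTRAP_SERVERS_CONFIG to "broker-1:9092",
            CommonClientConfigs.SECURITY_PROTOCOL_CONFIG to "SASL_SSL"
        )
        // Passing null as connectorURL renders the topology but skips the connector POSTs.
        return stream.renderAndSchedule(null, "broker-1:9092", force = false, settings = settings)
    }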
@@ -187,8 +200,12 @@ class Stream(override val topologyContext: TopologyContext) : FloodplainSourceCo
        return Triple(topology, sources, sinks)
    }

-    private fun runTopology(topology: Topology, applicationId: String, kafkaHosts: String, storagePath: String): KafkaStreams {
-        val props = createProperties(applicationId, kafkaHosts, storagePath)
+    private fun runTopology(topology: Topology, applicationId: String, kafkaHosts: String, storagePath: String, extra: MutableMap<String, Any>): KafkaStreams {
+        extra[StreamsConfig.BOOTSTRAP_SERVERS_CONFIG] = kafkaHosts
+        extra[StreamsConfig.APPLICATION_ID_CONFIG] = applicationId
+        extra[StreamsConfig.STATE_DIR_CONFIG] = storagePath
+
+        val props = createProperties(extra)
        val stream = KafkaStreams(topology, props)
        logger.info("CurrentTopology:\n${topology.describe()}")
        stream.setUncaughtExceptionHandler { thread: Thread, exception: Throwable? ->
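
Note the precedence this establishes: runTopology writes the three reserved keys into extra after the caller's settings were copied in, so bootstrap servers, application id, and state directory always come from the explicit parameters, and any conflicting value in settings is silently replaced. A tiny illustration of the overwrite order (values are placeholders):

    import org.apache.kafka.streams.StreamsConfig

    fun main() {
        val extra = mutableMapOf<String, Any>(
            StreamsConfig.APPLICATION_ID_CONFIG to "from-settings" // caller-supplied value
        )
        extra[StreamsConfig.APPLICATION_ID_CONFIG] = "from-parameter" // what runTopology does
        println(extra[StreamsConfig.APPLICATION_ID_CONFIG]) // prints from-parameter
    }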
@@ -202,15 +219,12 @@ class Stream(override val topologyContext: TopologyContext) : FloodplainSourceCo
        return stream
    }

-    private fun createProperties(applicationId: String, brokers: String, storagePath: String): Properties {
+    private fun createProperties(extra: Map<String, Any>): Properties {
        val streamsConfiguration = Properties()
        // Give the Streams application a unique name. The name must be unique in the Kafka cluster
        // against which the application is run.
-        logger.info("Creating application with name: {}", applicationId)
-        logger.info("Creating application id: {}", applicationId)
-        logger.info("Starting instance in storagePath: {}", storagePath)
-        streamsConfiguration[StreamsConfig.APPLICATION_ID_CONFIG] = applicationId
-        streamsConfiguration[StreamsConfig.BOOTSTRAP_SERVERS_CONFIG] = brokers
+        streamsConfiguration.putAll(extra)
+        // logger.info("Starting instance in storagePath: {}", storagePath)
        streamsConfiguration[StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG] = Serdes.String().javaClass
        streamsConfiguration[StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG] = StreamOperators.replicationSerde.javaClass
        streamsConfiguration[ConsumerConfig.AUTO_OFFSET_RESET_CONFIG] = "earliest"
@@ -220,7 +234,7 @@ class Stream(override val topologyContext: TopologyContext) : FloodplainSourceCo
        streamsConfiguration[ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG] = 7200000
        streamsConfiguration[ConsumerConfig.MAX_POLL_RECORDS_CONFIG] = 100
        streamsConfiguration[ProducerConfig.COMPRESSION_TYPE_CONFIG] = "lz4"
-        streamsConfiguration[StreamsConfig.STATE_DIR_CONFIG] = storagePath
+        // streamsConfiguration[StreamsConfig.STATE_DIR_CONFIG] = storagePath
        streamsConfiguration[StreamsConfig.NUM_STREAM_THREADS_CONFIG] = 1
        streamsConfiguration[StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG] = 0
        streamsConfiguration[StreamsConfig.RETRIES_CONFIG] = 50
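
One consequence of the new ordering in createProperties: putAll(extra) runs before the hardcoded assignments that follow it, so for any colliding key (the serdes, auto.offset.reset, the poll limits, compression, thread and standby counts, retries) the hardcoded default silently wins over the caller's value; extra can only effectively set keys this method does not assign itself. If caller overrides were meant to take precedence, the putAll would have to run last. A quick check of that ordering, with the thread count as an arbitrary example:

    import java.util.Properties
    import org.apache.kafka.streams.StreamsConfig

    fun main() {
        val props = Properties()
        props.putAll(mapOf(StreamsConfig.NUM_STREAM_THREADS_CONFIG to 4)) // caller's setting
        props[StreamsConfig.NUM_STREAM_THREADS_CONFIG] = 1 // hardcoded assignment runs later
        println(props[StreamsConfig.NUM_STREAM_THREADS_CONFIG]) // prints 1, not 4
    }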