diff --git a/.travis.yml b/.travis.yml
index 4f53b9abd..4450f76e9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,13 +4,13 @@ jdk: oraclejdk8
install: true
script: travis_wait 30 ./sbt clean coverage assembly
scala:
- - 2.11.7
+ - 2.12.8
#after_success:
# - sbt coverageReport coveralls
cache:
directories:
- - $HOME/.sbt/0.13/dependency
+ - $HOME/.sbt/1.0/dependency
- $HOME/.sbt/boot/scala*
- $HOME/.sbt/launchers
- $HOME/.ivy2/cache
diff --git a/README.md b/README.md
index 4471bbb74..b0381346a 100644
--- a/README.md
+++ b/README.md
@@ -117,6 +117,46 @@ You should increase the above for large # of consumers with consumer polling ena
Kafka managed consumer offset is now consumed by KafkaManagedOffsetCache from the "__consumer_offsets" topic. Note, this has not been tested with large number of offsets being tracked. There is a single thread per cluster consuming this topic so it may not be able to keep up on large # of offsets being pushed to the topic.
+### Authenticating a User with LDAP
+Warning: you must have SSL configured for Kafka Manager; otherwise your credentials are passed unencrypted.
+A user can be authenticated with LDAP by passing the user credentials in the Authorization header.
+LDAP authentication is performed on the first visit; if it succeeds, a cookie is set.
+On subsequent requests, the cookie value is compared with the credentials from the Authorization header.
+LDAP support is implemented via the basic authentication filter; a client-side sketch follows the example below.
+
+1. Configure basic authentication
+- basicAuthentication.enabled=true
+- basicAuthentication.realm=<basic authentication realm>
+
+2. Encryption parameters (optional; randomly generated on startup if not set):
+- basicAuthentication.salt="some-hex-string-representing-byte-array"
+- basicAuthentication.iv="some-hex-string-representing-byte-array"
+- basicAuthentication.secret="my-secret-string"
+
+3. Configure LDAP/LDAPS authentication
+- basicAuthentication.ldap.enabled=<boolean flag to enable/disable LDAP authentication>
+- basicAuthentication.ldap.server=<FQDN of LDAP server>
+- basicAuthentication.ldap.port=<port of LDAP server>
+- basicAuthentication.ldap.username=<LDAP search username>
+- basicAuthentication.ldap.password=<LDAP search password>
+- basicAuthentication.ldap.search-base-dn=<LDAP search base DN>
+- basicAuthentication.ldap.search-filter=<LDAP search filter>
+- basicAuthentication.ldap.connection-pool-size=<number of connections to LDAP server>
+- basicAuthentication.ldap.ssl=<boolean flag to enable/disable LDAPS>
+
+#### Example (Online LDAP Test Server):
+
+- basicAuthentication.ldap.enabled=true
+- basicAuthentication.ldap.server="ldap.forumsys.com"
+- basicAuthentication.ldap.port=389
+- basicAuthentication.ldap.username="cn=read-only-admin,dc=example,dc=com"
+- basicAuthentication.ldap.password="password"
+- basicAuthentication.ldap.search-base-dn="dc=example,dc=com"
+- basicAuthentication.ldap.search-filter="(uid=$capturedLogin$)"
+- basicAuthentication.ldap.connection-pool-size=10
+- basicAuthentication.ldap.ssl=false
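+
+For reference, the filter expects a standard HTTP Basic Authorization header.
+Below is a minimal client-side sketch (the `basicAuthHeader` helper is hypothetical, not part of Kafka Manager) showing how such a header is built:
+
+```scala
+import java.nio.charset.StandardCharsets
+import java.util.Base64
+
+// Encode "username:password" as Base64 and prefix it with the "Basic " scheme.
+def basicAuthHeader(username: String, password: String): (String, String) = {
+  val token = Base64.getEncoder.encodeToString(
+    s"$username:$password".getBytes(StandardCharsets.UTF_8))
+  "Authorization" -> s"Basic $token"
+}
+
+// basicAuthHeader("riemann", "password") yields
+// ("Authorization", "Basic cmllbWFubjpwYXNzd29yZA==")
+```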
+
+
Deployment
----------
diff --git a/app/controllers/ApiHealth.scala b/app/controllers/ApiHealth.scala
index 40b2c82cb..d3c0354f6 100644
--- a/app/controllers/ApiHealth.scala
+++ b/app/controllers/ApiHealth.scala
@@ -1,12 +1,13 @@
package controllers
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.i18n.I18nSupport
import play.api.mvc._
-class ApiHealth(val messagesApi: MessagesApi) extends Controller with I18nSupport {
+import scala.concurrent.ExecutionContext
- def ping = Action {
+class ApiHealth(val cc: ControllerComponents)(implicit ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
+
+ def ping = Action { implicit request:RequestHeader =>
Ok("healthy").withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
-
}
diff --git a/app/controllers/Application.scala b/app/controllers/Application.scala
index 2f3d426df..a30e287a8 100644
--- a/app/controllers/Application.scala
+++ b/app/controllers/Application.scala
@@ -7,20 +7,20 @@ package controllers
import features.ApplicationFeatures
import models.navigation.Menus
-import play.api.i18n.{MessagesApi, I18nSupport}
+import play.api.i18n.I18nSupport
import play.api.mvc._
+import scala.concurrent.ExecutionContext
+
/**
* @author hiral
*/
-class Application (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
-
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class Application(val cc: ControllerComponents, kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
- def index = Action.async {
+ def index = Action.async { implicit request: RequestHeader =>
kafkaManager.getClusterList.map { errorOrClusterList =>
Ok(views.html.index(errorOrClusterList)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
diff --git a/app/controllers/BasicAuthenticationFilter.scala b/app/controllers/BasicAuthenticationFilter.scala
index ce08446fa..84915de89 100644
--- a/app/controllers/BasicAuthenticationFilter.scala
+++ b/app/controllers/BasicAuthenticationFilter.scala
@@ -1,107 +1,306 @@
package controllers
-import com.typesafe.config.ConfigValueType
+import java.nio.charset.StandardCharsets
+import java.security.SecureRandom
+import com.typesafe.config.ConfigValueType
import java.util.UUID
+import com.unboundid.ldap.sdk._
+import javax.net.ssl.SSLSocketFactory
+import akka.stream.Materializer
import org.apache.commons.codec.binary.Base64
-
import play.api.Configuration
-import play.api.http.HeaderNames.AUTHORIZATION
-import play.api.http.HeaderNames.WWW_AUTHENTICATE
-import play.api.libs.Crypto
-import play.api.libs.concurrent.Execution.Implicits.defaultContext
-import play.api.mvc.Cookie
-import play.api.mvc.Filter
-import play.api.mvc.RequestHeader
-import play.api.mvc.Result
+import play.api.http.HeaderNames.{AUTHORIZATION, WWW_AUTHENTICATE}
import play.api.mvc.Results.Unauthorized
+import play.api.mvc.{Cookie, Filter, RequestHeader, Result}
import scala.collection.JavaConverters._
-import scala.concurrent.Future
+import scala.util.{Success, Try}
+import grizzled.slf4j.Logging
+import javax.crypto.Mac
+import play.api.libs.Codecs
+
+import scala.concurrent.{ExecutionContext, Future}
-class BasicAuthenticationFilter(configurationFactory: => BasicAuthenticationFilterConfiguration) extends Filter {
+class BasicAuthenticationFilter(configuration: BasicAuthenticationFilterConfiguration, authenticator: Authenticator)(implicit val mat: Materializer, ec: ExecutionContext) extends Filter {
def apply(next: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] =
- if (configuration.enabled && isNotExcluded(requestHeader))
- checkAuthentication(requestHeader, next)
+ if (configuration.enabled && isNotExcluded(requestHeader)) {
+ authenticator.checkAuthentication(requestHeader, next)
+ }
else next(requestHeader)
private def isNotExcluded(requestHeader: RequestHeader): Boolean =
- !configuration.excluded.exists( requestHeader.path matches _ )
+ !configuration.excluded.exists(requestHeader.path matches _)
- private def checkAuthentication(requestHeader: RequestHeader, next: RequestHeader => Future[Result]): Future[Result] =
- if (isAuthorized(requestHeader)) addCookie(next(requestHeader))
- else unauthorizedResult
+}
- private def isAuthorized(requestHeader: RequestHeader) = {
- lazy val authorizedByHeader =
- requestHeader.headers.get(AUTHORIZATION).exists(expectedHeaderValues)
+trait Authenticator {
+
+ import javax.crypto.Cipher
+ import javax.crypto.SecretKeyFactory
+ import javax.crypto.spec.PBEKeySpec
+ import javax.crypto.spec.SecretKeySpec
+ import javax.crypto.spec.IvParameterSpec
+
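+ // Key material: PBKDF2-HMAC-SHA256 stretches the configured secret with the salt
+ // (65536 iterations) into a 256-bit key, reused for both AES and the HMAC below.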
+ private lazy val factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256")
+ private lazy val spec = new PBEKeySpec(secret, salt, 65536, 256)
+ private lazy val secretKey = new SecretKeySpec(factory.generateSecret(spec).getEncoded, "AES")
+ private lazy val cipher: Cipher = {
+ val c = Cipher.getInstance("AES/CBC/PKCS5Padding")
+ c.init(Cipher.ENCRYPT_MODE, secretKey, new IvParameterSpec(iv))
+ c
+ }
- lazy val authorizedByCookie =
- requestHeader.cookies.get(COOKIE_NAME).exists(_.value == cookieValue)
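+ // HMAC-SHA256 keyed with the same derived key material; used to sign cookie values.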
+ private lazy val mac: Mac = {
+ val m = Mac.getInstance("HmacSHA256")
+ m.init(new SecretKeySpec(factory.generateSecret(spec).getEncoded, "HmacSHA256"))
+ m
+ }
- authorizedByHeader || authorizedByCookie
+ def salt: Array[Byte]
+
+ def iv: Array[Byte]
+
+ def secret: Array[Char]
+
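+ // Note: encrypt and decrypt share one lazily initialized Cipher instance;
+ // decrypt re-initializes it with the caller-supplied IV before use.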
+ def encrypt(content: Array[Byte]): Array[Byte] = {
+ cipher.doFinal(content)
+ }
+
+ def decrypt(content: Array[Byte], iv: Array[Byte]): Array[Byte] = {
+ cipher.init(Cipher.DECRYPT_MODE, secretKey, new IvParameterSpec(iv))
+ cipher.doFinal(content)
+ }
+
+ def sign(content: String) : String = {
+ Codecs.toHexString(mac.doFinal(content.getBytes(StandardCharsets.UTF_8)))
+ }
+
+ def checkAuthentication(requestHeader: RequestHeader, next: RequestHeader => Future[Result]): Future[Result]
+}
+
+object BasicAuthenticator {
+ private lazy val COOKIE_NAME = "play-basic-authentication"
+}
+
+case class BasicAuthenticator(config: BasicAuthenticationConfig)(implicit val mat: Materializer, ec: ExecutionContext) extends Authenticator {
+
+ import BasicAuthenticator._
+
+ private lazy val realm = basic(s"""realm="${config.realm}"""")
+ private lazy val unauthorizedResult = Future successful Unauthorized.withHeaders(WWW_AUTHENTICATE -> realm)
+
+ def salt: Array[Byte] = config.salt
+
+ def iv: Array[Byte] = config.iv
+
+ def secret: Array[Char] = config.secret
+
+ def checkAuthentication(requestHeader: RequestHeader, next: RequestHeader => Future[Result]): Future[Result] = {
+ if (isAuthorized(requestHeader)) addCookie(next(requestHeader))
+ else unauthorizedResult
}
private def addCookie(result: Future[Result]) =
result.map(_.withCookies(cookie))
- private lazy val configuration = configurationFactory
+ private def isAuthorized(requestHeader: RequestHeader) = {
+ val expectedHeader = expectedHeaderValues(config)
+ val authorizedByHeader = requestHeader.headers.get(AUTHORIZATION).exists(expectedHeader)
+
+ val expectedCookie = cookieValue
+ val authorizedByCookie = requestHeader.cookies.get(COOKIE_NAME).exists(_.value == expectedCookie)
- private lazy val unauthorizedResult =
- Future successful Unauthorized.withHeaders(WWW_AUTHENTICATE -> realm)
+ authorizedByHeader || authorizedByCookie
+ }
- private lazy val COOKIE_NAME = "play-basic-authentication-filter"
+ private def cookie = Cookie(COOKIE_NAME, cookieValue, maxAge = Option(3600))
- private lazy val cookie = Cookie(COOKIE_NAME, cookieValue)
+ private lazy val cookieValue: String =
+ cookieValue(config.username, config.passwords)
- private lazy val cookieValue =
- Crypto.sign(configuration.username + configuration.passwords)
+ private def cookieValue(username: String, passwords: Set[String]): String =
+ new String(Base64.encodeBase64((username + passwords.mkString(",")).getBytes(StandardCharsets.UTF_8)))
- private lazy val expectedHeaderValues =
+ private def expectedHeaderValues(configuration: BasicAuthenticationConfig) =
configuration.passwords.map { password =>
val combined = configuration.username + ":" + password
val credentials = Base64.encodeBase64String(combined.getBytes)
basic(credentials)
}
- private def realm = basic(s"""realm=\"${configuration.realm}"""")
+ private def basic(content: String) = s"Basic $content"
+}
+
+object LDAPAuthenticator {
+ private lazy val COOKIE_NAME = "play-basic-ldap-authentication"
+}
+
+case class LDAPAuthenticator(config: LDAPAuthenticationConfig)(implicit val mat: Materializer, ec: ExecutionContext) extends Authenticator with Logging {
+
+ import LDAPAuthenticator._
+
+ private lazy val realm = basic(s"""realm="${config.realm}"""")
+ private lazy val unauthorizedResult = Future successful Unauthorized.withHeaders(WWW_AUTHENTICATE -> realm)
+ private lazy val ldapConnectionPool: LDAPConnectionPool = {
+ val (address, port) = (config.address, config.port)
+ val connection = if (config.sslEnabled) {
+ new LDAPConnection(SSLSocketFactory.getDefault, address, port, config.username, config.password)
+ } else {
+ new LDAPConnection(address, port, config.username, config.password)
+ }
+ new LDAPConnectionPool(connection, config.connectionPoolSize)
+ }
+
+ def salt: Array[Byte] = config.salt
+
+ def iv: Array[Byte] = config.iv
+
+ def secret: Array[Char] = config.secret
+
+ def checkAuthentication(requestHeader: RequestHeader, next: RequestHeader => Future[Result]): Future[Result] = {
+ val credentials = credentialsFromHeader(requestHeader)
+ if (credentials.isDefined && isAuthorized(requestHeader, credentials.get)) addCookie(credentials.get, next(requestHeader))
+ else unauthorizedResult
+ }
+
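+ // Extracts (username, password) from an "Authorization: Basic <base64>" header, if present.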
+ private def credentialsFromHeader(requestHeader: RequestHeader): Option[(String, String)] = {
+ requestHeader.headers.get(AUTHORIZATION).flatMap(authorization => {
+ authorization.split("\\s+").toList match {
+ case "Basic" :: base64Hash :: Nil => {
+ val credentials = new String(Base64.decodeBase64(base64Hash.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8)
+ // Split on the first ':' only, so passwords containing ':' survive (RFC 7617).
+ credentials.split(":", 2).toList match {
+ case username :: password :: Nil => Some(username -> password)
+ case _ => None
+ }
+ }
+ case _ => None
+ }
+ })
+ }
+
+ private def isAuthorized(requestHeader: RequestHeader, credentials: (String, String)) = {
+ val (username, password) = credentials
+ val expectedCookie = cookieValue(username, Set(password))
+ val authorizedByCookie =
+ requestHeader.cookies.get(COOKIE_NAME).exists(_.value == expectedCookie)
+
+ authorizedByCookie || {
+ val connection = ldapConnectionPool.getConnection
+ try {
+ findUserDN(config.searchBaseDN, config.searchFilter, username, connection) match {
+ case None =>
+ logger.debug(s"Can't find user DN for username: $username. " +
+ s"Base DN: ${config.searchBaseDN}. " +
+ s"Filter: ${renderSearchFilter(config.searchFilter, username)}")
+ false
+ case Some(userDN) => Try(connection.bind(userDN, password)).isSuccess
+ }
+ } finally {
+ connection.close()
+ }
+ }
+ }
+
+
+ private def findUserDN(baseDN: String, filterTemplate: String, username: String, connection: LDAPConnection) = {
+ val filter = renderSearchFilter(filterTemplate, username)
+ val searchRequest = new SearchRequest(baseDN, SearchScope.SUB, filter)
+ Try(connection.search(searchRequest)) match {
+ case Success(sr) if sr.getEntryCount > 0 => Some(sr.getSearchEntries.get(0).getDN)
+ case _ => None
+ }
+ }
+
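+ // Replaces the literal $capturedLogin$ placeholder in the configured search filter with the username.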
+ private def renderSearchFilter(filterTemplate: String, username: String) = {
+ filterTemplate.replaceAll("\\$capturedLogin\\$", username)
+ }
+
+ private def addCookie(credentials: (String, String), result: Future[Result]) = {
+ val (username, password) = credentials
+ result.map(_.withCookies(cookie(username, password)))
+ }
+
+ private def cookieValue(username: String, passwords: Set[String]): String =
+ sign(username + passwords.mkString(","))
private def basic(content: String) = s"Basic $content"
+
+ private def cookie(username: String, password: String) = Cookie(COOKIE_NAME, cookieValue(username, Set(password)), maxAge = Option(3600))
}
-object BasicAuthenticationFilter {
- def apply() = new BasicAuthenticationFilter(
- BasicAuthenticationFilterConfiguration.parse(
- play.api.Play.current.configuration
- )
- )
-
- def apply(configuration: => Configuration) = new BasicAuthenticationFilter(
- BasicAuthenticationFilterConfiguration parse configuration
- )
+sealed trait AuthenticationConfig {
+ def salt: Array[Byte]
+
+ def iv: Array[Byte]
+
+ def secret: Array[Char]
}
-case class BasicAuthenticationFilterConfiguration(
- realm: String,
- enabled: Boolean,
- username: String,
- passwords: Set[String],
- excluded: Set[String])
+case class BasicAuthenticationConfig(salt: Array[Byte]
+ , iv: Array[Byte]
+ , secret: Array[Char]
+ , realm: String
+ , username: String
+ , passwords: Set[String]) extends AuthenticationConfig
+
+case class LDAPAuthenticationConfig(salt: Array[Byte]
+ , iv: Array[Byte]
+ , secret: Array[Char]
+ , realm: String
+ , address: String
+ , port: Int
+ , username: String
+ , password: String
+ , searchBaseDN: String
+ , searchFilter: String
+ , connectionPoolSize: Int
+ , sslEnabled: Boolean) extends AuthenticationConfig
+
+sealed trait AuthType[T <: AuthenticationConfig] {
+ def getConfig(config: AuthenticationConfig): T
+}
+
+case object BasicAuth extends AuthType[BasicAuthenticationConfig] {
+ def getConfig(config: AuthenticationConfig): BasicAuthenticationConfig = {
+ require(config.isInstanceOf[BasicAuthenticationConfig], s"Unexpected config type: ${config.getClass.getSimpleName}")
+ config.asInstanceOf[BasicAuthenticationConfig]
+ }
+}
+
+case object LDAPAuth extends AuthType[LDAPAuthenticationConfig] {
+ def getConfig(config: AuthenticationConfig): LDAPAuthenticationConfig = {
+ require(config.isInstanceOf[LDAPAuthenticationConfig], s"Unexpected config type: ${config.getClass.getSimpleName}")
+ config.asInstanceOf[LDAPAuthenticationConfig]
+ }
+}
+
+case class BasicAuthenticationFilterConfiguration(enabled: Boolean,
+ authType: AuthType[_ <: AuthenticationConfig],
+ authenticationConfig: AuthenticationConfig,
+ excluded: Set[String])
object BasicAuthenticationFilterConfiguration {
+ private val SALT_LEN = 20
+ private val IV_LEN = 16 // AES/CBC requires a 16-byte IV (the AES block size)
private val defaultRealm = "Application"
+
private def credentialsMissingRealm(realm: String) =
s"$realm: The username or password could not be found in the configuration."
- def parse(configuration: Configuration) = {
+ def parse(configuration: Configuration): BasicAuthenticationFilterConfiguration = {
val root = "basicAuthentication."
- def boolean(key: String) = configuration.getBoolean(root + key)
- def string(key: String) = configuration.getString(root + key)
+
+ def boolean(key: String) = configuration.getOptional[Boolean](root + key)
+
+ def string(key: String) = configuration.getOptional[String](root + key)
+
+ def int(key: String) = configuration.getOptional[Int](root + key)
+
def seq(key: String) =
Option(configuration.underlying getValue (root + key)).map { value =>
value.valueType match {
@@ -111,34 +310,82 @@ object BasicAuthenticationFilterConfiguration {
}
}
+ val sr = new SecureRandom()
+ val salt: Array[Byte] = string("salt").map(Codecs.hexStringToByte).getOrElse(sr.generateSeed(SALT_LEN))
+ val iv: Array[Byte] = string("iv").map(Codecs.hexStringToByte).getOrElse(sr.generateSeed(IV_LEN))
+ val secret: Array[Char] = string("secret").map(_.toCharArray).getOrElse(UUID.randomUUID().toString.toCharArray)
val enabled = boolean("enabled").getOrElse(false)
+ val ldapEnabled = boolean("ldap.enabled").getOrElse(false)
- val credentials: Option[(String, Set[String])] = for {
- username <- string("username")
- passwords <- seq("password")
- } yield (username, passwords.toSet)
+ val excluded = configuration.getOptional[Seq[String]](root + "excluded")
+ .getOrElse(Seq.empty)
+ .toSet
- val (username, passwords) = {
- def uuid = UUID.randomUUID.toString
- credentials.getOrElse((uuid, Set(uuid)))
- }
+ if (ldapEnabled) {
+ val connection: Option[(String, Int)] = for {
+ server <- string("ldap.server")
+ port <- int("ldap.port")
+ } yield (server, port)
- def realm(hasCredentials: Boolean) = {
- val realm = string("realm").getOrElse(defaultRealm)
- if (hasCredentials) realm
- else credentialsMissingRealm(realm)
+ val (server, port) = {
+ connection.getOrElse(("localhost", 389))
+ }
+
+ val username = string("ldap.username").getOrElse("")
+ val password = string("ldap.password").getOrElse("")
+
+ val searchDN = string("ldap.search-base-dn").getOrElse("")
+ val searchFilter = string("ldap.search-filter").getOrElse("")
+ val connectionPoolSize = int("ldap.connection-pool-size").getOrElse(10)
+ val sslEnabled = boolean("ldap.ssl").getOrElse(false)
+
+ BasicAuthenticationFilterConfiguration(
+ enabled,
+ LDAPAuth,
+ LDAPAuthenticationConfig(salt, iv, secret,
+ string("realm").getOrElse(defaultRealm),
+ server, port, username, password, searchDN, searchFilter, connectionPoolSize, sslEnabled
+ ),
+ excluded
+ )
+ } else {
+ val credentials: Option[(String, Set[String])] = for {
+ username <- string("username")
+ passwords <- seq("password")
+ } yield (username, passwords.toSet)
+
+ val (username, passwords) = {
+ def uuid = UUID.randomUUID.toString
+
+ credentials.getOrElse((uuid, Set(uuid)))
+ }
+
+ def realm(hasCredentials: Boolean) = {
+ val realm = string("realm").getOrElse(defaultRealm)
+ if (hasCredentials) realm
+ else credentialsMissingRealm(realm)
+ }
+
+ BasicAuthenticationFilterConfiguration(
+ enabled,
+ BasicAuth,
+ BasicAuthenticationConfig(salt, iv, secret, realm(credentials.isDefined), username, passwords),
+ excluded
+ )
}
- val excluded = configuration.getStringSeq(root + "excluded")
- .getOrElse(Seq.empty)
- .toSet
+ }
+}
- BasicAuthenticationFilterConfiguration(
- realm(credentials.isDefined),
- enabled,
- username,
- passwords,
- excluded
- )
+object BasicAuthenticationFilter {
+ def apply(configuration: => Configuration)(implicit mat: Materializer, ec: ExecutionContext): Filter = {
+ val filterConfig = BasicAuthenticationFilterConfiguration.parse(configuration)
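+ // Pick the authenticator implementation matching the parsed auth type.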
+ val authenticator = filterConfig.authType match {
+ case BasicAuth =>
+ new BasicAuthenticator(BasicAuth.getConfig(filterConfig.authenticationConfig))
+ case LDAPAuth =>
+ new LDAPAuthenticator(LDAPAuth.getConfig(filterConfig.authenticationConfig))
+ }
+ new BasicAuthenticationFilter(filterConfig, authenticator)
}
}
\ No newline at end of file
diff --git a/app/controllers/Cluster.scala b/app/controllers/Cluster.scala
index 35f0167b0..5e002cca3 100644
--- a/app/controllers/Cluster.scala
+++ b/app/controllers/Cluster.scala
@@ -6,28 +6,27 @@
package controllers
import features.{ApplicationFeatures, KMClusterManagerFeature}
-import kafka.manager.model._
import kafka.manager.ApiError
+import kafka.manager.model._
import models.FollowLink
import models.form._
import models.navigation.Menus
import play.api.data.Form
import play.api.data.Forms._
-import play.api.data.validation.{Constraint, Invalid, Valid}
import play.api.data.validation.Constraints._
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.data.validation.{Constraint, Invalid, Valid}
+import play.api.i18n.I18nSupport
import play.api.mvc._
-import scala.concurrent.Future
+import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
import scalaz.{-\/, \/-}
/**
* @author hiral
*/
-class Cluster (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class Cluster (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
private[this] val defaultTuning = kafkaManager.defaultTuning
@@ -193,31 +192,31 @@ class Cluster (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManag
)
}
- def cluster(c: String) = Action.async {
+ def cluster(c: String) = Action.async { implicit request: RequestHeader =>
kafkaManager.getClusterView(c).map { errorOrClusterView =>
Ok(views.html.cluster.clusterView(c,errorOrClusterView)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def brokers(c: String) = Action.async {
+ def brokers(c: String) = Action.async { implicit request: RequestHeader =>
kafkaManager.getBrokerList(c).map { errorOrBrokerList =>
Ok(views.html.broker.brokerList(c,errorOrBrokerList)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def broker(c: String, b: Int) = Action.async {
+ def broker(c: String, b: Int) = Action.async { implicit request: RequestHeader =>
kafkaManager.getBrokerView(c,b).map { errorOrBrokerView =>
Ok(views.html.broker.brokerView(c,b,errorOrBrokerView)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def addCluster = Action.async { implicit request =>
+ def addCluster = Action.async { implicit request: RequestHeader =>
featureGate(KMClusterManagerFeature) {
Future.successful(Ok(views.html.cluster.addCluster(clusterConfigForm.fill(defaultClusterConfig))).withHeaders("X-Frame-Options" -> "SAMEORIGIN"))
}
}
- def updateCluster(c: String) = Action.async { implicit request =>
+ def updateCluster(c: String) = Action.async { implicit request: RequestHeader =>
featureGate(KMClusterManagerFeature) {
kafkaManager.getClusterConfig(c).map { errorOrClusterConfig =>
Ok(views.html.cluster.updateCluster(c,errorOrClusterConfig.map { cc =>
@@ -247,7 +246,7 @@ class Cluster (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManag
}
- def handleAddCluster = Action.async { implicit request =>
+ def handleAddCluster = Action.async { implicit request: Request[AnyContent] =>
featureGate(KMClusterManagerFeature) {
clusterConfigForm.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.cluster.addCluster(formWithErrors))),
@@ -283,7 +282,7 @@ class Cluster (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManag
}
}
- def handleUpdateCluster(c: String) = Action.async { implicit request =>
+ def handleUpdateCluster(c: String) = Action.async { implicit request: Request[AnyContent] =>
featureGate(KMClusterManagerFeature) {
updateForm.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.cluster.updateCluster(c, \/-(formWithErrors)))),
diff --git a/app/controllers/Consumer.scala b/app/controllers/Consumer.scala
index c790c8025..ce820ed92 100644
--- a/app/controllers/Consumer.scala
+++ b/app/controllers/Consumer.scala
@@ -7,32 +7,32 @@ package controllers
import features.ApplicationFeatures
import models.navigation.Menus
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.i18n.I18nSupport
import play.api.mvc._
+import scala.concurrent.ExecutionContext
+
/**
* @author cvcal
*/
-class Consumer (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
-
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class Consumer (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec: ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
- def consumers(cluster: String) = Action.async {
+ def consumers(cluster: String) = Action.async { implicit request: RequestHeader =>
kafkaManager.getConsumerListExtended(cluster).map { errorOrConsumerList =>
Ok(views.html.consumer.consumerList(cluster, errorOrConsumerList)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def consumer(cluster: String, consumerGroup: String, consumerType: String) = Action.async {
+ def consumer(cluster: String, consumerGroup: String, consumerType: String) = Action.async { implicit request: RequestHeader =>
kafkaManager.getConsumerIdentity(cluster,consumerGroup, consumerType).map { errorOrConsumerIdentity =>
Ok(views.html.consumer.consumerView(cluster,consumerGroup,errorOrConsumerIdentity)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def consumerAndTopic(cluster: String, consumerGroup: String, topic: String, consumerType: String) = Action.async {
+ def consumerAndTopic(cluster: String, consumerGroup: String, topic: String, consumerType: String) = Action.async { implicit request: RequestHeader =>
kafkaManager.getConsumedTopicState(cluster,consumerGroup,topic, consumerType).map { errorOrConsumedTopicState =>
Ok(views.html.consumer.consumedTopicView(cluster,consumerGroup,consumerType,topic,errorOrConsumedTopicState)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
diff --git a/app/controllers/Logkafka.scala b/app/controllers/Logkafka.scala
index 1fd3ae0ac..441f93892 100644
--- a/app/controllers/Logkafka.scala
+++ b/app/controllers/Logkafka.scala
@@ -8,31 +8,30 @@ package controllers
import java.util.Properties
import _root_.features.ApplicationFeatures
-import kafka.manager.model._
-import ActorModel.LogkafkaIdentity
+import kafka.manager._
import kafka.manager.features.KMLogKafkaFeature
+import kafka.manager.model.ActorModel.LogkafkaIdentity
+import kafka.manager.model._
import kafka.manager.utils.LogkafkaNewConfigs
-import kafka.manager._
import models.FollowLink
import models.form._
import models.navigation.Menus
import play.api.data.Form
import play.api.data.Forms._
-import play.api.data.validation.{Valid, Invalid, Constraint}
import play.api.data.validation.Constraints._
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.data.validation.{Constraint, Invalid, Valid}
+import play.api.i18n.I18nSupport
import play.api.mvc._
-import scala.concurrent.Future
-import scala.util.{Success, Failure, Try}
-import scalaz.{\/-, -\/}
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.{Failure, Success, Try}
+import scalaz.{-\/, \/-}
/**
* @author hiral
*/
-class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class Logkafka (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
implicit private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
@@ -92,6 +91,12 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
LogkafkaNewConfigs.configMaps(Kafka_1_1_1).map{case(k,v) => LKConfig(k,Some(v))}.toList)
val kafka_2_0_0_Default = CreateLogkafka("","",
LogkafkaNewConfigs.configMaps(Kafka_2_0_0).map{case(k,v) => LKConfig(k,Some(v))}.toList)
+ val kafka_2_1_0_Default = CreateLogkafka("","",
+ LogkafkaNewConfigs.configMaps(Kafka_2_1_0).map{case(k,v) => LKConfig(k,Some(v))}.toList)
+ val kafka_2_1_1_Default = CreateLogkafka("","",
+ LogkafkaNewConfigs.configMaps(Kafka_2_1_1).map{case(k,v) => LKConfig(k,Some(v))}.toList)
+ val kafka_2_2_0_Default = CreateLogkafka("","",
+ LogkafkaNewConfigs.configMaps(Kafka_2_2_0).map{case(k,v) => LKConfig(k,Some(v))}.toList)
val defaultCreateForm = Form(
mapping(
@@ -149,12 +154,15 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
case Kafka_1_1_0 => (defaultCreateForm.fill(kafka_1_1_0_Default), clusterContext)
case Kafka_1_1_1 => (defaultCreateForm.fill(kafka_1_1_1_Default), clusterContext)
case Kafka_2_0_0 => (defaultCreateForm.fill(kafka_2_0_0_Default), clusterContext)
+ case Kafka_2_1_0 => (defaultCreateForm.fill(kafka_2_1_0_Default), clusterContext)
+ case Kafka_2_1_1 => (defaultCreateForm.fill(kafka_2_1_1_Default), clusterContext)
+ case Kafka_2_2_0 => (defaultCreateForm.fill(kafka_2_2_0_Default), clusterContext)
}
}
}
}
- def logkafkas(c: String) = Action.async {
+ def logkafkas(c: String) = Action.async { implicit request:RequestHeader =>
clusterFeatureGate(c, KMLogKafkaFeature) { clusterContext =>
kafkaManager.getLogkafkaListExtended(c).map { errorOrLogkafkaList =>
Ok(views.html.logkafka.logkafkaList(c, errorOrLogkafkaList.map( lkle => (lkle, clusterContext)))).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
@@ -162,7 +170,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def logkafka(c: String, h: String, l:String) = Action.async {
+ def logkafka(c: String, h: String, l:String) = Action.async { implicit request:RequestHeader =>
clusterFeatureGate(c, KMLogKafkaFeature) { clusterContext =>
kafkaManager.getLogkafkaIdentity(c, h).map { errorOrLogkafkaIdentity =>
Ok(views.html.logkafka.logkafkaView(c, h, l, errorOrLogkafkaIdentity.map( lki => (lki, clusterContext)))).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
@@ -170,7 +178,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def createLogkafka(clusterName: String) = Action.async { implicit request =>
+ def createLogkafka(clusterName: String) = Action.async { implicit request:RequestHeader =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
createLogkafkaForm(clusterName).map { errorOrForm =>
Ok(views.html.logkafka.createLogkafka(clusterName, errorOrForm)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
@@ -178,7 +186,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def handleCreateLogkafka(clusterName: String) = Action.async { implicit request =>
+ def handleCreateLogkafka(clusterName: String) = Action.async { implicit request:Request[AnyContent] =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
implicit val clusterFeatures = clusterContext.clusterFeatures
defaultCreateForm.bindFromRequest.fold(
@@ -203,7 +211,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def handleDeleteLogkafka(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request =>
+ def handleDeleteLogkafka(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request:Request[AnyContent] =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
implicit val clusterFeatures = clusterContext.clusterFeatures
defaultDeleteForm.bindFromRequest.fold(
@@ -250,6 +258,9 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
case Kafka_1_1_0 => LogkafkaNewConfigs.configNames(Kafka_1_1_0).map(n => (n,LKConfig(n,None))).toMap
case Kafka_1_1_1 => LogkafkaNewConfigs.configNames(Kafka_1_1_1).map(n => (n,LKConfig(n,None))).toMap
case Kafka_2_0_0 => LogkafkaNewConfigs.configNames(Kafka_2_0_0).map(n => (n,LKConfig(n,None))).toMap
+ case Kafka_2_1_0 => LogkafkaNewConfigs.configNames(Kafka_2_1_0).map(n => (n,LKConfig(n,None))).toMap
+ case Kafka_2_1_1 => LogkafkaNewConfigs.configNames(Kafka_2_1_1).map(n => (n,LKConfig(n,None))).toMap
+ case Kafka_2_2_0 => LogkafkaNewConfigs.configNames(Kafka_2_2_0).map(n => (n,LKConfig(n,None))).toMap
}
val identityOption = li.identityMap.get(log_path)
if (identityOption.isDefined) {
@@ -266,7 +277,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def updateConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request =>
+ def updateConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request:RequestHeader =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
val errorOrFormFuture = kafkaManager.getLogkafkaIdentity(clusterName, logkafka_id).map(
_.map(lki => (updateConfigForm(clusterContext, log_path, lki), clusterContext))
@@ -277,7 +288,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def handleUpdateConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request =>
+ def handleUpdateConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request:Request[AnyContent] =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
implicit val clusterFeatures = clusterContext.clusterFeatures
defaultUpdateConfigForm.bindFromRequest.fold(
@@ -300,7 +311,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def handleEnableConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request =>
+ def handleEnableConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request:RequestHeader =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
implicit val clusterFeatures = clusterContext.clusterFeatures
val props = new Properties();
@@ -318,7 +329,7 @@ class Logkafka (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaMana
}
}
- def handleDisableConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request =>
+ def handleDisableConfig(clusterName: String, logkafka_id: String, log_path: String) = Action.async { implicit request:RequestHeader =>
clusterFeatureGate(clusterName, KMLogKafkaFeature) { clusterContext =>
implicit val clusterFeatures = clusterContext.clusterFeatures
val props = new Properties();
diff --git a/app/controllers/PreferredReplicaElection.scala b/app/controllers/PreferredReplicaElection.scala
index 734f72b81..f7ddd171e 100644
--- a/app/controllers/PreferredReplicaElection.scala
+++ b/app/controllers/PreferredReplicaElection.scala
@@ -8,24 +8,23 @@ package controllers
import features.{ApplicationFeatures, KMPreferredReplicaElectionFeature}
import kafka.manager.ApiError
import kafka.manager.features.ClusterFeatures
+import models.FollowLink
+import models.form.{PreferredReplicaElectionOperation, RunElection, UnknownPREO}
import models.navigation.Menus
-import models.{navigation, FollowLink}
-import models.form.{UnknownPREO, RunElection, PreferredReplicaElectionOperation}
import play.api.data.Form
import play.api.data.Forms._
-import play.api.data.validation.{Valid, Invalid, Constraint}
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.data.validation.{Constraint, Invalid, Valid}
+import play.api.i18n.I18nSupport
import play.api.mvc._
-import scala.concurrent.Future
+import scala.concurrent.{ExecutionContext, Future}
import scalaz.-\/
/**
* @author hiral
*/
-class PreferredReplicaElection (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class PreferredReplicaElection (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
private[this] implicit val cf: ClusterFeatures = ClusterFeatures.default
@@ -42,14 +41,14 @@ class PreferredReplicaElection (val messagesApi: MessagesApi, val kafkaManagerCo
)(PreferredReplicaElectionOperation.apply)(PreferredReplicaElectionOperation.unapply)
)
- def preferredReplicaElection(c: String) = Action.async {
+ def preferredReplicaElection(c: String) = Action.async { implicit request: RequestHeader =>
kafkaManager.getPreferredLeaderElection(c).map { errorOrStatus =>
Ok(views.html.preferredReplicaElection(c,errorOrStatus,preferredReplicaElectionForm)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def handleRunElection(c: String) = Action.async { implicit request =>
+ def handleRunElection(c: String) = Action.async { implicit request: Request[AnyContent] =>
featureGate(KMPreferredReplicaElectionFeature) {
preferredReplicaElectionForm.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.preferredReplicaElection(c, -\/(ApiError("Unknown operation!")), formWithErrors))),
diff --git a/app/controllers/ReassignPartitions.scala b/app/controllers/ReassignPartitions.scala
index 1ac3a0bd9..81c238764 100644
--- a/app/controllers/ReassignPartitions.scala
+++ b/app/controllers/ReassignPartitions.scala
@@ -5,29 +5,27 @@
package controllers
-import features.{KMReassignPartitionsFeature, ApplicationFeatures}
-import kafka.manager.model.ActorModel
-import ActorModel._
+import features.{ApplicationFeatures, KMReassignPartitionsFeature}
import kafka.manager.ApiError
-import models.form.ReassignPartitionOperation.{ForceRunAssignment, UnknownRPO, RunAssignment}
-import models.navigation.Menus
-import models.{navigation, FollowLink}
+import kafka.manager.model.ActorModel._
+import models.FollowLink
+import models.form.ReassignPartitionOperation.{ForceRunAssignment, RunAssignment, UnknownRPO}
import models.form._
+import models.navigation.Menus
import play.api.data.Form
import play.api.data.Forms._
-import play.api.data.validation.{Valid, Invalid, Constraint}
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.data.validation.{Constraint, Invalid, Valid}
+import play.api.i18n.I18nSupport
import play.api.mvc._
-import scala.concurrent.Future
-import scalaz.{\/, \/-, -\/}
+import scala.concurrent.{ExecutionContext, Future}
+import scalaz.{-\/, \/, \/-}
/**
* @author hiral
*/
-class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class ReassignPartitions (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] implicit val kafkaManager = kafkaManagerContext.getKafkaManager
@@ -102,13 +100,13 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
)(GenerateMultipleAssignments.apply)(GenerateMultipleAssignments.unapply)
)
- def reassignPartitions(c: String) = Action.async {
+ def reassignPartitions(c: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getReassignPartitions(c).map { errorOrStatus =>
Ok(views.html.reassignPartitions(c,errorOrStatus)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def runMultipleAssignments(c: String) = Action.async {
+ def runMultipleAssignments(c: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMReassignPartitionsFeature) {
kafkaManager.getTopicList(c).flatMap { errorOrSuccess =>
withClusterContext(c)(
@@ -126,7 +124,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def confirmAssignment(c: String, t: String) = Action.async {
+ def confirmAssignment(c: String, t: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMReassignPartitionsFeature) {
kafkaManager.getBrokerList(c).flatMap { errorOrSuccess =>
withClusterContext(c)(
@@ -147,7 +145,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def confirmMultipleAssignments(c: String) = Action.async {
+ def confirmMultipleAssignments(c: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMReassignPartitionsFeature) {
kafkaManager.getTopicList(c).flatMap { errOrTL =>
withClusterContext(c)(
@@ -180,7 +178,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
})
}
- def manualAssignments(c: String, t: String): Action[AnyContent] = Action.async {
+ def manualAssignments(c: String, t: String): Action[AnyContent] = Action.async { implicit request:RequestHeader =>
featureGate(KMReassignPartitionsFeature) {
withClusterFeatures(c)( err => {
@@ -261,7 +259,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def handleManualAssignment(c: String, t: String) = Action.async { implicit request =>
+ def handleManualAssignment(c: String, t: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMReassignPartitionsFeature) {
def validateAssignment(assignment: List[(String, List[(Int, List[Int])])]) = {
(for {
@@ -311,7 +309,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def handleGenerateAssignment(c: String, t: String) = Action.async { implicit request =>
+ def handleGenerateAssignment(c: String, t: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMReassignPartitionsFeature) {
withClusterContext(c)(
err => Future.successful(
@@ -342,7 +340,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def handleGenerateMultipleAssignments(c: String) = Action.async { implicit request =>
+ def handleGenerateMultipleAssignments(c: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMReassignPartitionsFeature) {
withClusterContext(c)(
err => Future.successful(
@@ -370,7 +368,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def handleRunMultipleAssignments(c: String) = Action.async { implicit request =>
+ def handleRunMultipleAssignments(c: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMReassignPartitionsFeature) {
withClusterContext(c)(
err => Future.successful(
@@ -401,7 +399,7 @@ class ReassignPartitions (val messagesApi: MessagesApi, val kafkaManagerContext:
}
}
- def handleOperation(c: String, t: String) = Action.async { implicit request =>
+ def handleOperation(c: String, t: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMReassignPartitionsFeature) {
withClusterContext(c)(
err => Future.successful(
diff --git a/app/controllers/Topic.scala b/app/controllers/Topic.scala
index a5c610876..8eb690e01 100644
--- a/app/controllers/Topic.scala
+++ b/app/controllers/Topic.scala
@@ -21,19 +21,18 @@ import play.api.data.Form
import play.api.data.Forms._
import play.api.data.validation.Constraints._
import play.api.data.validation.{Constraint, Invalid, Valid}
-import play.api.i18n.{I18nSupport, MessagesApi}
+import play.api.i18n.I18nSupport
import play.api.mvc._
-import scala.concurrent.Future
+import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
import scalaz.-\/
/**
* @author hiral
*/
-class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class Topic (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
@@ -46,25 +45,28 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- val kafka_0_8_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_1_1).map(n => TConfig(n,None)).toList)
- val kafka_0_8_2_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_2_0).map(n => TConfig(n,None)).toList)
- val kafka_0_8_2_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_2_1).map(n => TConfig(n,None)).toList)
- val kafka_0_8_2_2_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_2_2).map(n => TConfig(n,None)).toList)
- val kafka_0_9_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_9_0_0).map(n => TConfig(n,None)).toList)
- val kafka_0_9_0_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_9_0_1).map(n => TConfig(n,None)).toList)
- val kafka_0_10_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_10_0_0).map(n => TConfig(n,None)).toList)
- val kafka_0_10_0_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_10_0_1).map(n => TConfig(n,None)).toList)
- val kafka_0_10_1_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_10_1_0).map(n => TConfig(n,None)).toList)
- val kafka_0_10_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_10_1_1).map(n => TConfig(n,None)).toList)
- val kafka_0_10_2_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_10_2_0).map(n => TConfig(n,None)).toList)
- val kafka_0_10_2_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_10_2_1).map(n => TConfig(n,None)).toList)
- val kafka_0_11_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_11_0_0).map(n => TConfig(n,None)).toList)
- val kafka_0_11_0_2_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_11_0_2).map(n => TConfig(n,None)).toList)
- val kafka_1_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_1_0_0).map(n => TConfig(n,None)).toList)
- val kafka_1_0_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_1_0_1).map(n => TConfig(n,None)).toList)
- val kafka_1_1_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_1_1_0).map(n => TConfig(n,None)).toList)
- val kafka_1_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_1_1_1).map(n => TConfig(n,None)).toList)
- val kafka_2_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_2_0_0).map(n => TConfig(n,None)).toList)
+ val kafka_0_8_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_8_1_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_8_2_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_8_2_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_8_2_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_8_2_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_8_2_2_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_8_2_2).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_9_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_9_0_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_9_0_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_9_0_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_10_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_10_0_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_10_0_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_10_0_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_10_1_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_10_1_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_10_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_10_1_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_10_2_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_10_2_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_10_2_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_10_2_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_11_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_11_0_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_0_11_0_2_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_0_11_0_2).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_1_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_1_0_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_1_0_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_1_0_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_1_1_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_1_1_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_1_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_1_1_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_2_0_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_2_0_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_2_1_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_2_1_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_2_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_2_1_1).map{ case (n, h) => TConfig(n,None,h)}.toList)
+ val kafka_2_2_0_Default = CreateTopic("",1,1,TopicConfigs.configNamesAndDoc(Kafka_2_2_0).map{ case (n, h) => TConfig(n,None,h)}.toList)
val defaultCreateForm = Form(
mapping(
@@ -74,7 +76,8 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
"configs" -> list(
mapping(
"name" -> nonEmptyText,
- "value" -> optional(text)
+ "value" -> optional(text),
+ "help" -> text,
)(TConfig.apply)(TConfig.unapply)
)
)(CreateTopic.apply)(CreateTopic.unapply)
@@ -132,7 +135,8 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
"configs" -> list(
mapping(
"name" -> nonEmptyText,
- "value" -> optional(text)
+ "value" -> optional(text),
+ "help" -> text,
)(TConfig.apply)(TConfig.unapply)
),
"readVersion" -> number(min = 0)
@@ -162,18 +166,21 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
case Kafka_1_1_0 => (defaultCreateForm.fill(kafka_1_1_0_Default), clusterContext)
case Kafka_1_1_1 => (defaultCreateForm.fill(kafka_1_1_1_Default), clusterContext)
case Kafka_2_0_0 => (defaultCreateForm.fill(kafka_2_0_0_Default), clusterContext)
+ case Kafka_2_1_0 => (defaultCreateForm.fill(kafka_2_1_0_Default), clusterContext)
+ case Kafka_2_1_1 => (defaultCreateForm.fill(kafka_2_1_1_Default), clusterContext)
+ case Kafka_2_2_0 => (defaultCreateForm.fill(kafka_2_2_0_Default), clusterContext)
}
}
}
}
- def topics(c: String) = Action.async {
+ def topics(c: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getTopicListExtended(c).map { errorOrTopicList =>
Ok(views.html.topic.topicList(c,errorOrTopicList)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
}
}
- def topic(c: String, t: String, force: Boolean) = Action.async {
+ def topic(c: String, t: String, force: Boolean) = Action.async { implicit request:RequestHeader =>
val futureErrorOrTopicIdentity = kafkaManager.getTopicIdentity(c,t)
val futureErrorOrConsumerList = kafkaManager.getConsumersForTopic(c,t)
@@ -186,7 +193,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def createTopic(clusterName: String) = Action.async { implicit request =>
+ def createTopic(clusterName: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMTopicManagerFeature) {
createTopicForm(clusterName).map { errorOrForm =>
Ok(views.html.topic.createTopic(clusterName, errorOrForm)).withHeaders("X-Frame-Options" -> "SAMEORIGIN")
@@ -194,7 +201,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def handleCreateTopic(clusterName: String) = Action.async { implicit request =>
+ def handleCreateTopic(clusterName: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMTopicManagerFeature) {
defaultCreateForm.bindFromRequest.fold(
formWithErrors => {
@@ -232,7 +239,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def confirmDeleteTopic(clusterName: String, topic: String) = Action.async {
+ def confirmDeleteTopic(clusterName: String, topic: String) = Action.async { implicit request:RequestHeader =>
val futureErrorOrTopicIdentity = kafkaManager.getTopicIdentity(clusterName, topic)
val futureErrorOrConsumerList = kafkaManager.getConsumersForTopic(clusterName, topic)
@@ -241,7 +248,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def handleDeleteTopic(clusterName: String, topic: String) = Action.async { implicit request =>
+ def handleDeleteTopic(clusterName: String, topic: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMTopicManagerFeature) {
defaultDeleteForm.bindFromRequest.fold(
formWithErrors => Future.successful(
@@ -270,7 +277,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def addPartitions(clusterName: String, topic: String) = Action.async { implicit request =>
+ def addPartitions(clusterName: String, topic: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMTopicManagerFeature) {
val errorOrFormFuture = kafkaManager.getTopicIdentity(clusterName, topic).flatMap { errorOrTopicIdentity =>
errorOrTopicIdentity.fold(e => Future.successful(-\/(e)), { topicIdentity =>
@@ -288,7 +295,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def addPartitionsToMultipleTopics(clusterName: String) = Action.async { implicit request =>
+ def addPartitionsToMultipleTopics(clusterName: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMTopicManagerFeature) {
val errorOrFormFuture = kafkaManager.getTopicListExtended(clusterName).flatMap { errorOrTle =>
errorOrTle.fold(e => Future.successful(-\/(e)), { topicListExtended =>
@@ -311,7 +318,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def handleAddPartitions(clusterName: String, topic: String) = Action.async { implicit request =>
+ def handleAddPartitions(clusterName: String, topic: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMTopicManagerFeature) {
defaultAddPartitionsForm.bindFromRequest.fold(
formWithErrors => {
@@ -347,7 +354,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def handleAddPartitionsToMultipleTopics(clusterName: String) = Action.async { implicit request =>
+ def handleAddPartitionsToMultipleTopics(clusterName: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMTopicManagerFeature) {
defaultAddMultipleTopicsPartitionsForm.bindFromRequest.fold(
formWithErrors => {
@@ -389,35 +396,46 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
private def updateConfigForm(clusterName: String, ti: TopicIdentity) = {
kafkaManager.getClusterConfig(clusterName).map { errorOrConfig =>
errorOrConfig.map { clusterConfig =>
- val defaultConfigMap = clusterConfig.version match {
- case Kafka_0_8_1_1 => TopicConfigs.configNames(Kafka_0_8_1_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_8_2_0 => TopicConfigs.configNames(Kafka_0_8_2_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_8_2_1 => TopicConfigs.configNames(Kafka_0_8_2_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_8_2_2 => TopicConfigs.configNames(Kafka_0_8_2_2).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_9_0_0 => TopicConfigs.configNames(Kafka_0_9_0_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_9_0_1 => TopicConfigs.configNames(Kafka_0_9_0_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_10_0_0 => TopicConfigs.configNames(Kafka_0_10_0_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_10_0_1 => TopicConfigs.configNames(Kafka_0_10_0_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_10_1_0 => TopicConfigs.configNames(Kafka_0_10_1_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_10_1_1 => TopicConfigs.configNames(Kafka_0_10_1_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_10_2_0 => TopicConfigs.configNames(Kafka_0_10_2_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_10_2_1 => TopicConfigs.configNames(Kafka_0_10_2_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_11_0_0 => TopicConfigs.configNames(Kafka_0_11_0_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_0_11_0_2 => TopicConfigs.configNames(Kafka_0_11_0_2).map(n => (n,TConfig(n,None))).toMap
- case Kafka_1_0_0 => TopicConfigs.configNames(Kafka_1_0_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_1_0_1 => TopicConfigs.configNames(Kafka_1_0_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_1_1_0 => TopicConfigs.configNames(Kafka_1_1_0).map(n => (n,TConfig(n,None))).toMap
- case Kafka_1_1_1 => TopicConfigs.configNames(Kafka_1_1_1).map(n => (n,TConfig(n,None))).toMap
- case Kafka_2_0_0 => TopicConfigs.configNames(Kafka_2_0_0).map(n => (n,TConfig(n,None))).toMap
+ val defaultConfigs = clusterConfig.version match {
+ case Kafka_0_8_1_1 => TopicConfigs.configNamesAndDoc(Kafka_0_8_1_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_8_2_0 => TopicConfigs.configNamesAndDoc(Kafka_0_8_2_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_8_2_1 => TopicConfigs.configNamesAndDoc(Kafka_0_8_2_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_8_2_2 => TopicConfigs.configNamesAndDoc(Kafka_0_8_2_2).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_9_0_0 => TopicConfigs.configNamesAndDoc(Kafka_0_9_0_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_9_0_1 => TopicConfigs.configNamesAndDoc(Kafka_0_9_0_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_10_0_0 => TopicConfigs.configNamesAndDoc(Kafka_0_10_0_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_10_0_1 => TopicConfigs.configNamesAndDoc(Kafka_0_10_0_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_10_1_0 => TopicConfigs.configNamesAndDoc(Kafka_0_10_1_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_10_1_1 => TopicConfigs.configNamesAndDoc(Kafka_0_10_1_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_10_2_0 => TopicConfigs.configNamesAndDoc(Kafka_0_10_2_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_10_2_1 => TopicConfigs.configNamesAndDoc(Kafka_0_10_2_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_11_0_0 => TopicConfigs.configNamesAndDoc(Kafka_0_11_0_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_0_11_0_2 => TopicConfigs.configNamesAndDoc(Kafka_0_11_0_2).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_1_0_0 => TopicConfigs.configNamesAndDoc(Kafka_1_0_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+      case Kafka_1_0_1 => TopicConfigs.configNamesAndDoc(Kafka_1_0_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_1_1_0 => TopicConfigs.configNamesAndDoc(Kafka_1_1_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_1_1_1 => TopicConfigs.configNamesAndDoc(Kafka_1_1_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_2_0_0 => TopicConfigs.configNamesAndDoc(Kafka_2_0_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_2_1_0 => TopicConfigs.configNamesAndDoc(Kafka_2_1_0).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_2_1_1 => TopicConfigs.configNamesAndDoc(Kafka_2_1_1).map { case (n, h) => (n,TConfig(n,None, h)) }
+ case Kafka_2_2_0 => TopicConfigs.configNamesAndDoc(Kafka_2_2_0).map { case (n, h) => (n,TConfig(n,None, h)) }
}
- val combinedMap = defaultConfigMap ++ ti.config.toMap.map(tpl => tpl._1 -> TConfig(tpl._1,Option(tpl._2)))
- (defaultUpdateConfigForm.fill(UpdateTopicConfig(ti.topic,combinedMap.toList.map(_._2),ti.configReadVersion)),
+ val updatedConfigMap = ti.config.toMap
+ val updatedConfigList = defaultConfigs.map {
+ case (n, cfg) =>
+ if(updatedConfigMap.contains(n)) {
+ cfg.copy(value = Option(updatedConfigMap(n)))
+ } else {
+ cfg
+ }
+ }
+ (defaultUpdateConfigForm.fill(UpdateTopicConfig(ti.topic,updatedConfigList.toList,ti.configReadVersion)),
ti.clusterContext)
}
}
}
- def updateConfig(clusterName: String, topic: String) = Action.async { implicit request =>
+ def updateConfig(clusterName: String, topic: String) = Action.async { implicit request:RequestHeader =>
featureGate(KMTopicManagerFeature) {
val errorOrFormFuture = kafkaManager.getTopicIdentity(clusterName, topic).flatMap { errorOrTopicIdentity =>
errorOrTopicIdentity.fold(e => Future.successful(-\/(e)), { topicIdentity =>
@@ -430,7 +448,7 @@ class Topic (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManager
}
}
- def handleUpdateConfig(clusterName: String, topic: String) = Action.async { implicit request =>
+ def handleUpdateConfig(clusterName: String, topic: String) = Action.async { implicit request:Request[AnyContent] =>
featureGate(KMTopicManagerFeature) {
defaultUpdateConfigForm.bindFromRequest.fold(
formWithErrors => {
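// Editor's note: a minimal standalone sketch (not part of the patch) of the
// Play 2.6 action typing the hunks above migrate to. Read-only actions only
// need an implicit RequestHeader (enough for i18n), while form handlers need
// the full Request[AnyContent] because Form.bindFromRequest reads the body.
// DemoController and demoForm are hypothetical names.
import play.api.data.Form
import play.api.data.Forms._
import play.api.i18n.I18nSupport
import play.api.mvc._

import scala.concurrent.{ExecutionContext, Future}

class DemoController(val cc: ControllerComponents)(implicit ec: ExecutionContext)
  extends AbstractController(cc) with I18nSupport {

  private val demoForm: Form[String] = Form(single("name" -> nonEmptyText))

  // RequestHeader suffices when the body is never read.
  def show = Action.async { implicit request: RequestHeader =>
    Future.successful(Ok("read-only view"))
  }

  // bindFromRequest needs the body, hence Request[AnyContent].
  def submit = Action.async { implicit request: Request[AnyContent] =>
    demoForm.bindFromRequest.fold(
      formWithErrors => Future.successful(BadRequest("invalid form")),
      name => Future.successful(Ok(s"hello $name"))
    )
  }
}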
diff --git a/app/controllers/api/KafkaStateCheck.scala b/app/controllers/api/KafkaStateCheck.scala
index 325458376..c7007c2ee 100644
--- a/app/controllers/api/KafkaStateCheck.scala
+++ b/app/controllers/api/KafkaStateCheck.scala
@@ -10,22 +10,19 @@ import features.ApplicationFeatures
import kafka.manager.model.ActorModel.BrokerIdentity
import kafka.manager.model.SecurityProtocol
import models.navigation.Menus
-import play.api.i18n.{I18nSupport, MessagesApi}
-import play.api.libs.json._
-import play.api.mvc._
-
-import scala.concurrent.Future
import org.json4s.jackson.Serialization
import org.json4s.scalaz.JsonScalaz.toJSON
+import play.api.i18n.I18nSupport
+import play.api.libs.json._
+import play.api.mvc._
+
+import scala.concurrent.{ExecutionContext, Future}
/**
* @author jisookim0513
*/
-class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: KafkaManagerContext)
- (implicit af: ApplicationFeatures, menus: Menus) extends Controller with I18nSupport {
-
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
+class KafkaStateCheck (val cc: ControllerComponents, val kafkaManagerContext: KafkaManagerContext)
+ (implicit af: ApplicationFeatures, menus: Menus, ec:ExecutionContext) extends AbstractController(cc) with I18nSupport {
private[this] val kafkaManager = kafkaManagerContext.getKafkaManager
@@ -39,7 +36,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
implicit val brokerIdentityWrites = Json.writes[BrokerIdentity]
- def brokers(c: String) = Action.async { implicit request =>
+ def brokers(c: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getBrokerList(c).map { errorOrBrokerList =>
errorOrBrokerList.fold(
error => BadRequest(Json.obj("msg" -> error.msg)),
@@ -47,7 +44,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
)
}
}
- def brokersExtended(c: String) = Action.async { implicit request =>
+ def brokersExtended(c: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getBrokerList(c).map { errorOrBrokerList =>
errorOrBrokerList.fold(
error => BadRequest(Json.obj("msg" -> error.msg)),
@@ -56,7 +53,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def topics(c: String) = Action.async { implicit request =>
+ def topics(c: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getTopicList(c).map { errorOrTopicList =>
errorOrTopicList.fold(
error => BadRequest(Json.obj("msg" -> error.msg)),
@@ -65,7 +62,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def topicIdentities(c: String) = Action.async { implicit request =>
+ def topicIdentities(c: String) = Action.async { implicit request:RequestHeader =>
implicit val formats = org.json4s.DefaultFormats
kafkaManager.getTopicListExtended(c).map { errorOrTopicListExtended =>
errorOrTopicListExtended.fold(
@@ -75,7 +72,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def clusters = Action.async { implicit request =>
+ def clusters = Action.async { implicit request:RequestHeader =>
implicit val formats = org.json4s.DefaultFormats
kafkaManager.getClusterList.map { errorOrClusterList =>
errorOrClusterList.fold(
@@ -85,7 +82,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def underReplicatedPartitions(c: String, t: String) = Action.async { implicit request =>
+ def underReplicatedPartitions(c: String, t: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getTopicIdentity(c, t).map { errorOrTopicIdentity =>
errorOrTopicIdentity.fold(
error => BadRequest(Json.obj("msg" -> error.msg)),
@@ -94,7 +91,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def unavailablePartitions(c: String, t: String) = Action.async { implicit request =>
+ def unavailablePartitions(c: String, t: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getTopicIdentity(c, t).map { errorOrTopicIdentity =>
errorOrTopicIdentity.fold(
error => BadRequest(Json.obj("msg" -> error.msg)),
@@ -102,7 +99,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def topicSummaryAction(cluster: String, consumer: String, topic: String, consumerType: String) = Action.async { implicit request =>
+ def topicSummaryAction(cluster: String, consumer: String, topic: String, consumerType: String) = Action.async { implicit request:RequestHeader =>
getTopicSummary(cluster, consumer, topic, consumerType).map { errorOrTopicSummary =>
errorOrTopicSummary.fold(
error => BadRequest(Json.obj("msg" -> error.msg)),
@@ -126,7 +123,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
}
}
- def groupSummaryAction(cluster: String, consumer: String, consumerType: String) = Action.async { implicit request =>
+ def groupSummaryAction(cluster: String, consumer: String, consumerType: String) = Action.async { implicit request:RequestHeader =>
kafkaManager.getConsumerIdentity(cluster, consumer, consumerType).flatMap { errorOrConsumedTopicSummary =>
errorOrConsumedTopicSummary.fold(
error =>
@@ -145,7 +142,7 @@ class KafkaStateCheck (val messagesApi: MessagesApi, val kafkaManagerContext: Ka
Future.sequence(cosumdTopicSummary).map(_.toMap)
}
- def consumersSummaryAction(cluster: String) = Action.async { implicit request =>
+ def consumersSummaryAction(cluster: String) = Action.async { implicit request:RequestHeader =>
implicit val formats = org.json4s.DefaultFormats
kafkaManager.getConsumerListExtended(cluster).map { errorOrConsumersSummary =>
errorOrConsumersSummary.fold(
diff --git a/app/controllers/package.scala b/app/controllers/package.scala
index c4baee6f3..a5cb06b6b 100644
--- a/app/controllers/package.scala
+++ b/app/controllers/package.scala
@@ -2,23 +2,21 @@
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
-import features.{ApplicationFeatures, ApplicationFeature}
+import features.{ApplicationFeature, ApplicationFeatures}
+import kafka.manager.features.{ClusterFeature, ClusterFeatures}
import kafka.manager.model.ClusterContext
-import kafka.manager.{KafkaManager, ApiError}
-import kafka.manager.features.{ClusterFeatures, ClusterFeature}
-import play.api.mvc._
+import kafka.manager.{ApiError, KafkaManager}
import play.api.mvc.Results._
+import play.api.mvc._
-import scala.concurrent.Future
-import scalaz.{\/-, -\/, \/}
+import scala.concurrent.{ExecutionContext, Future}
+import scalaz.{-\/, \/-}
/**
* Created by hiral on 8/23/15.
*/
package object controllers {
- import play.api.libs.concurrent.Execution.Implicits.defaultContext
-
def featureGate(af: ApplicationFeature)(fn: => Future[Result])(implicit features: ApplicationFeatures) : Future[Result] = {
if(features.features(af)) {
fn
@@ -28,7 +26,7 @@ package object controllers {
}
def clusterFeatureGate(clusterName: String, cf: ClusterFeature)(fn: ClusterContext => Future[Result])
- (implicit km: KafkaManager) : Future[Result] = {
+ (implicit km: KafkaManager, ec:ExecutionContext) : Future[Result] = {
km.getClusterContext(clusterName).flatMap { clusterContextOrError =>
clusterContextOrError.fold(
error => {
@@ -48,7 +46,7 @@ package object controllers {
}
def withClusterFeatures(clusterName: String)(err: ApiError => Future[Result], fn: ClusterFeatures => Future[Result])
- (implicit km: KafkaManager) : Future[Result] = {
+ (implicit km: KafkaManager, ec: ExecutionContext) : Future[Result] = {
km.getClusterContext(clusterName).flatMap { clusterContextOrError =>
clusterContextOrError.map(_.clusterFeatures) match {
case -\/(error) => err(error)
@@ -58,7 +56,7 @@ package object controllers {
}
def withClusterContext(clusterName: String)(err: ApiError => Future[Result], fn: ClusterContext => Future[Result])
- (implicit km: KafkaManager) : Future[Result] = {
+ (implicit km: KafkaManager, ec: ExecutionContext) : Future[Result] = {
km.getClusterContext(clusterName).flatMap { clusterContextOrError =>
clusterContextOrError match {
case -\/(error) => err(error)
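// Editor's note: a hypothetical caller showing the effect of the signature
// changes above. Play 2.6 drops the global
// play.api.libs.concurrent.Execution.Implicits.defaultContext that this
// package object previously imported, so each helper now receives the
// controller's injected ExecutionContext as an implicit parameter instead.
import kafka.manager.KafkaManager
import play.api.mvc.{Result, Results}

import scala.concurrent.{ExecutionContext, Future}

object ClusterContextDemo {
  def pingCluster(clusterName: String)
                 (implicit km: KafkaManager, ec: ExecutionContext): Future[Result] =
    controllers.withClusterContext(clusterName)(
      err => Future.successful(Results.BadRequest(err.msg)),
      _ => Future.successful(Results.Ok("cluster context resolved"))
    )
}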
diff --git a/app/kafka/manager/KafkaManager.scala b/app/kafka/manager/KafkaManager.scala
index 8bed8ceb7..7e563c3d2 100644
--- a/app/kafka/manager/KafkaManager.scala
+++ b/app/kafka/manager/KafkaManager.scala
@@ -21,7 +21,7 @@ import kafka.manager.utils.UtilException
import kafka.manager.utils.zero81.ReassignPartitionErrors.ReplicationOutOfSync
import kafka.manager.utils.zero81.{ForceOnReplicationOutOfSync, ForceReassignmentCommand, ReassignPartitionErrors}
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
@@ -247,7 +247,7 @@ class KafkaManager(akkaConfig: Config) extends Logging {
def shutdown(): Unit = {
implicit val ec = apiExecutionContext
system.actorSelection(kafkaManagerActor).tell(KMShutdown, system.deadLetters)
- system.shutdown()
+ Try(Await.ready(system.terminate(), Duration(30, TimeUnit.SECONDS)))
apiExecutor.shutdown()
}
diff --git a/app/kafka/manager/actor/KafkaManagerActor.scala b/app/kafka/manager/actor/KafkaManagerActor.scala
index c898a78e2..94c8eda82 100644
--- a/app/kafka/manager/actor/KafkaManagerActor.scala
+++ b/app/kafka/manager/actor/KafkaManagerActor.scala
@@ -146,10 +146,13 @@ class KafkaManagerActor(kafkaManagerConfig: KafkaManagerActorConfig)
Future {
try {
log.debug(s"Acquiring kafka manager mutex...")
- mutex.acquire(kafkaManagerConfig.mutexTimeoutMillis,TimeUnit.MILLISECONDS)
- KMCommandResult(Try {
- fn
- })
+ if(mutex.acquire(kafkaManagerConfig.mutexTimeoutMillis,TimeUnit.MILLISECONDS)) {
+ KMCommandResult(Try {
+ fn
+ })
+ } else {
+ throw new RuntimeException("Failed to acquire lock for kafka manager command")
+ }
} finally {
if(mutex.isAcquiredInThisProcess) {
log.debug(s"Releasing kafka manger mutex...")
diff --git a/app/kafka/manager/actor/cluster/ClusterManagerActor.scala b/app/kafka/manager/actor/cluster/ClusterManagerActor.scala
index 8d43d61c5..252161dd1 100644
--- a/app/kafka/manager/actor/cluster/ClusterManagerActor.scala
+++ b/app/kafka/manager/actor/cluster/ClusterManagerActor.scala
@@ -622,8 +622,11 @@ class ClusterManagerActor(cmConfig: ClusterManagerActorConfig)
private[this] def modify[T](fn: => T): T = {
try {
- mutex.acquire(cmConfig.mutexTimeoutMillis,TimeUnit.MILLISECONDS)
- fn
+ if(mutex.acquire(cmConfig.mutexTimeoutMillis,TimeUnit.MILLISECONDS)) {
+ fn
+ } else {
+ throw new RuntimeException("Failed to acquire mutex for cluster manager command")
+ }
} finally {
if(mutex.isAcquiredInThisProcess) {
mutex.release()
diff --git a/app/kafka/manager/actor/cluster/KafkaStateActor.scala b/app/kafka/manager/actor/cluster/KafkaStateActor.scala
index 7654e261f..4fcd9116b 100644
--- a/app/kafka/manager/actor/cluster/KafkaStateActor.scala
+++ b/app/kafka/manager/actor/cluster/KafkaStateActor.scala
@@ -8,6 +8,7 @@ package kafka.manager.actor.cluster
import java.io.Closeable
import java.net.InetAddress
import java.nio.ByteBuffer
+import java.time.Duration
import java.util.Properties
import java.util.concurrent.{ConcurrentLinkedDeque, TimeUnit}
@@ -16,8 +17,6 @@ import akka.pattern._
import com.github.benmanes.caffeine.cache.{Cache, Caffeine, RemovalCause, RemovalListener}
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import grizzled.slf4j.Logging
-import kafka.admin.AdminClient
-import kafka.api.PartitionOffsetRequestInfo
import kafka.common.{OffsetAndMetadata, TopicAndPartition}
import kafka.manager._
import kafka.manager.base.cluster.{BaseClusterQueryActor, BaseClusterQueryCommandActor}
@@ -33,11 +32,10 @@ import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode
import org.apache.curator.framework.recipes.cache._
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.consumer.{Consumer, ConsumerRecords, KafkaConsumer}
-import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.{ConsumerGroupState, TopicPartition}
import org.apache.kafka.common.requests.DescribeGroupsResponse
import org.joda.time.{DateTime, DateTimeZone}
-import scala.collection.JavaConversions.{mapAsScalaMap, _}
import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Map
import scala.collection.mutable
@@ -48,6 +46,9 @@ import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.clients.CommonClientConfigs.SECURITY_PROTOCOL_CONFIG
+import org.apache.kafka.clients.admin.{AdminClient, ConsumerGroupDescription, DescribeConsumerGroupsOptions}
+import org.apache.kafka.common.KafkaFuture.BiConsumer
+import org.apache.kafka.common.utils.Time
/**
* @author hiral
@@ -56,6 +57,7 @@ import kafka.manager.utils._
import scala.collection.JavaConverters._
+case class PartitionOffsetRequestInfo(time: Long, maxNumOffsets: Int)
case class KafkaAdminClientActorConfig(clusterContext: ClusterContext, longRunningPoolConfig: LongRunningPoolConfig, kafkaStateActorPath: ActorPath, consumerProperties: Option[Properties])
case class KafkaAdminClientActor(config: KafkaAdminClientActorConfig) extends BaseClusterQueryActor with LongRunningPoolActor {
@@ -125,27 +127,24 @@ case class KafkaAdminClientActor(config: KafkaAdminClientActorConfig) extends Ba
request match {
case KAGetGroupSummary(groupList: Seq[String], enqueue: java.util.Queue[(String, List[MemberMetadata])]) =>
Future {
- groupList.foreach {
- group =>
- try {
- adminClientOption.foreach {
- client =>
- val groupMetadata = client.describeConsumerGroupHandler(client.findCoordinator(group, 1000), group)
- if (groupMetadata != null) {
- if(isValidConsumerGroupResponse(groupMetadata)) {
- enqueue.offer((group, groupMetadata.members().asScala.map(m => MemberMetadata.from(group, groupMetadata, m)).toList))
- } else {
- log.error(s"Invalid group metadata group=$group metadata.error=${groupMetadata.error} metadata.state=${groupMetadata.state()} metadata.protocolType=${groupMetadata.protocolType()}")
- }
- }
+ try {
+ adminClientOption.foreach {
+ client =>
+ val options = new DescribeConsumerGroupsOptions
+ options.timeoutMs(1000)
+ client.describeConsumerGroups(groupList.asJava, options).all().whenComplete {
+                  (mapGroupDescription, error) =>
+                    // guard: when the describe call fails, the description map may be null
+                    if (error == null) mapGroupDescription.asScala.foreach {
+                      case (group, desc) =>
+                        enqueue.offer(group -> desc.members().asScala.map(m => MemberMetadata.from(group, desc, m)).toList)
+                    } else log.error(error, s"Failed to describe consumer groups : $groupList")
}
- } catch {
- case e: Exception =>
- log.error(e, s"Failed to get group summary with admin client : $group")
- log.error(e, s"Forcing new admin client initialization...")
- Try { adminClientOption.foreach(_.close()) }
- adminClientOption = None
- }
+ }
+ } catch {
+ case e: Exception =>
+ log.error(e, s"Failed to get group summary with admin client : $groupList")
+ log.error(e, s"Forcing new admin client initialization...")
+ Try { adminClientOption.foreach(_.close()) }
+ adminClientOption = None
}
}
case any: Any => log.warning("kac : processQueryRequest : Received unknown message: {}", any.toString)
@@ -177,7 +176,7 @@ class KafkaAdminClient(context: => ActorContext, adminClientActorPath: ActorPath
object KafkaManagedOffsetCache {
- val supportedVersions: Set[KafkaVersion] = Set(Kafka_0_8_2_0, Kafka_0_8_2_1, Kafka_0_8_2_2, Kafka_0_9_0_0, Kafka_0_9_0_1, Kafka_0_10_0_0, Kafka_0_10_0_1, Kafka_0_10_1_0, Kafka_0_10_1_1, Kafka_0_10_2_0, Kafka_0_10_2_1, Kafka_0_11_0_0, Kafka_0_11_0_2, Kafka_1_0_0, Kafka_1_0_1, Kafka_1_1_0, Kafka_1_1_1, Kafka_2_0_0)
+ val supportedVersions: Set[KafkaVersion] = Set(Kafka_0_8_2_0, Kafka_0_8_2_1, Kafka_0_8_2_2, Kafka_0_9_0_0, Kafka_0_9_0_1, Kafka_0_10_0_0, Kafka_0_10_0_1, Kafka_0_10_1_0, Kafka_0_10_1_1, Kafka_0_10_2_0, Kafka_0_10_2_1, Kafka_0_11_0_0, Kafka_0_11_0_2, Kafka_1_0_0, Kafka_1_0_1, Kafka_1_1_0, Kafka_1_1_1, Kafka_2_0_0, Kafka_2_1_0, Kafka_2_1_1, Kafka_2_2_0)
val ConsumerOffsetTopic = "__consumer_offsets"
def isSupported(version: KafkaVersion) : Boolean = {
@@ -341,7 +340,7 @@ case class KafkaManagedOffsetCache(clusterContext: ClusterContext
error("Failed to backfill group metadata", e)
}
- val records: ConsumerRecords[Array[Byte], Array[Byte]] = consumer.poll(100)
+ val records: ConsumerRecords[Array[Byte], Array[Byte]] = consumer.poll(Duration.ofMillis(100))
val iterator = records.iterator()
while (iterator.hasNext) {
val record = iterator.next()
@@ -375,7 +374,7 @@ case class KafkaManagedOffsetCache(clusterContext: ClusterContext
}
topicSet += topic
case GroupMetadataKey(version, key) =>
- val value: GroupMetadata = readGroupMessageValue(key, ByteBuffer.wrap(record.value()))
+ val value: GroupMetadata = readGroupMessageValue(key, ByteBuffer.wrap(record.value()), Time.SYSTEM)
value.allMemberMetadata.foreach {
mm =>
mm.assignment.foreach {
@@ -530,8 +529,8 @@ trait OffsetCache extends Logging {
}
}
- futureMap onFailure {
- case t => error(s"[topic=$topic] An error has occurred while getting topic offsets", t)
+ futureMap.failed.foreach {
+ t => error(s"[topic=$topic] An error has occurred while getting topic offsets", t)
}
futureMap
}
@@ -1209,7 +1208,7 @@ class KafkaStateActor(config: KafkaStateActorConfig) extends BaseClusterQueryCom
private[this] def getTopicConfigString(topic: String) : Option[(Int,String)] = {
val data: mutable.Buffer[ChildData] = topicsConfigPathCache.getCurrentData.asScala
- val result: Option[ChildData] = data.find(p => p.getPath.endsWith(topic))
+ val result: Option[ChildData] = data.find(p => p.getPath.endsWith("/" + topic))
result.map(cd => (cd.getStat.getVersion,asString(cd.getData)))
}
@@ -1489,7 +1488,7 @@ class KafkaStateActor(config: KafkaStateActorConfig) extends BaseClusterQueryCom
try {
kafkaConsumer = Option(new KafkaConsumer(consumerProperties))
val request = tpList.map(f => new TopicPartition(f._1.topic, f._1.partition))
- var tpOffsetMapOption = kafkaConsumer.map(_.endOffsets(request))
+ var tpOffsetMapOption = kafkaConsumer.map(_.endOffsets(request.asJavaCollection).asScala)
var topicOffsetMap: Map[Int, Long] = null
tpOffsetMapOption.foreach(tpOffsetMap => tpOffsetMap.keys.foreach(tp => {
@@ -1504,7 +1503,7 @@ class KafkaStateActor(config: KafkaStateActorConfig) extends BaseClusterQueryCom
}))
} catch {
case e: Exception =>
- log.error(s"consumerProperties:$consumerProperties", e)
+ log.error(e, s"consumerProperties:$consumerProperties")
throw e
} finally {
kafkaConsumer.foreach(_.close())
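// Editor's note: a compact sketch of the new-consumer API changes above:
// poll(Long) gave way to poll(java.time.Duration), and endOffsets takes a
// java Collection and returns a java Map, hence asJavaCollection/asScala.
// Broker address, topic, and group id are placeholders.
import java.time.Duration
import java.util.Properties

import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.TopicPartition

import scala.collection.JavaConverters._

object EndOffsetsDemo extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092")
  props.put("group.id", "km-demo")
  props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
  props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")

  val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](props)
  try {
    val tps = List(new TopicPartition("demo-topic", 0))
    val endOffsets = consumer.endOffsets(tps.asJavaCollection).asScala // java Map -> scala Map
    consumer.assign(tps.asJava)
    val records = consumer.poll(Duration.ofMillis(100)) // Duration overload
    println(s"end offsets: $endOffsets, fetched ${records.count()} records")
  } finally consumer.close()
}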
diff --git a/app/kafka/manager/actor/cluster/package.scala b/app/kafka/manager/actor/cluster/package.scala
index ba505a22c..961f935c4 100644
--- a/app/kafka/manager/actor/cluster/package.scala
+++ b/app/kafka/manager/actor/cluster/package.scala
@@ -6,7 +6,8 @@
package kafka.manager.actor
import grizzled.slf4j.Logging
-import kafka.manager.features.{ClusterFeatures, ClusterFeature}
+import kafka.manager.features.{ClusterFeature, ClusterFeatures}
+import org.apache.kafka.common.KafkaFuture.BiConsumer
import scala.util.{Failure, Try}
@@ -25,6 +26,14 @@ package object cluster {
}
}
+ implicit def toBiConsumer[A,B](fn: (A, B) => Unit): BiConsumer[A, B] = {
+ new BiConsumer[A, B] {
+ override def accept(a: A, b: B): Unit = {
+ fn(a, b)
+ }
+ }
+ }
+
def featureGate[T](af: ClusterFeature)(fn: => Unit)(implicit features: ClusterFeatures) : Unit = {
if(features.features(af)) {
fn
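// Editor's note: a hypothetical use of the toBiConsumer conversion above.
// KafkaFuture.whenComplete expects Kafka's own KafkaFuture.BiConsumer type
// (not java.util.function.BiConsumer). Scala 2.12 function literals already
// satisfy it via SAM conversion; the implicit additionally lets an existing
// (A, B) => Unit value be passed unchanged.
import kafka.manager.actor.cluster._ // brings toBiConsumer into scope

import org.apache.kafka.common.KafkaFuture

object BiConsumerDemo {
  val handler: (String, Throwable) => Unit = (value, error) =>
    if (error != null) println(s"failed: ${error.getMessage}")
    else println(s"completed with: $value")

  def logOutcome(future: KafkaFuture[String]): Unit = {
    future.whenComplete(handler) // Function2 converted via toBiConsumer
  }
}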
diff --git a/app/kafka/manager/model/model.scala b/app/kafka/manager/model/model.scala
index 4a456f781..bf8e996bb 100644
--- a/app/kafka/manager/model/model.scala
+++ b/app/kafka/manager/model/model.scala
@@ -88,6 +88,18 @@ case object Kafka_2_0_0 extends KafkaVersion {
override def toString = "2.0.0"
}
+case object Kafka_2_1_0 extends KafkaVersion {
+ override def toString = "2.1.0"
+}
+
+case object Kafka_2_1_1 extends KafkaVersion {
+ override def toString = "2.1.1"
+}
+
+case object Kafka_2_2_0 extends KafkaVersion {
+ override def toString = "2.2.0"
+}
+
object KafkaVersion {
val supportedVersions: Map[String,KafkaVersion] = Map(
"0.8.1.1" -> Kafka_0_8_1_1,
@@ -109,7 +121,10 @@ object KafkaVersion {
"1.0.1" -> Kafka_1_0_1,
"1.1.0" -> Kafka_1_1_0,
"1.1.1" -> Kafka_1_1_1,
- "2.0.0" -> Kafka_2_0_0
+ "2.0.0" -> Kafka_2_0_0,
+ "2.1.0" -> Kafka_2_1_0,
+ "2.1.1" -> Kafka_2_1_1,
+ "2.2.0" -> Kafka_2_2_0
)
val formSelectList : IndexedSeq[(String,String)] = supportedVersions.toIndexedSeq.filterNot(_._1.contains("beta")).map(t => (t._1,t._2.toString)).sortWith((a, b) => sortVersion(a._1, b._1))
@@ -446,6 +461,10 @@ case object SASL_PLAINTEXT extends SecurityProtocol {
val stringId = "SASL_PLAINTEXT"
val secure = true
}
+case object PLAINTEXTSASL extends SecurityProtocol {
+ val stringId = "PLAINTEXTSASL"
+ val secure = true
+}
case object SASL_SSL extends SecurityProtocol {
val stringId = "SASL_SSL"
val secure = true
@@ -461,6 +480,7 @@ case object PLAINTEXT extends SecurityProtocol {
object SecurityProtocol {
private[this] val typesMap: Map[String, SecurityProtocol] = Map(
SASL_PLAINTEXT.stringId -> SASL_PLAINTEXT
+ , PLAINTEXTSASL.stringId -> SASL_PLAINTEXT
, SASL_SSL.stringId -> SASL_SSL
, SSL.stringId -> SSL
, PLAINTEXT.stringId -> PLAINTEXT
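// Editor's note: PLAINTEXTSASL above is an alias: some distributions
// (reportedly Hortonworks-packaged Kafka) report SASL_PLAINTEXT under that
// name, so both strings must resolve to the same protocol. A toy lookup
// mirroring the typesMap above:
object SecurityProtocolAliasDemo extends App {
  val typesMap: Map[String, String] = Map(
    "SASL_PLAINTEXT" -> "SASL_PLAINTEXT",
    "PLAINTEXTSASL" -> "SASL_PLAINTEXT", // alias resolves to the canonical protocol
    "SASL_SSL" -> "SASL_SSL",
    "SSL" -> "SSL",
    "PLAINTEXT" -> "PLAINTEXT"
  )
  assert(typesMap("PLAINTEXTSASL") == typesMap("SASL_PLAINTEXT"))
}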
diff --git a/app/kafka/manager/utils/LogkafkaNewConfigs.scala b/app/kafka/manager/utils/LogkafkaNewConfigs.scala
index 0e399e2a9..0c20f41a5 100644
--- a/app/kafka/manager/utils/LogkafkaNewConfigs.scala
+++ b/app/kafka/manager/utils/LogkafkaNewConfigs.scala
@@ -36,8 +36,11 @@ object LogkafkaNewConfigs {
Kafka_1_0_1 -> logkafka82.LogConfig,
Kafka_1_1_0 -> logkafka82.LogConfig,
Kafka_1_1_1 -> logkafka82.LogConfig,
- Kafka_2_0_0 -> logkafka82.LogConfig
- )
+ Kafka_2_0_0 -> logkafka82.LogConfig,
+ Kafka_2_1_0 -> logkafka82.LogConfig,
+ Kafka_2_1_1 -> logkafka82.LogConfig,
+ Kafka_2_2_0 -> logkafka82.LogConfig
+ )
def configNames(version: KafkaVersion) : Set[String] = {
logkafkaConfigsByVersion.get(version) match {
diff --git a/app/kafka/manager/utils/TopicConfigs.scala b/app/kafka/manager/utils/TopicConfigs.scala
index 9081f9c87..b8032061a 100644
--- a/app/kafka/manager/utils/TopicConfigs.scala
+++ b/app/kafka/manager/utils/TopicConfigs.scala
@@ -1,7 +1,7 @@
/**
- * Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
- * See accompanying LICENSE file.
- */
+ * Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
+ * See accompanying LICENSE file.
+ */
package kafka.manager.utils
@@ -11,44 +11,58 @@ import kafka.manager.model.{Kafka_1_0_0, _}
trait TopicConfigs {
- def configNames : Set[String]
+ def configNames: Seq[String]
+
def validate(props: Properties)
+
+ def configNamesAndDoc: Seq[(String, String)]
}
object TopicConfigs {
-
- val topicConfigsByVersion : Map[KafkaVersion, TopicConfigs] = Map(
+
+ val topicConfigsByVersion: Map[KafkaVersion, TopicConfigs] = Map(
Kafka_0_8_1_1 -> zero81.LogConfig,
Kafka_0_8_2_0 -> zero82.LogConfig,
Kafka_0_8_2_1 -> zero82.LogConfig,
Kafka_0_8_2_2 -> zero82.LogConfig,
Kafka_0_9_0_0 -> zero90.LogConfig,
Kafka_0_9_0_1 -> zero90.LogConfig,
- Kafka_0_10_0_0 -> zero90.LogConfig,
- Kafka_0_10_0_1 -> zero90.LogConfig,
- Kafka_0_10_1_0 -> zero90.LogConfig,
- Kafka_0_10_1_1 -> zero90.LogConfig,
- Kafka_0_10_2_0 -> zero90.LogConfig,
- Kafka_0_10_2_1 -> zero90.LogConfig,
- Kafka_0_11_0_0 -> zero90.LogConfig,
- Kafka_0_11_0_2 -> zero90.LogConfig,
- Kafka_1_0_0 -> zero90.LogConfig,
- Kafka_1_0_1 -> zero90.LogConfig,
- Kafka_1_1_0 -> zero90.LogConfig,
- Kafka_1_1_1 -> zero90.LogConfig,
- Kafka_2_0_0 -> zero90.LogConfig
- )
-
- def configNames(version: KafkaVersion) : Set[String] = {
+ Kafka_0_10_0_0 -> zero10.LogConfig,
+ Kafka_0_10_0_1 -> zero10.LogConfig,
+ Kafka_0_10_1_0 -> zero10.LogConfig,
+ Kafka_0_10_1_1 -> zero10.LogConfig,
+ Kafka_0_10_2_0 -> zero10.LogConfig,
+ Kafka_0_10_2_1 -> zero10.LogConfig,
+ Kafka_0_11_0_0 -> zero11.LogConfig,
+ Kafka_0_11_0_2 -> zero11.LogConfig,
+ Kafka_1_0_0 -> one10.LogConfig,
+ Kafka_1_0_1 -> one10.LogConfig,
+ Kafka_1_1_0 -> one10.LogConfig,
+ Kafka_1_1_1 -> one10.LogConfig,
+ Kafka_2_0_0 -> two00.LogConfig,
+ Kafka_2_1_0 -> two00.LogConfig,
+ Kafka_2_1_1 -> two00.LogConfig,
+ Kafka_2_2_0 -> two00.LogConfig
+ )
+
+ def configNames(version: KafkaVersion): Seq[String] = {
topicConfigsByVersion.get(version) match {
case Some(tc) => tc.configNames
case None => throw new IllegalArgumentException(s"Undefined topic configs for version : $version, cannot get config names")
}
}
- def validate(version: KafkaVersion, props: Properties) : Unit = {
+
+ def validate(version: KafkaVersion, props: Properties): Unit = {
topicConfigsByVersion.get(version) match {
case Some(tc) => tc.validate(props)
case None => throw new IllegalArgumentException(s"Undefined topic configs for version : $version, cannot validate config")
}
}
+
+ def configNamesAndDoc(version: KafkaVersion): Seq[(String, String)] = {
+ topicConfigsByVersion.get(version) match {
+ case Some(tc) => tc.configNamesAndDoc
+ case None => throw new IllegalArgumentException(s"Undefined topic configs for version : $version, cannot get config names and doc")
+ }
+ }
}
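// Editor's note: a hypothetical caller of the new configNamesAndDoc API,
// mirroring how updateConfigForm seeds the topic form earlier in this patch:
// each config name for the cluster's Kafka version becomes an empty-valued
// TConfig carrying its documentation string. The TConfig shape is assumed
// from its usage above.
import kafka.manager.model.Kafka_2_2_0
import kafka.manager.utils.TopicConfigs

case class TConfig(name: String, value: Option[String], help: String) // assumed shape

object ConfigDocsDemo extends App {
  val defaults: Seq[TConfig] =
    TopicConfigs.configNamesAndDoc(Kafka_2_2_0).map { case (name, doc) =>
      TConfig(name, None, doc)
    }
  defaults.take(3).foreach(c => println(s"${c.name}: ${c.help.take(60)}"))
}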
diff --git a/app/kafka/manager/utils/one10/GroupMetadataManager.scala b/app/kafka/manager/utils/one10/GroupMetadataManager.scala
index db503b075..4d691e4ff 100644
--- a/app/kafka/manager/utils/one10/GroupMetadataManager.scala
+++ b/app/kafka/manager/utils/one10/GroupMetadataManager.scala
@@ -20,20 +20,101 @@ package kafka.manager.utils.one10
import java.io.PrintStream
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
-import java.util.UUID
+import java.util.Optional
+import java.util.concurrent.locks.ReentrantLock
-import kafka.common.{KafkaException, MessageFormatter, OffsetAndMetadata}
-import kafka.utils.{Logging, nonthreadsafe}
+import kafka.api.{ApiVersion, KAFKA_2_1_IV0, KAFKA_2_1_IV1}
+import kafka.common.{MessageFormatter, OffsetAndMetadata}
+import kafka.coordinator.group.JoinGroupResult
+import kafka.utils.{CoreUtils, Logging, nonthreadsafe}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.consumer.internals.{ConsumerProtocol, PartitionAssignor}
-import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.types.Type._
import org.apache.kafka.common.protocol.types._
-import org.apache.kafka.common.utils.Utils
+import org.apache.kafka.common.record._
+import org.apache.kafka.common.utils.Time
+import org.apache.kafka.common.{KafkaException, TopicPartition}
import scala.collection.JavaConverters._
import scala.collection.{Seq, immutable, mutable, _}
+private[one10] sealed trait GroupState
+
+/**
+ * Group is preparing to rebalance
+ *
+ * action: respond to heartbeats with REBALANCE_IN_PROGRESS
+ * respond to sync group with REBALANCE_IN_PROGRESS
+ * remove member on leave group request
+ * park join group requests from new or existing members until all expected members have joined
+ * allow offset commits from previous generation
+ * allow offset fetch requests
+ * transition: some members have joined by the timeout => CompletingRebalance
+ * all members have left the group => Empty
+ * group is removed by partition emigration => Dead
+ */
+private[one10] case object PreparingRebalance extends GroupState
+
+/**
+ * Group is awaiting state assignment from the leader
+ *
+ * action: respond to heartbeats with REBALANCE_IN_PROGRESS
+ * respond to offset commits with REBALANCE_IN_PROGRESS
+ * park sync group requests from followers until transition to Stable
+ * allow offset fetch requests
+ * transition: sync group with state assignment received from leader => Stable
+ * join group from new member or existing member with updated metadata => PreparingRebalance
+ * leave group from existing member => PreparingRebalance
+ * member failure detected => PreparingRebalance
+ * group is removed by partition emigration => Dead
+ */
+private[one10] case object CompletingRebalance extends GroupState
+
+/**
+ * Group is stable
+ *
+ * action: respond to member heartbeats normally
+ * respond to sync group from any member with current assignment
+ * respond to join group from followers with matching metadata with current group metadata
+ * allow offset commits from member of current generation
+ * allow offset fetch requests
+ * transition: member failure detected via heartbeat => PreparingRebalance
+ * leave group from existing member => PreparingRebalance
+ * leader join-group received => PreparingRebalance
+ * follower join-group with new metadata => PreparingRebalance
+ * group is removed by partition emigration => Dead
+ */
+private[one10] case object Stable extends GroupState
+
+/**
+ * Group has no more members and its metadata is being removed
+ *
+ * action: respond to join group with UNKNOWN_MEMBER_ID
+ * respond to sync group with UNKNOWN_MEMBER_ID
+ * respond to heartbeat with UNKNOWN_MEMBER_ID
+ * respond to leave group with UNKNOWN_MEMBER_ID
+ * respond to offset commit with UNKNOWN_MEMBER_ID
+ * allow offset fetch requests
+ * transition: Dead is a final state before group metadata is cleaned up, so there are no transitions
+ */
+private[one10] case object Dead extends GroupState
+
+/**
+ * Group has no more members, but lingers until all offsets have expired. This state
+ * also represents groups which use Kafka only for offset commits and have no members.
+ *
+ * action: respond normally to join group from new members
+ * respond to sync group with UNKNOWN_MEMBER_ID
+ * respond to heartbeat with UNKNOWN_MEMBER_ID
+ * respond to leave group with UNKNOWN_MEMBER_ID
+ * respond to offset commit with UNKNOWN_MEMBER_ID
+ * allow offset fetch requests
+ * transition: last offsets removed in periodic expiration task => Dead
+ * join group from a new member => PreparingRebalance
+ * group is removed by partition emigration => Dead
+ * group is removed by expiration => Dead
+ */
+private[one10] case object Empty extends GroupState
/**
* Case class used to represent group metadata for the ListGroups API
@@ -51,6 +132,34 @@ case class CommitRecordMetadataAndOffset(appendedBatchOffset: Option[Long], offs
def olderThan(that: CommitRecordMetadataAndOffset) : Boolean = appendedBatchOffset.get < that.appendedBatchOffset.get
}
+object GroupMetadata {
+ private val validPreviousStates: Map[GroupState, Set[GroupState]] =
+ Map(Dead -> Set(Stable, PreparingRebalance, CompletingRebalance, Empty, Dead),
+ CompletingRebalance -> Set(PreparingRebalance),
+ Stable -> Set(CompletingRebalance),
+ PreparingRebalance -> Set(Stable, CompletingRebalance, Empty),
+ Empty -> Set(PreparingRebalance))
+
+ def loadGroup(groupId: String,
+ initialState: GroupState,
+ generationId: Int,
+ protocolType: String,
+ protocol: String,
+ leaderId: String,
+ currentStateTimestamp: Option[Long],
+ members: Iterable[MemberMetadata],
+ time: Time): GroupMetadata = {
+ val group = new GroupMetadata(groupId, initialState, time)
+ group.generationId = generationId
+ group.protocolType = if (protocolType == null || protocolType.isEmpty) None else Some(protocolType)
+ group.protocol = Option(protocol)
+ group.leaderId = Option(leaderId)
+ group.currentStateTimestamp = currentStateTimestamp
+ members.foreach(group.add(_, null))
+ group
+ }
+}
+
/**
* Group contains the following metadata:
*
@@ -65,14 +174,22 @@ case class CommitRecordMetadataAndOffset(appendedBatchOffset: Option[Long], offs
* 3. leader id
*/
@nonthreadsafe
-class GroupMetadata(val groupId: String
- , var protocolType: Option[String]
- , var generationId: Int
- , var protocol: Option[String]
- , var leaderId: Option[String]
- ) extends Logging {
+class GroupMetadata(val groupId: String, initialState: GroupState, time: Time) extends Logging {
+ type JoinCallback = JoinGroupResult => Unit
+
+ private[one10] val lock = new ReentrantLock
+
+ private var state: GroupState = initialState
+ var currentStateTimestamp: Option[Long] = Some(time.milliseconds())
+ var protocolType: Option[String] = None
+ var generationId = 0
+ private var leaderId: Option[String] = None
+ private var protocol: Option[String] = None
private val members = new mutable.HashMap[String, MemberMetadata]
+ private val pendingMembers = new mutable.HashSet[String]
+ private var numMembersAwaitingJoin = 0
+ private val supportedProtocols = new mutable.HashMap[String, Integer]().withDefaultValue(0)
private val offsets = new mutable.HashMap[TopicPartition, CommitRecordMetadataAndOffset]
private val pendingOffsetCommits = new mutable.HashMap[TopicPartition, OffsetAndMetadata]
private val pendingTransactionalOffsetCommits = new mutable.HashMap[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]()
@@ -81,109 +198,38 @@ class GroupMetadata(val groupId: String
var newMemberAdded: Boolean = false
+ def inLock[T](fun: => T): T = CoreUtils.inLock(lock)(fun)
+
+ def is(groupState: GroupState) = state == groupState
+ def not(groupState: GroupState) = state != groupState
def has(memberId: String) = members.contains(memberId)
def get(memberId: String) = members(memberId)
+ def size = members.size
def isLeader(memberId: String): Boolean = leaderId.contains(memberId)
def leaderOrNull: String = leaderId.orNull
def protocolOrNull: String = protocol.orNull
+ def currentStateTimestampOrDefault: Long = currentStateTimestamp.getOrElse(-1)
- def add(member: MemberMetadata) {
+ def add(member: MemberMetadata, callback: JoinCallback = null) {
if (members.isEmpty)
this.protocolType = Some(member.protocolType)
assert(groupId == member.groupId)
assert(this.protocolType.orNull == member.protocolType)
- assert(supportsProtocols(member.protocols))
+ //assert(supportsProtocols(member.protocolType, member.supportedProtocols))
if (leaderId.isEmpty)
leaderId = Some(member.memberId)
members.put(member.memberId, member)
- }
-
- def remove(memberId: String) {
- members.remove(memberId)
- if (isLeader(memberId)) {
- leaderId = if (members.isEmpty) {
- None
- } else {
- Some(members.keys.head)
- }
- }
+    member.supportedProtocols.foreach { case (_, protocolSet) => protocolSet.foreach { protocol => supportedProtocols(protocol) += 1 } }
}
def allMembers = members.keySet
def allMemberMetadata = members.values.toList
- // TODO: decide if ids should be predictable or random
- def generateMemberIdSuffix = UUID.randomUUID().toString
-
- private def candidateProtocols = {
- // get the set of protocols that are commonly supported by all members
- allMemberMetadata
- .map(_.protocols)
- .reduceLeft((commonProtocols, protocols) => commonProtocols & protocols)
- }
-
- def supportsProtocols(memberProtocols: Set[String]) = {
- members.isEmpty || (memberProtocols & candidateProtocols).nonEmpty
- }
-
- def overview: GroupOverview = {
- GroupOverview(groupId, protocolType.getOrElse(""))
- }
-
- def initializeOffsets(offsets: collection.Map[TopicPartition, CommitRecordMetadataAndOffset],
- pendingTxnOffsets: Map[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]) {
- this.offsets ++= offsets
- this.pendingTransactionalOffsetCommits ++= pendingTxnOffsets
- }
-
- def onOffsetCommitAppend(topicPartition: TopicPartition, offsetWithCommitRecordMetadata: CommitRecordMetadataAndOffset) {
- if (pendingOffsetCommits.contains(topicPartition)) {
- if (offsetWithCommitRecordMetadata.appendedBatchOffset.isEmpty)
- throw new IllegalStateException("Cannot complete offset commit write without providing the metadata of the record " +
- "in the log.")
- if (!offsets.contains(topicPartition) || offsets(topicPartition).olderThan(offsetWithCommitRecordMetadata))
- offsets.put(topicPartition, offsetWithCommitRecordMetadata)
- }
-
- pendingOffsetCommits.get(topicPartition) match {
- case Some(stagedOffset) if offsetWithCommitRecordMetadata.offsetAndMetadata == stagedOffset =>
- pendingOffsetCommits.remove(topicPartition)
- case _ =>
- // The pendingOffsetCommits for this partition could be empty if the topic was deleted, in which case
- // its entries would be removed from the cache by the `removeOffsets` method.
- }
- }
-
- def failPendingOffsetWrite(topicPartition: TopicPartition, offset: OffsetAndMetadata): Unit = {
- pendingOffsetCommits.get(topicPartition) match {
- case Some(pendingOffset) if offset == pendingOffset => pendingOffsetCommits.remove(topicPartition)
- case _ =>
- }
- }
-
- def prepareOffsetCommit(offsets: Map[TopicPartition, OffsetAndMetadata]) {
- receivedConsumerOffsetCommits = true
- pendingOffsetCommits ++= offsets
- }
-
- def prepareTxnOffsetCommit(producerId: Long, offsets: Map[TopicPartition, OffsetAndMetadata]) {
- trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $offsets is pending")
- receivedTransactionalOffsetCommits = true
- val producerOffsets = pendingTransactionalOffsetCommits.getOrElseUpdate(producerId,
- mutable.Map.empty[TopicPartition, CommitRecordMetadataAndOffset])
-
- offsets.foreach { case (topicPartition, offsetAndMetadata) =>
- producerOffsets.put(topicPartition, CommitRecordMetadataAndOffset(None, offsetAndMetadata))
- }
- }
-
- def hasReceivedConsistentOffsetCommits : Boolean = {
- !receivedConsumerOffsetCommits || !receivedTransactionalOffsetCommits
- }
+ def currentState = state
/* Remove a pending transactional offset commit if the actual offset commit record was not written to the log.
* We will return an error and the client will retry the request, potentially to a different coordinator.
@@ -246,9 +292,10 @@ class GroupMetadata(val groupId: String
def hasPendingOffsetCommitsFromProducer(producerId: Long) =
pendingTransactionalOffsetCommits.contains(producerId)
+ def removeAllOffsets(): immutable.Map[TopicPartition, OffsetAndMetadata] = removeOffsets(offsets.keySet.toSeq)
+
def removeOffsets(topicPartitions: Seq[TopicPartition]): immutable.Map[TopicPartition, OffsetAndMetadata] = {
topicPartitions.flatMap { topicPartition =>
-
pendingOffsetCommits.remove(topicPartition)
pendingTransactionalOffsetCommits.foreach { case (_, pendingOffsets) =>
pendingOffsets.remove(topicPartition)
@@ -258,18 +305,51 @@ class GroupMetadata(val groupId: String
}.toMap
}
- def removeExpiredOffsets(startMs: Long) : Map[TopicPartition, OffsetAndMetadata] = {
- val expiredOffsets = offsets
- .filter {
+ def removeExpiredOffsets(currentTimestamp: Long, offsetRetentionMs: Long) : Map[TopicPartition, OffsetAndMetadata] = {
+
+ def getExpiredOffsets(baseTimestamp: CommitRecordMetadataAndOffset => Long): Map[TopicPartition, OffsetAndMetadata] = {
+ offsets.filter {
case (topicPartition, commitRecordMetadataAndOffset) =>
- commitRecordMetadataAndOffset.offsetAndMetadata.expireTimestamp < startMs && !pendingOffsetCommits.contains(topicPartition)
- }
- .map {
+ !pendingOffsetCommits.contains(topicPartition) && {
+ commitRecordMetadataAndOffset.offsetAndMetadata.expireTimestamp match {
+ case None =>
+ // current version with no per partition retention
+ currentTimestamp - baseTimestamp(commitRecordMetadataAndOffset) >= offsetRetentionMs
+ case Some(expireTimestamp) =>
+                // older versions with an explicit expire_timestamp field => the old expiration semantics apply
+ currentTimestamp >= expireTimestamp
+ }
+ }
+ }.map {
case (topicPartition, commitRecordOffsetAndMetadata) =>
(topicPartition, commitRecordOffsetAndMetadata.offsetAndMetadata)
- }
+ }.toMap
+ }
+
+ val expiredOffsets: Map[TopicPartition, OffsetAndMetadata] = protocolType match {
+ case Some(_) if is(Empty) =>
+ // no consumer exists in the group =>
+ // - if current state timestamp exists and retention period has passed since group became Empty,
+ // expire all offsets with no pending offset commit;
+ // - if there is no current state timestamp (old group metadata schema) and retention period has passed
+ // since the last commit timestamp, expire the offset
+ getExpiredOffsets(commitRecordMetadataAndOffset =>
+ currentStateTimestamp.getOrElse(commitRecordMetadataAndOffset.offsetAndMetadata.commitTimestamp))
+
+ case None =>
+ // protocolType is None => standalone (simple) consumer, that uses Kafka for offset storage only
+        // expire offsets with no pending offset commit for which the retention period has passed since their last commit
+ getExpiredOffsets(_.offsetAndMetadata.commitTimestamp)
+
+ case _ =>
+ Map()
+ }
+
+ if (expiredOffsets.nonEmpty)
+ debug(s"Expired offsets from group '$groupId': ${expiredOffsets.keySet}")
+
offsets --= expiredOffsets.keySet
- expiredOffsets.toMap
+ expiredOffsets
}
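// Editor's note: a self-contained sketch of the expiration rule implemented
// above for the 2.1+ schema (no per-offset expire_timestamp): an offset
// expires once the retention period has elapsed since a base timestamp,
// which is the moment the group went Empty when known, else the offset's own
// commit time.
object OffsetExpiryDemo extends App {
  def isExpired(now: Long, retentionMs: Long, commitTimestamp: Long,
                emptySinceTimestamp: Option[Long]): Boolean = {
    val base = emptySinceTimestamp.getOrElse(commitTimestamp)
    now - base >= retentionMs
  }

  val dayMs = 24L * 60 * 60 * 1000
  // a group empty for 8 days under 7-day retention has expired offsets
  assert(isExpired(now = 9 * dayMs, retentionMs = 7 * dayMs,
                   commitTimestamp = 0L, emptySinceTimestamp = Some(1 * dayMs)))
}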
def allOffsets = offsets.map { case (topicPartition, commitRecordMetadataAndOffset) =>
@@ -279,17 +359,26 @@ class GroupMetadata(val groupId: String
def offset(topicPartition: TopicPartition): Option[OffsetAndMetadata] = offsets.get(topicPartition).map(_.offsetAndMetadata)
// visible for testing
- def offsetWithRecordMetadata(topicPartition: TopicPartition): Option[CommitRecordMetadataAndOffset] = offsets.get(topicPartition)
+ private[one10] def offsetWithRecordMetadata(topicPartition: TopicPartition): Option[CommitRecordMetadataAndOffset] = offsets.get(topicPartition)
def numOffsets = offsets.size
def hasOffsets = offsets.nonEmpty || pendingOffsetCommits.nonEmpty || pendingTransactionalOffsetCommits.nonEmpty
+ /*
+ private def assertValidTransition(targetState: GroupState) {
+ if (!GroupMetadata.validPreviousStates(targetState).contains(state))
+ throw new IllegalStateException("Group %s should be in the %s states before moving to %s state. Instead it is in %s state"
+ .format(groupId, GroupMetadata.validPreviousStates(targetState).mkString(","), targetState, state))
+ }
+ */
+
override def toString: String = {
"GroupMetadata(" +
s"groupId=$groupId, " +
s"generation=$generationId, " +
s"protocolType=$protocolType, " +
+ s"currentState=$currentState, " +
s"members=$members)"
}
@@ -338,6 +427,23 @@ object GroupMetadataManager {
private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp")
private val OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("expire_timestamp")
+ private val OFFSET_COMMIT_VALUE_SCHEMA_V2 = new Schema(new Field("offset", INT64),
+ new Field("metadata", STRING, "Associated metadata.", ""),
+ new Field("commit_timestamp", INT64))
+ private val OFFSET_VALUE_OFFSET_FIELD_V2 = OFFSET_COMMIT_VALUE_SCHEMA_V2.get("offset")
+ private val OFFSET_VALUE_METADATA_FIELD_V2 = OFFSET_COMMIT_VALUE_SCHEMA_V2.get("metadata")
+ private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V2 = OFFSET_COMMIT_VALUE_SCHEMA_V2.get("commit_timestamp")
+
+ private val OFFSET_COMMIT_VALUE_SCHEMA_V3 = new Schema(
+ new Field("offset", INT64),
+ new Field("leader_epoch", INT32),
+ new Field("metadata", STRING, "Associated metadata.", ""),
+ new Field("commit_timestamp", INT64))
+ private val OFFSET_VALUE_OFFSET_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("offset")
+ private val OFFSET_VALUE_LEADER_EPOCH_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("leader_epoch")
+ private val OFFSET_VALUE_METADATA_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("metadata")
+ private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("commit_timestamp")
+
private val GROUP_METADATA_KEY_SCHEMA = new Schema(new Field("group", STRING))
private val GROUP_KEY_GROUP_FIELD = GROUP_METADATA_KEY_SCHEMA.get("group")
@@ -368,27 +474,11 @@ object GroupMetadataManager {
private val MEMBER_METADATA_V2 = MEMBER_METADATA_V1
- private val OFFSET_COMMIT_VALUE_SCHEMA_V2 = new Schema(new Field("offset", INT64),
- new Field("metadata", STRING, "Associated metadata.", ""),
- new Field("commit_timestamp", INT64))
- private val OFFSET_VALUE_OFFSET_FIELD_V2 = OFFSET_COMMIT_VALUE_SCHEMA_V2.get("offset")
- private val OFFSET_VALUE_METADATA_FIELD_V2 = OFFSET_COMMIT_VALUE_SCHEMA_V2.get("metadata")
- private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V2 = OFFSET_COMMIT_VALUE_SCHEMA_V2.get("commit_timestamp")
-
- private val OFFSET_COMMIT_VALUE_SCHEMA_V3 = new Schema(
- new Field("offset", INT64),
- new Field("leader_epoch", INT32),
- new Field("metadata", STRING, "Associated metadata.", ""),
- new Field("commit_timestamp", INT64))
- private val OFFSET_VALUE_OFFSET_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("offset")
- private val OFFSET_VALUE_LEADER_EPOCH_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("leader_epoch")
- private val OFFSET_VALUE_METADATA_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("metadata")
- private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V3 = OFFSET_COMMIT_VALUE_SCHEMA_V3.get("commit_timestamp")
-
private val PROTOCOL_TYPE_KEY = "protocol_type"
private val GENERATION_KEY = "generation"
private val PROTOCOL_KEY = "protocol"
private val LEADER_KEY = "leader"
+ private val CURRENT_STATE_TIMESTAMP_KEY = "current_state_timestamp"
private val MEMBERS_KEY = "members"
private val GROUP_METADATA_VALUE_SCHEMA_V0 = new Schema(
@@ -410,6 +500,7 @@ object GroupMetadataManager {
new Field(GENERATION_KEY, INT32),
new Field(PROTOCOL_KEY, NULLABLE_STRING),
new Field(LEADER_KEY, NULLABLE_STRING),
+ new Field(CURRENT_STATE_TIMESTAMP_KEY, INT64),
new Field(MEMBERS_KEY, new ArrayOf(MEMBER_METADATA_V2)))
// map of versions to key schemas as data types
@@ -425,21 +516,20 @@ object GroupMetadataManager {
2 -> OFFSET_COMMIT_VALUE_SCHEMA_V2,
3 -> OFFSET_COMMIT_VALUE_SCHEMA_V3)
- private val CURRENT_OFFSET_VALUE_SCHEMA_VERSION = 1.toShort
-
// map of version of group metadata value schemas
private val GROUP_VALUE_SCHEMAS = Map(
0 -> GROUP_METADATA_VALUE_SCHEMA_V0,
1 -> GROUP_METADATA_VALUE_SCHEMA_V1,
2 -> GROUP_METADATA_VALUE_SCHEMA_V2)
- private val CURRENT_GROUP_VALUE_SCHEMA_VERSION = 1.toShort
-
private val CURRENT_OFFSET_KEY_SCHEMA = schemaForKey(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
private val CURRENT_GROUP_KEY_SCHEMA = schemaForKey(CURRENT_GROUP_KEY_SCHEMA_VERSION)
- private val CURRENT_OFFSET_VALUE_SCHEMA = schemaForOffset(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
- private val CURRENT_GROUP_VALUE_SCHEMA = schemaForGroup(CURRENT_GROUP_VALUE_SCHEMA_VERSION)
+ //private val CURRENT_OFFSET_VALUE_SCHEMA_VERSION = 1.toShort
+ //private val CURRENT_GROUP_VALUE_SCHEMA_VERSION = 1.toShort
+
+ //private val CURRENT_OFFSET_VALUE_SCHEMA = schemaForOffset(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
+ //private val CURRENT_GROUP_VALUE_SCHEMA = schemaForGroup(CURRENT_GROUP_VALUE_SCHEMA_VERSION)
private def schemaForKey(version: Int) = {
val schemaOpt = MESSAGE_TYPE_SCHEMAS.get(version)
@@ -449,7 +539,7 @@ object GroupMetadataManager {
}
}
- private def schemaForOffset(version: Int) = {
+ private def schemaForOffsetValue(version: Int) = {
val schemaOpt = OFFSET_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
@@ -457,7 +547,7 @@ object GroupMetadataManager {
}
}
- private def schemaForGroup(version: Int) = {
+ private def schemaForGroupValue(version: Int) = {
val schemaOpt = GROUP_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
@@ -470,8 +560,8 @@ object GroupMetadataManager {
*
* @return key for offset commit message
*/
- def offsetCommitKey(group: String, topicPartition: TopicPartition,
- versionId: Short = 0): Array[Byte] = {
+ private[one10] def offsetCommitKey(group: String,
+ topicPartition: TopicPartition): Array[Byte] = {
val key = new Struct(CURRENT_OFFSET_KEY_SCHEMA)
key.set(OFFSET_KEY_GROUP_FIELD, group)
key.set(OFFSET_KEY_TOPIC_FIELD, topicPartition.topic)
@@ -488,7 +578,7 @@ object GroupMetadataManager {
*
* @return key bytes for group metadata message
*/
- def groupMetadataKey(group: String): Array[Byte] = {
+ private[one10] def groupMetadataKey(group: String): Array[Byte] = {
val key = new Struct(CURRENT_GROUP_KEY_SCHEMA)
key.set(GROUP_KEY_GROUP_FIELD, group)
@@ -502,17 +592,41 @@ object GroupMetadataManager {
* Generates the payload for offset commit message from given offset and metadata
*
* @param offsetAndMetadata consumer's current offset and metadata
+ * @param apiVersion the api version
* @return payload for offset commit message
*/
- def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
- // generate commit value with schema version 1
- val value = new Struct(CURRENT_OFFSET_VALUE_SCHEMA)
- value.set(OFFSET_VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
- value.set(OFFSET_VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
- value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
- value.set(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1, offsetAndMetadata.expireTimestamp)
+ private[one10] def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata,
+ apiVersion: ApiVersion): Array[Byte] = {
+ // generate commit value according to schema version
+ val (version, value) = {
+ if (apiVersion < KAFKA_2_1_IV0 || offsetAndMetadata.expireTimestamp.nonEmpty) {
+ val value = new Struct(OFFSET_COMMIT_VALUE_SCHEMA_V1)
+ value.set(OFFSET_VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
+ value.set(OFFSET_VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
+ value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
+ // version 1 has a non empty expireTimestamp field
+ value.set(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1,
+ offsetAndMetadata.expireTimestamp.getOrElse(-1L))
+ (1, value)
+ } else if (apiVersion < KAFKA_2_1_IV1) {
+ val value = new Struct(OFFSET_COMMIT_VALUE_SCHEMA_V2)
+ value.set(OFFSET_VALUE_OFFSET_FIELD_V2, offsetAndMetadata.offset)
+ value.set(OFFSET_VALUE_METADATA_FIELD_V2, offsetAndMetadata.metadata)
+ value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V2, offsetAndMetadata.commitTimestamp)
+ (2, value)
+ } else {
+ val value = new Struct(OFFSET_COMMIT_VALUE_SCHEMA_V3)
+ value.set(OFFSET_VALUE_OFFSET_FIELD_V3, offsetAndMetadata.offset)
+ value.set(OFFSET_VALUE_LEADER_EPOCH_FIELD_V3,
+ offsetAndMetadata.leaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
+ value.set(OFFSET_VALUE_METADATA_FIELD_V3, offsetAndMetadata.metadata)
+ value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V3, offsetAndMetadata.commitTimestamp)
+ (3, value)
+ }
+ }
+
val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
- byteBuffer.putShort(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
+ byteBuffer.putShort(version.toShort)
value.writeTo(byteBuffer)
byteBuffer.array()
}
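// Editor's note: a toy round-trip of the wire layout used above: every
// offset-commit value is a 2-byte schema version followed by the
// version-specific struct, so readers first consume the short and then pick
// the matching schema, as readOffsetMessageValue does below.
import java.nio.ByteBuffer

object VersionedValueDemo extends App {
  def encode(version: Short, payload: Array[Byte]): Array[Byte] = {
    val buf = ByteBuffer.allocate(2 + payload.length)
    buf.putShort(version) // schema version prefix
    buf.put(payload)
    buf.array()
  }

  def decodeVersion(bytes: Array[Byte]): Short =
    ByteBuffer.wrap(bytes).getShort

  assert(decodeVersion(encode(3, Array[Byte](1, 2, 3))) == 3)
}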
@@ -542,7 +656,7 @@ object GroupMetadataManager {
GroupMetadataKey(version, group)
} else {
- throw new IllegalStateException("Unknown version " + version + " for group metadata message")
+ throw new IllegalStateException(s"Unknown group metadata message version: $version")
}
}
@@ -557,7 +671,7 @@ object GroupMetadataManager {
null
} else {
val version = buffer.getShort
- val valueSchema = schemaForOffset(version)
+ val valueSchema = schemaForOffsetValue(version)
val value = valueSchema.read(buffer)
if (version == 0) {
@@ -572,7 +686,10 @@ object GroupMetadataManager {
val commitTimestamp = value.get(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
val expireTimestamp = value.get(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
- OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
+ if (expireTimestamp == -1L)
+ OffsetAndMetadata(offset, metadata, commitTimestamp)
+ else
+ OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
} else if (version == 2) {
val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V2).asInstanceOf[Long]
val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V2).asInstanceOf[String]
@@ -585,34 +702,47 @@ object GroupMetadataManager {
val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V3).asInstanceOf[String]
val commitTimestamp = value.get(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V3).asInstanceOf[Long]
- // val leaderEpochOpt: Optional[Integer] = if (leaderEpoch < 0) Optional.empty() else Optional.of(leaderEpoch)
- OffsetAndMetadata(offset, metadata, commitTimestamp)
+ val leaderEpochOpt: Optional[Integer] = if (leaderEpoch < 0) Optional.empty() else Optional.of(leaderEpoch)
+ OffsetAndMetadata(offset, leaderEpochOpt, metadata, commitTimestamp)
} else {
- throw new IllegalStateException("Unknown offset message version")
+ throw new IllegalStateException(s"Unknown offset message version: $version")
}
}
}
+
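
Review note: the v1 decode above maps the -1L expire-timestamp sentinel back to the constructor without an expire timestamp, and the v3 path applies the same convention to the leader epoch. A tiny sketch of that sentinel convention (hypothetical helper, assuming -1 always encodes "absent"):

    // Mirror of the sentinel handling used in the decode paths above.
    def sentinelToOption(raw: Long): Option[Long] =
      if (raw == -1L) None else Some(raw)
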
/**
- * Decodes the group metadata messages' payload and retrieves its member metadatafrom it
+ * Decodes the group metadata messages' payload and retrieves its member metadata from it
*
* @param buffer input byte-buffer
+ * @param time the time instance to use
* @return a group metadata object from the message
*/
- def readGroupMessageValue(groupId: String, buffer: ByteBuffer): GroupMetadata = {
+ def readGroupMessageValue(groupId: String, buffer: ByteBuffer, time: Time): GroupMetadata = {
if (buffer == null) { // tombstone
null
} else {
val version = buffer.getShort
- val valueSchema = schemaForGroup(version)
+ val valueSchema = schemaForGroupValue(version)
val value = valueSchema.read(buffer)
- if (version == 0 || version == 1) {
+ if (version >= 0 && version <= 2) {
val generationId = value.get(GENERATION_KEY).asInstanceOf[Int]
val protocolType = value.get(PROTOCOL_TYPE_KEY).asInstanceOf[String]
val protocol = value.get(PROTOCOL_KEY).asInstanceOf[String]
val leaderId = value.get(LEADER_KEY).asInstanceOf[String]
val memberMetadataArray = value.getArray(MEMBERS_KEY)
+ val initialState = if (memberMetadataArray.isEmpty) Empty else Stable
+ val currentStateTimestamp: Option[Long] = version match {
+ case version if version == 2 =>
+ if (value.hasField(CURRENT_STATE_TIMESTAMP_KEY)) {
+ val timestamp = value.getLong(CURRENT_STATE_TIMESTAMP_KEY)
+ if (timestamp == -1) None else Some(timestamp)
+ } else
+ None
+ case _ =>
+ None
+ }
val members = memberMetadataArray.map { memberMetadataObj =>
val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
@@ -630,21 +760,14 @@ object GroupMetadataManager {
, assignment.partitions().asScala.map(tp => (tp.topic(), tp.partition())).toSet)
member
}
- val finalProtocolType = if (protocolType == null || protocolType.isEmpty) None else Some(protocolType)
- val group = new GroupMetadata(groupId = groupId
- , generationId = generationId
- , protocolType = finalProtocolType
- , protocol = Option(protocol)
- , leaderId = Option(leaderId)
- )
- members.foreach(group.add)
- group
+ GroupMetadata.loadGroup(groupId, initialState, generationId, protocolType, protocol, leaderId, currentStateTimestamp, members, time)
} else {
- throw new IllegalStateException("Unknown group metadata message version")
+ throw new IllegalStateException(s"Unknown group metadata message version: $version")
}
}
}
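
Review note: like readOffsetMessageValue, readGroupMessageValue returns null for tombstones, so callers should guard before use. A minimal usage sketch (hypothetical wrapper, assuming valueBytes comes from a __consumer_offsets record and may be null):

    import java.nio.ByteBuffer
    import org.apache.kafka.common.utils.Time

    def decodeGroup(groupId: String, valueBytes: Array[Byte]): Option[GroupMetadata] =
      Option(valueBytes).map { bytes =>
        GroupMetadataManager.readGroupMessageValue(groupId, ByteBuffer.wrap(bytes), Time.SYSTEM)
      }
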
+
// Formatter for use with tools such as console consumer: Consumer should also set exclude.internal.topics to false.
// (specify --formatter "kafka.coordinator.group.GroupMetadataManager\$OffsetsMessageFormatter" when consuming __consumer_offsets)
class OffsetsMessageFormatter extends MessageFormatter {
@@ -678,7 +801,7 @@ object GroupMetadataManager {
val value = consumerRecord.value
val formattedValue =
if (value == null) "NULL"
- else GroupMetadataManager.readGroupMessageValue(groupId, ByteBuffer.wrap(value)).toString
+ else GroupMetadataManager.readGroupMessageValue(groupId, ByteBuffer.wrap(value), Time.SYSTEM).toString
output.write(groupId.getBytes(StandardCharsets.UTF_8))
output.write("::".getBytes(StandardCharsets.UTF_8))
output.write(formattedValue.getBytes(StandardCharsets.UTF_8))
diff --git a/app/kafka/manager/utils/one10/LogConfig.scala b/app/kafka/manager/utils/one10/LogConfig.scala
new file mode 100644
index 000000000..c8dbec963
--- /dev/null
+++ b/app/kafka/manager/utils/one10/LogConfig.scala
@@ -0,0 +1,341 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.manager.utils.one10
+
+import java.util.{Collections, Locale, Properties}
+
+import scala.collection.JavaConverters._
+import kafka.api.ApiVersion
+import kafka.manager.utils.TopicConfigs
+import kafka.message.BrokerCompressionCodec
+import kafka.server.{KafkaConfig, ThrottledReplicaListValidator}
+import kafka.utils.Implicits._
+import org.apache.kafka.common.errors.InvalidConfigurationException
+import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, TopicConfig}
+import org.apache.kafka.common.record.{LegacyRecord, TimestampType}
+import org.apache.kafka.common.utils.Utils
+
+import scala.collection.{Map, mutable}
+import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList, Validator}
+
+object Defaults {
+ val SegmentSize = kafka.server.Defaults.LogSegmentBytes
+ val SegmentMs = kafka.server.Defaults.LogRollHours * 60 * 60 * 1000L
+ val SegmentJitterMs = kafka.server.Defaults.LogRollJitterHours * 60 * 60 * 1000L
+ val FlushInterval = kafka.server.Defaults.LogFlushIntervalMessages
+ val FlushMs = kafka.server.Defaults.LogFlushSchedulerIntervalMs
+ val RetentionSize = kafka.server.Defaults.LogRetentionBytes
+ val RetentionMs = kafka.server.Defaults.LogRetentionHours * 60 * 60 * 1000L
+ val MaxMessageSize = kafka.server.Defaults.MessageMaxBytes
+ val MaxIndexSize = kafka.server.Defaults.LogIndexSizeMaxBytes
+ val IndexInterval = kafka.server.Defaults.LogIndexIntervalBytes
+ val FileDeleteDelayMs = kafka.server.Defaults.LogDeleteDelayMs
+ val DeleteRetentionMs = kafka.server.Defaults.LogCleanerDeleteRetentionMs
+ val MinCompactionLagMs = kafka.server.Defaults.LogCleanerMinCompactionLagMs
+ val MinCleanableDirtyRatio = kafka.server.Defaults.LogCleanerMinCleanRatio
+
+ @deprecated(message = "This is a misleading variable name as it actually refers to the 'delete' cleanup policy. Use " +
+ "`CleanupPolicy` instead.", since = "1.0.0")
+ val Compact = kafka.server.Defaults.LogCleanupPolicy
+
+ val CleanupPolicy = kafka.server.Defaults.LogCleanupPolicy
+ val UncleanLeaderElectionEnable = kafka.server.Defaults.UncleanLeaderElectionEnable
+ val MinInSyncReplicas = kafka.server.Defaults.MinInSyncReplicas
+ val CompressionType = kafka.server.Defaults.CompressionType
+ val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable
+ val MessageFormatVersion = kafka.server.Defaults.LogMessageFormatVersion
+ val MessageTimestampType = kafka.server.Defaults.LogMessageTimestampType
+ val MessageTimestampDifferenceMaxMs = kafka.server.Defaults.LogMessageTimestampDifferenceMaxMs
+ val LeaderReplicationThrottledReplicas = Collections.emptyList[String]()
+ val FollowerReplicationThrottledReplicas = Collections.emptyList[String]()
+ val MaxIdMapSnapshots = kafka.server.Defaults.MaxIdMapSnapshots
+}
+
+case class LogConfig(props: java.util.Map[_, _], overriddenConfigs: Set[String] = Set.empty)
+ extends AbstractConfig(LogConfig.configDef, props, false) {
+ /**
+ * Important note: Any configuration parameter that is passed along from KafkaConfig to LogConfig
+ * should also go in kafka.server.KafkaServer.copyKafkaConfigToLog.
+ */
+ val segmentSize = getInt(LogConfig.SegmentBytesProp)
+ val segmentMs = getLong(LogConfig.SegmentMsProp)
+ val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp)
+ val maxIndexSize = getInt(LogConfig.SegmentIndexBytesProp)
+ val flushInterval = getLong(LogConfig.FlushMessagesProp)
+ val flushMs = getLong(LogConfig.FlushMsProp)
+ val retentionSize = getLong(LogConfig.RetentionBytesProp)
+ val retentionMs = getLong(LogConfig.RetentionMsProp)
+ val maxMessageSize = getInt(LogConfig.MaxMessageBytesProp)
+ val indexInterval = getInt(LogConfig.IndexIntervalBytesProp)
+ val fileDeleteDelayMs = getLong(LogConfig.FileDeleteDelayMsProp)
+ val deleteRetentionMs = getLong(LogConfig.DeleteRetentionMsProp)
+ val compactionLagMs = getLong(LogConfig.MinCompactionLagMsProp)
+ val minCleanableRatio = getDouble(LogConfig.MinCleanableDirtyRatioProp)
+ val compact = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Compact)
+ val delete = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Delete)
+ val uncleanLeaderElectionEnable = getBoolean(LogConfig.UncleanLeaderElectionEnableProp)
+ val minInSyncReplicas = getInt(LogConfig.MinInSyncReplicasProp)
+ val compressionType = getString(LogConfig.CompressionTypeProp).toLowerCase(Locale.ROOT)
+ val preallocate = getBoolean(LogConfig.PreAllocateEnableProp)
+ val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp))
+ val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp))
+ val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue
+ val LeaderReplicationThrottledReplicas = getList(LogConfig.LeaderReplicationThrottledReplicasProp)
+ val FollowerReplicationThrottledReplicas = getList(LogConfig.FollowerReplicationThrottledReplicasProp)
+
+ def randomSegmentJitter: Long =
+ if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs)
+}
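
Review note: because LogConfig extends AbstractConfig, any key absent from props falls back to the broker-level default defined in Defaults above. A minimal construction sketch (values are illustrative only):

    import java.util.Properties

    val props = new Properties()
    props.put(LogConfig.RetentionMsProp, "604800000") // 7 days, as a topic-level override
    val cfg = LogConfig(props)
    // Unset keys resolve to broker defaults, e.g. cfg.segmentSize == Defaults.SegmentSize
    println(cfg.retentionMs)
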
+
+object LogConfig extends TopicConfigs {
+
+ def main(args: Array[String]) {
+ println(configDef.toHtmlTable)
+ }
+
+ val SegmentBytesProp = TopicConfig.SEGMENT_BYTES_CONFIG
+ val SegmentMsProp = TopicConfig.SEGMENT_MS_CONFIG
+ val SegmentJitterMsProp = TopicConfig.SEGMENT_JITTER_MS_CONFIG
+ val SegmentIndexBytesProp = TopicConfig.SEGMENT_INDEX_BYTES_CONFIG
+ val FlushMessagesProp = TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG
+ val FlushMsProp = TopicConfig.FLUSH_MS_CONFIG
+ val RetentionBytesProp = TopicConfig.RETENTION_BYTES_CONFIG
+ val RetentionMsProp = TopicConfig.RETENTION_MS_CONFIG
+ val MaxMessageBytesProp = TopicConfig.MAX_MESSAGE_BYTES_CONFIG
+ val IndexIntervalBytesProp = TopicConfig.INDEX_INTERVAL_BYTES_CONFIG
+ val DeleteRetentionMsProp = TopicConfig.DELETE_RETENTION_MS_CONFIG
+ val MinCompactionLagMsProp = TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG
+ val FileDeleteDelayMsProp = TopicConfig.FILE_DELETE_DELAY_MS_CONFIG
+ val MinCleanableDirtyRatioProp = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG
+ val CleanupPolicyProp = TopicConfig.CLEANUP_POLICY_CONFIG
+ val Delete = TopicConfig.CLEANUP_POLICY_DELETE
+ val Compact = TopicConfig.CLEANUP_POLICY_COMPACT
+ val UncleanLeaderElectionEnableProp = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG
+ val MinInSyncReplicasProp = TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG
+ val CompressionTypeProp = TopicConfig.COMPRESSION_TYPE_CONFIG
+ val PreAllocateEnableProp = TopicConfig.PREALLOCATE_CONFIG
+ val MessageFormatVersionProp = TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG
+ val MessageTimestampTypeProp = TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG
+ val MessageTimestampDifferenceMaxMsProp = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG
+
+ // Leave these out of TopicConfig for now as they are replication quota configs
+ val LeaderReplicationThrottledReplicasProp = "leader.replication.throttled.replicas"
+ val FollowerReplicationThrottledReplicasProp = "follower.replication.throttled.replicas"
+
+ val SegmentSizeDoc = TopicConfig.SEGMENT_BYTES_DOC
+ val SegmentMsDoc = TopicConfig.SEGMENT_MS_DOC
+ val SegmentJitterMsDoc = TopicConfig.SEGMENT_JITTER_MS_DOC
+ val MaxIndexSizeDoc = TopicConfig.SEGMENT_INDEX_BYTES_DOC
+ val FlushIntervalDoc = TopicConfig.FLUSH_MESSAGES_INTERVAL_DOC
+ val FlushMsDoc = TopicConfig.FLUSH_MS_DOC
+ val RetentionSizeDoc = TopicConfig.RETENTION_BYTES_DOC
+ val RetentionMsDoc = TopicConfig.RETENTION_MS_DOC
+ val MaxMessageSizeDoc = TopicConfig.MAX_MESSAGE_BYTES_DOC
+ val IndexIntervalDoc = TopicConfig.INDEX_INTERVAL_BYTES_DOCS
+ val FileDeleteDelayMsDoc = TopicConfig.FILE_DELETE_DELAY_MS_DOC
+ val DeleteRetentionMsDoc = TopicConfig.DELETE_RETENTION_MS_DOC
+ val MinCompactionLagMsDoc = TopicConfig.MIN_COMPACTION_LAG_MS_DOC
+ val MinCleanableRatioDoc = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_DOC
+ val CompactDoc = TopicConfig.CLEANUP_POLICY_DOC
+ val UncleanLeaderElectionEnableDoc = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_DOC
+ val MinInSyncReplicasDoc = TopicConfig.MIN_IN_SYNC_REPLICAS_DOC
+ val CompressionTypeDoc = TopicConfig.COMPRESSION_TYPE_DOC
+ val PreAllocateEnableDoc = TopicConfig.PREALLOCATE_DOC
+ val MessageFormatVersionDoc = TopicConfig.MESSAGE_FORMAT_VERSION_DOC
+ val MessageTimestampTypeDoc = TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC
+ val MessageTimestampDifferenceMaxMsDoc = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC
+
+ val LeaderReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
+ "the leader side. The list should describe a set of replicas in the form " +
+ "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
+ "all replicas for this topic."
+  val FollowerReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
+    "the follower side. The list should describe a set of replicas in the form " +
+    "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
+    "all replicas for this topic."
+
+ private class LogConfigDef extends ConfigDef {
+
+ private final val serverDefaultConfigNames = mutable.Map[String, String]()
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, validator: Validator,
+ importance: ConfigDef.Importance, doc: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, validator, importance, doc)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, importance: ConfigDef.Importance,
+ documentation: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, importance: ConfigDef.Importance, documentation: String,
+ serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ override def headers = List("Name", "Description", "Type", "Default", "Valid Values", "Server Default Property", "Importance").asJava
+
+ override def getConfigValue(key: ConfigKey, headerName: String): String = {
+ headerName match {
+ case "Server Default Property" => serverDefaultConfigNames.get(key.name).get
+ case _ => super.getConfigValue(key, headerName)
+ }
+ }
+
+ def serverConfigName(configName: String): Option[String] = serverDefaultConfigNames.get(configName)
+ }
+
+ private val configDef: LogConfigDef = {
+ import org.apache.kafka.common.config.ConfigDef.Importance._
+ import org.apache.kafka.common.config.ConfigDef.Range._
+ import org.apache.kafka.common.config.ConfigDef.Type._
+ import org.apache.kafka.common.config.ConfigDef.ValidString._
+
+ new LogConfigDef()
+ .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), MEDIUM,
+ SegmentSizeDoc, KafkaConfig.LogSegmentBytesProp)
+ .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(0), MEDIUM, SegmentMsDoc,
+ KafkaConfig.LogRollTimeMillisProp)
+ .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc,
+ KafkaConfig.LogRollTimeJitterMillisProp)
+ .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc,
+ KafkaConfig.LogIndexSizeMaxBytesProp)
+ .define(FlushMessagesProp, LONG, Defaults.FlushInterval, atLeast(0), MEDIUM, FlushIntervalDoc,
+ KafkaConfig.LogFlushIntervalMessagesProp)
+ .define(FlushMsProp, LONG, Defaults.FlushMs, atLeast(0), MEDIUM, FlushMsDoc,
+ KafkaConfig.LogFlushIntervalMsProp)
+ // can be negative. See kafka.log.LogManager.cleanupSegmentsToMaintainSize
+ .define(RetentionBytesProp, LONG, Defaults.RetentionSize, MEDIUM, RetentionSizeDoc,
+ KafkaConfig.LogRetentionBytesProp)
+ // can be negative. See kafka.log.LogManager.cleanupExpiredSegments
+ .define(RetentionMsProp, LONG, Defaults.RetentionMs, MEDIUM, RetentionMsDoc,
+ KafkaConfig.LogRetentionTimeMillisProp)
+ .define(MaxMessageBytesProp, INT, Defaults.MaxMessageSize, atLeast(0), MEDIUM, MaxMessageSizeDoc,
+ KafkaConfig.MessageMaxBytesProp)
+ .define(IndexIntervalBytesProp, INT, Defaults.IndexInterval, atLeast(0), MEDIUM, IndexIntervalDoc,
+ KafkaConfig.LogIndexIntervalBytesProp)
+ .define(DeleteRetentionMsProp, LONG, Defaults.DeleteRetentionMs, atLeast(0), MEDIUM,
+ DeleteRetentionMsDoc, KafkaConfig.LogCleanerDeleteRetentionMsProp)
+ .define(MinCompactionLagMsProp, LONG, Defaults.MinCompactionLagMs, atLeast(0), MEDIUM, MinCompactionLagMsDoc,
+ KafkaConfig.LogCleanerMinCompactionLagMsProp)
+ .define(FileDeleteDelayMsProp, LONG, Defaults.FileDeleteDelayMs, atLeast(0), MEDIUM, FileDeleteDelayMsDoc,
+ KafkaConfig.LogDeleteDelayMsProp)
+ .define(MinCleanableDirtyRatioProp, DOUBLE, Defaults.MinCleanableDirtyRatio, between(0, 1), MEDIUM,
+ MinCleanableRatioDoc, KafkaConfig.LogCleanerMinCleanRatioProp)
+ .define(CleanupPolicyProp, LIST, Defaults.CleanupPolicy, ValidList.in(LogConfig.Compact, LogConfig.Delete), MEDIUM, CompactDoc,
+ KafkaConfig.LogCleanupPolicyProp)
+ .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable,
+ MEDIUM, UncleanLeaderElectionEnableDoc, KafkaConfig.UncleanLeaderElectionEnableProp)
+ .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), MEDIUM, MinInSyncReplicasDoc,
+ KafkaConfig.MinInSyncReplicasProp)
+ .define(CompressionTypeProp, STRING, Defaults.CompressionType, in(BrokerCompressionCodec.brokerCompressionOptions:_*),
+ MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp)
+ .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc,
+ KafkaConfig.LogPreAllocateProp)
+ .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, MEDIUM, MessageFormatVersionDoc,
+ KafkaConfig.LogMessageFormatVersionProp)
+ .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, MEDIUM, MessageTimestampTypeDoc,
+ KafkaConfig.LogMessageTimestampTypeProp)
+ .define(MessageTimestampDifferenceMaxMsProp, LONG, Defaults.MessageTimestampDifferenceMaxMs,
+ atLeast(0), MEDIUM, MessageTimestampDifferenceMaxMsDoc, KafkaConfig.LogMessageTimestampDifferenceMaxMsProp)
+ .define(LeaderReplicationThrottledReplicasProp, LIST, Defaults.LeaderReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ LeaderReplicationThrottledReplicasDoc, LeaderReplicationThrottledReplicasProp)
+ .define(FollowerReplicationThrottledReplicasProp, LIST, Defaults.FollowerReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ FollowerReplicationThrottledReplicasDoc, FollowerReplicationThrottledReplicasProp)
+ }
+
+ def apply(): LogConfig = LogConfig(new Properties())
+
+ def configNames: Seq[String] = configDef.names.asScala.toSeq.sorted
+
+ def serverConfigName(configName: String): Option[String] = configDef.serverConfigName(configName)
+
+ /**
+ * Create a log config instance using the given properties and defaults
+ */
+ def fromProps(defaults: java.util.Map[_ <: Object, _ <: Object], overrides: Properties): LogConfig = {
+ val props = new Properties()
+ defaults.asScala.foreach { case (k, v) => props.put(k, v) }
+ props ++= overrides
+ val overriddenKeys = overrides.keySet.asScala.map(_.asInstanceOf[String]).toSet
+ new LogConfig(props, overriddenKeys)
+ }
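
Review note: fromProps layers topic overrides on top of broker defaults and remembers which keys were overridden. A small sketch, where brokerDefaults stands in for the broker's config map (an assumption for illustration):

    import java.util.Properties
    import scala.collection.JavaConverters._

    val brokerDefaults = Map[String, Object]("retention.ms" -> "86400000").asJava
    val overrides = new Properties()
    overrides.put("cleanup.policy", "compact")
    val cfg = LogConfig.fromProps(brokerDefaults, overrides)
    // cfg.overriddenConfigs == Set("cleanup.policy")
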
+
+ /**
+ * Check that property names are valid
+ */
+ def validateNames(props: Properties) {
+ val names = configNames
+ for(name <- props.asScala.keys)
+ if (!names.contains(name))
+ throw new InvalidConfigurationException(s"Unknown topic config name: $name")
+ }
+
+ /**
+ * Check that the given properties contain only valid log config names and that all values can be parsed and are valid
+ */
+ def validate(props: Properties) {
+ validateNames(props)
+ configDef.parse(props)
+ }
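
Review note: validate rejects unknown keys via validateNames and then parses every value against the ConfigDef, so a typo'd name and a malformed value both fail in the same call. For example (illustrative):

    val p = new java.util.Properties()
    p.put("retention.msx", "123") // misspelled key
    // LogConfig.validate(p) throws InvalidConfigurationException("Unknown topic config name: retention.msx")
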
+
+ /**
+ * Map topic config to the broker config with highest priority. Some of these have additional synonyms
+ * that can be obtained using kafka.server.DynamicBrokerConfig#brokerConfigSynonyms
+ */
+ val TopicConfigSynonyms = Map(
+ SegmentBytesProp -> KafkaConfig.LogSegmentBytesProp,
+ SegmentMsProp -> KafkaConfig.LogRollTimeMillisProp,
+ SegmentJitterMsProp -> KafkaConfig.LogRollTimeJitterMillisProp,
+ SegmentIndexBytesProp -> KafkaConfig.LogIndexSizeMaxBytesProp,
+ FlushMessagesProp -> KafkaConfig.LogFlushIntervalMessagesProp,
+ FlushMsProp -> KafkaConfig.LogFlushIntervalMsProp,
+ RetentionBytesProp -> KafkaConfig.LogRetentionBytesProp,
+ RetentionMsProp -> KafkaConfig.LogRetentionTimeMillisProp,
+ MaxMessageBytesProp -> KafkaConfig.MessageMaxBytesProp,
+ IndexIntervalBytesProp -> KafkaConfig.LogIndexIntervalBytesProp,
+ DeleteRetentionMsProp -> KafkaConfig.LogCleanerDeleteRetentionMsProp,
+ MinCompactionLagMsProp -> KafkaConfig.LogCleanerMinCompactionLagMsProp,
+ FileDeleteDelayMsProp -> KafkaConfig.LogDeleteDelayMsProp,
+ MinCleanableDirtyRatioProp -> KafkaConfig.LogCleanerMinCleanRatioProp,
+ CleanupPolicyProp -> KafkaConfig.LogCleanupPolicyProp,
+ UncleanLeaderElectionEnableProp -> KafkaConfig.UncleanLeaderElectionEnableProp,
+ MinInSyncReplicasProp -> KafkaConfig.MinInSyncReplicasProp,
+ CompressionTypeProp -> KafkaConfig.CompressionTypeProp,
+ PreAllocateEnableProp -> KafkaConfig.LogPreAllocateProp,
+ MessageFormatVersionProp -> KafkaConfig.LogMessageFormatVersionProp,
+ MessageTimestampTypeProp -> KafkaConfig.LogMessageTimestampTypeProp,
+ MessageTimestampDifferenceMaxMsProp -> KafkaConfig.LogMessageTimestampDifferenceMaxMsProp
+ )
+
+ def configNamesAndDoc: Seq[(String, String)] = {
+ Option(configDef).fold {
+ configNames.map(n => n -> "")
+ } {
+ configDef =>
+ val keyMap = configDef.configKeys()
+ configNames.map(n => n -> Option(keyMap.get(n)).map(_.documentation).flatMap(Option.apply).getOrElse(""))
+ }
+ }
+}
diff --git a/app/kafka/manager/utils/one10/MemberMetadata.scala b/app/kafka/manager/utils/one10/MemberMetadata.scala
index 81bf47ecc..d2e1bc236 100644
--- a/app/kafka/manager/utils/one10/MemberMetadata.scala
+++ b/app/kafka/manager/utils/one10/MemberMetadata.scala
@@ -18,12 +18,25 @@
package kafka.manager.utils.one10
import java.nio.ByteBuffer
+import org.apache.kafka.clients.admin.{ConsumerGroupDescription, MemberDescription}
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.requests.DescribeGroupsResponse
import org.apache.kafka.common.utils.Utils
object MemberMetadata {
import collection.JavaConverters._
+ def from(groupId: String, groupSummary: ConsumerGroupDescription, memberSummary: MemberDescription) : MemberMetadata = {
+ val assignment = memberSummary.assignment().topicPartitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
+ MemberMetadata(
+ memberSummary.consumerId()
+ , groupId
+ , memberSummary.clientId
+ , memberSummary.host()
+ , "(n/a on backfill)"
+ , List.empty
+ , assignment
+ )
+ }
def from(groupId: String, groupSummary: DescribeGroupsResponse.GroupMetadata, memberSummary: DescribeGroupsResponse.GroupMember) : MemberMetadata = {
val assignment = ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(Utils.readBytes(memberSummary.memberAssignment)))
val topics: Set[String] = {
@@ -39,7 +52,7 @@ object MemberMetadata {
memberSummary.memberId
, groupId
, memberSummary.clientId
- , memberSummary. clientHost
+ , memberSummary.clientHost
, groupSummary.protocolType()
, List((groupSummary.protocol, topics))
, assignment.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
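
Review note: the new overload builds MemberMetadata from AdminClient describe output rather than the legacy DescribeGroupsResponse. A minimal usage sketch, assuming an AdminClient new enough to support describeConsumerGroups (KIP-222); membersOf is a hypothetical helper:

    import org.apache.kafka.clients.admin.AdminClient
    import scala.collection.JavaConverters._

    def membersOf(admin: AdminClient, groupId: String): List[MemberMetadata] = {
      val desc = admin.describeConsumerGroups(java.util.Collections.singleton(groupId))
        .describedGroups().get(groupId).get() // blocking for brevity
      desc.members().asScala.toList.map(m => MemberMetadata.from(groupId, desc, m))
    }
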
diff --git a/app/kafka/manager/utils/two00/LogConfig.scala b/app/kafka/manager/utils/two00/LogConfig.scala
new file mode 100644
index 000000000..fb6287c2f
--- /dev/null
+++ b/app/kafka/manager/utils/two00/LogConfig.scala
@@ -0,0 +1,348 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.manager.utils.two00
+
+import java.util.{Collections, Locale, Properties}
+
+import scala.collection.JavaConverters._
+import kafka.api.{ApiVersion, ApiVersionValidator}
+import kafka.manager.utils.TopicConfigs
+import kafka.message.BrokerCompressionCodec
+import kafka.server.{KafkaConfig, ThrottledReplicaListValidator}
+import kafka.utils.Implicits._
+import org.apache.kafka.common.errors.InvalidConfigurationException
+import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, TopicConfig}
+import org.apache.kafka.common.record.{LegacyRecord, TimestampType}
+import org.apache.kafka.common.utils.Utils
+
+import scala.collection.{Map, mutable}
+import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList, Validator}
+
+object Defaults {
+ val SegmentSize = kafka.server.Defaults.LogSegmentBytes
+ val SegmentMs = kafka.server.Defaults.LogRollHours * 60 * 60 * 1000L
+ val SegmentJitterMs = kafka.server.Defaults.LogRollJitterHours * 60 * 60 * 1000L
+ val FlushInterval = kafka.server.Defaults.LogFlushIntervalMessages
+ val FlushMs = kafka.server.Defaults.LogFlushSchedulerIntervalMs
+ val RetentionSize = kafka.server.Defaults.LogRetentionBytes
+ val RetentionMs = kafka.server.Defaults.LogRetentionHours * 60 * 60 * 1000L
+ val MaxMessageSize = kafka.server.Defaults.MessageMaxBytes
+ val MaxIndexSize = kafka.server.Defaults.LogIndexSizeMaxBytes
+ val IndexInterval = kafka.server.Defaults.LogIndexIntervalBytes
+ val FileDeleteDelayMs = kafka.server.Defaults.LogDeleteDelayMs
+ val DeleteRetentionMs = kafka.server.Defaults.LogCleanerDeleteRetentionMs
+ val MinCompactionLagMs = kafka.server.Defaults.LogCleanerMinCompactionLagMs
+ val MinCleanableDirtyRatio = kafka.server.Defaults.LogCleanerMinCleanRatio
+
+ @deprecated(message = "This is a misleading variable name as it actually refers to the 'delete' cleanup policy. Use " +
+ "`CleanupPolicy` instead.", since = "1.0.0")
+ val Compact = kafka.server.Defaults.LogCleanupPolicy
+
+ val CleanupPolicy = kafka.server.Defaults.LogCleanupPolicy
+ val UncleanLeaderElectionEnable = kafka.server.Defaults.UncleanLeaderElectionEnable
+ val MinInSyncReplicas = kafka.server.Defaults.MinInSyncReplicas
+ val CompressionType = kafka.server.Defaults.CompressionType
+ val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable
+ val MessageFormatVersion = kafka.server.Defaults.LogMessageFormatVersion
+ val MessageTimestampType = kafka.server.Defaults.LogMessageTimestampType
+ val MessageTimestampDifferenceMaxMs = kafka.server.Defaults.LogMessageTimestampDifferenceMaxMs
+ val LeaderReplicationThrottledReplicas = Collections.emptyList[String]()
+ val FollowerReplicationThrottledReplicas = Collections.emptyList[String]()
+ val MaxIdMapSnapshots = kafka.server.Defaults.MaxIdMapSnapshots
+ val MessageDownConversionEnable = kafka.server.Defaults.MessageDownConversionEnable
+}
+
+case class LogConfig(props: java.util.Map[_, _], overriddenConfigs: Set[String] = Set.empty)
+ extends AbstractConfig(LogConfig.configDef, props, false) {
+ /**
+ * Important note: Any configuration parameter that is passed along from KafkaConfig to LogConfig
+ * should also go in kafka.server.KafkaServer.copyKafkaConfigToLog.
+ */
+ val segmentSize = getInt(LogConfig.SegmentBytesProp)
+ val segmentMs = getLong(LogConfig.SegmentMsProp)
+ val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp)
+ val maxIndexSize = getInt(LogConfig.SegmentIndexBytesProp)
+ val flushInterval = getLong(LogConfig.FlushMessagesProp)
+ val flushMs = getLong(LogConfig.FlushMsProp)
+ val retentionSize = getLong(LogConfig.RetentionBytesProp)
+ val retentionMs = getLong(LogConfig.RetentionMsProp)
+ val maxMessageSize = getInt(LogConfig.MaxMessageBytesProp)
+ val indexInterval = getInt(LogConfig.IndexIntervalBytesProp)
+ val fileDeleteDelayMs = getLong(LogConfig.FileDeleteDelayMsProp)
+ val deleteRetentionMs = getLong(LogConfig.DeleteRetentionMsProp)
+ val compactionLagMs = getLong(LogConfig.MinCompactionLagMsProp)
+ val minCleanableRatio = getDouble(LogConfig.MinCleanableDirtyRatioProp)
+ val compact = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Compact)
+ val delete = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Delete)
+ val uncleanLeaderElectionEnable = getBoolean(LogConfig.UncleanLeaderElectionEnableProp)
+ val minInSyncReplicas = getInt(LogConfig.MinInSyncReplicasProp)
+ val compressionType = getString(LogConfig.CompressionTypeProp).toLowerCase(Locale.ROOT)
+ val preallocate = getBoolean(LogConfig.PreAllocateEnableProp)
+ val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp))
+ val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp))
+ val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue
+ val LeaderReplicationThrottledReplicas = getList(LogConfig.LeaderReplicationThrottledReplicasProp)
+ val FollowerReplicationThrottledReplicas = getList(LogConfig.FollowerReplicationThrottledReplicasProp)
+ val messageDownConversionEnable = getBoolean(LogConfig.MessageDownConversionEnableProp)
+
+ def randomSegmentJitter: Long =
+ if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs)
+}
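
Review note: relative to the one10 copy, this config adds the message down-conversion switch introduced in Kafka 2.0 (KIP-283). Reading the new flag (illustrative values):

    val p = new java.util.Properties()
    p.put(LogConfig.MessageDownConversionEnableProp, "false")
    val cfg = LogConfig(p)
    // cfg.messageDownConversionEnable == false; everything else falls back to broker defaults
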
+
+object LogConfig extends TopicConfigs {
+
+ def main(args: Array[String]) {
+ println(configDef.toHtmlTable)
+ }
+
+ val SegmentBytesProp = TopicConfig.SEGMENT_BYTES_CONFIG
+ val SegmentMsProp = TopicConfig.SEGMENT_MS_CONFIG
+ val SegmentJitterMsProp = TopicConfig.SEGMENT_JITTER_MS_CONFIG
+ val SegmentIndexBytesProp = TopicConfig.SEGMENT_INDEX_BYTES_CONFIG
+ val FlushMessagesProp = TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG
+ val FlushMsProp = TopicConfig.FLUSH_MS_CONFIG
+ val RetentionBytesProp = TopicConfig.RETENTION_BYTES_CONFIG
+ val RetentionMsProp = TopicConfig.RETENTION_MS_CONFIG
+ val MaxMessageBytesProp = TopicConfig.MAX_MESSAGE_BYTES_CONFIG
+ val IndexIntervalBytesProp = TopicConfig.INDEX_INTERVAL_BYTES_CONFIG
+ val DeleteRetentionMsProp = TopicConfig.DELETE_RETENTION_MS_CONFIG
+ val MinCompactionLagMsProp = TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG
+ val FileDeleteDelayMsProp = TopicConfig.FILE_DELETE_DELAY_MS_CONFIG
+ val MinCleanableDirtyRatioProp = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG
+ val CleanupPolicyProp = TopicConfig.CLEANUP_POLICY_CONFIG
+ val Delete = TopicConfig.CLEANUP_POLICY_DELETE
+ val Compact = TopicConfig.CLEANUP_POLICY_COMPACT
+ val UncleanLeaderElectionEnableProp = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG
+ val MinInSyncReplicasProp = TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG
+ val CompressionTypeProp = TopicConfig.COMPRESSION_TYPE_CONFIG
+ val PreAllocateEnableProp = TopicConfig.PREALLOCATE_CONFIG
+ val MessageFormatVersionProp = TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG
+ val MessageTimestampTypeProp = TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG
+ val MessageTimestampDifferenceMaxMsProp = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG
+ val MessageDownConversionEnableProp = TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG
+
+ // Leave these out of TopicConfig for now as they are replication quota configs
+ val LeaderReplicationThrottledReplicasProp = "leader.replication.throttled.replicas"
+ val FollowerReplicationThrottledReplicasProp = "follower.replication.throttled.replicas"
+
+ val SegmentSizeDoc = TopicConfig.SEGMENT_BYTES_DOC
+ val SegmentMsDoc = TopicConfig.SEGMENT_MS_DOC
+ val SegmentJitterMsDoc = TopicConfig.SEGMENT_JITTER_MS_DOC
+ val MaxIndexSizeDoc = TopicConfig.SEGMENT_INDEX_BYTES_DOC
+ val FlushIntervalDoc = TopicConfig.FLUSH_MESSAGES_INTERVAL_DOC
+ val FlushMsDoc = TopicConfig.FLUSH_MS_DOC
+ val RetentionSizeDoc = TopicConfig.RETENTION_BYTES_DOC
+ val RetentionMsDoc = TopicConfig.RETENTION_MS_DOC
+ val MaxMessageSizeDoc = TopicConfig.MAX_MESSAGE_BYTES_DOC
+ val IndexIntervalDoc = TopicConfig.INDEX_INTERVAL_BYTES_DOCS
+ val FileDeleteDelayMsDoc = TopicConfig.FILE_DELETE_DELAY_MS_DOC
+ val DeleteRetentionMsDoc = TopicConfig.DELETE_RETENTION_MS_DOC
+ val MinCompactionLagMsDoc = TopicConfig.MIN_COMPACTION_LAG_MS_DOC
+ val MinCleanableRatioDoc = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_DOC
+ val CompactDoc = TopicConfig.CLEANUP_POLICY_DOC
+ val UncleanLeaderElectionEnableDoc = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_DOC
+ val MinInSyncReplicasDoc = TopicConfig.MIN_IN_SYNC_REPLICAS_DOC
+ val CompressionTypeDoc = TopicConfig.COMPRESSION_TYPE_DOC
+ val PreAllocateEnableDoc = TopicConfig.PREALLOCATE_DOC
+ val MessageFormatVersionDoc = TopicConfig.MESSAGE_FORMAT_VERSION_DOC
+ val MessageTimestampTypeDoc = TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC
+ val MessageTimestampDifferenceMaxMsDoc = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC
+ val MessageDownConversionEnableDoc = TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_DOC
+
+ val LeaderReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
+ "the leader side. The list should describe a set of replicas in the form " +
+ "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
+ "all replicas for this topic."
+  val FollowerReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
+    "the follower side. The list should describe a set of replicas in the form " +
+    "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
+    "all replicas for this topic."
+
+ private class LogConfigDef extends ConfigDef {
+
+ private final val serverDefaultConfigNames = mutable.Map[String, String]()
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, validator: Validator,
+ importance: ConfigDef.Importance, doc: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, validator, importance, doc)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, importance: ConfigDef.Importance,
+ documentation: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, importance: ConfigDef.Importance, documentation: String,
+ serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ override def headers = List("Name", "Description", "Type", "Default", "Valid Values", "Server Default Property", "Importance").asJava
+
+ override def getConfigValue(key: ConfigKey, headerName: String): String = {
+ headerName match {
+ case "Server Default Property" => serverDefaultConfigNames.get(key.name).get
+ case _ => super.getConfigValue(key, headerName)
+ }
+ }
+
+ def serverConfigName(configName: String): Option[String] = serverDefaultConfigNames.get(configName)
+ }
+
+ private val configDef: LogConfigDef = {
+ import org.apache.kafka.common.config.ConfigDef.Importance._
+ import org.apache.kafka.common.config.ConfigDef.Range._
+ import org.apache.kafka.common.config.ConfigDef.Type._
+ import org.apache.kafka.common.config.ConfigDef.ValidString._
+
+ new LogConfigDef()
+ .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), MEDIUM,
+ SegmentSizeDoc, KafkaConfig.LogSegmentBytesProp)
+ .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(1), MEDIUM, SegmentMsDoc,
+ KafkaConfig.LogRollTimeMillisProp)
+ .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc,
+ KafkaConfig.LogRollTimeJitterMillisProp)
+ .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc,
+ KafkaConfig.LogIndexSizeMaxBytesProp)
+ .define(FlushMessagesProp, LONG, Defaults.FlushInterval, atLeast(0), MEDIUM, FlushIntervalDoc,
+ KafkaConfig.LogFlushIntervalMessagesProp)
+ .define(FlushMsProp, LONG, Defaults.FlushMs, atLeast(0), MEDIUM, FlushMsDoc,
+ KafkaConfig.LogFlushIntervalMsProp)
+ // can be negative. See kafka.log.LogManager.cleanupSegmentsToMaintainSize
+ .define(RetentionBytesProp, LONG, Defaults.RetentionSize, MEDIUM, RetentionSizeDoc,
+ KafkaConfig.LogRetentionBytesProp)
+ // can be negative. See kafka.log.LogManager.cleanupExpiredSegments
+ .define(RetentionMsProp, LONG, Defaults.RetentionMs, atLeast(-1), MEDIUM, RetentionMsDoc,
+ KafkaConfig.LogRetentionTimeMillisProp)
+ .define(MaxMessageBytesProp, INT, Defaults.MaxMessageSize, atLeast(0), MEDIUM, MaxMessageSizeDoc,
+ KafkaConfig.MessageMaxBytesProp)
+ .define(IndexIntervalBytesProp, INT, Defaults.IndexInterval, atLeast(0), MEDIUM, IndexIntervalDoc,
+ KafkaConfig.LogIndexIntervalBytesProp)
+ .define(DeleteRetentionMsProp, LONG, Defaults.DeleteRetentionMs, atLeast(0), MEDIUM,
+ DeleteRetentionMsDoc, KafkaConfig.LogCleanerDeleteRetentionMsProp)
+ .define(MinCompactionLagMsProp, LONG, Defaults.MinCompactionLagMs, atLeast(0), MEDIUM, MinCompactionLagMsDoc,
+ KafkaConfig.LogCleanerMinCompactionLagMsProp)
+ .define(FileDeleteDelayMsProp, LONG, Defaults.FileDeleteDelayMs, atLeast(0), MEDIUM, FileDeleteDelayMsDoc,
+ KafkaConfig.LogDeleteDelayMsProp)
+ .define(MinCleanableDirtyRatioProp, DOUBLE, Defaults.MinCleanableDirtyRatio, between(0, 1), MEDIUM,
+ MinCleanableRatioDoc, KafkaConfig.LogCleanerMinCleanRatioProp)
+ .define(CleanupPolicyProp, LIST, Defaults.CleanupPolicy, ValidList.in(LogConfig.Compact, LogConfig.Delete), MEDIUM, CompactDoc,
+ KafkaConfig.LogCleanupPolicyProp)
+ .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable,
+ MEDIUM, UncleanLeaderElectionEnableDoc, KafkaConfig.UncleanLeaderElectionEnableProp)
+ .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), MEDIUM, MinInSyncReplicasDoc,
+ KafkaConfig.MinInSyncReplicasProp)
+ .define(CompressionTypeProp, STRING, Defaults.CompressionType, in(BrokerCompressionCodec.brokerCompressionOptions:_*),
+ MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp)
+ .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc,
+ KafkaConfig.LogPreAllocateProp)
+ .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, ApiVersionValidator, MEDIUM, MessageFormatVersionDoc,
+ KafkaConfig.LogMessageFormatVersionProp)
+ .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, in("CreateTime", "LogAppendTime"), MEDIUM, MessageTimestampTypeDoc,
+ KafkaConfig.LogMessageTimestampTypeProp)
+ .define(MessageTimestampDifferenceMaxMsProp, LONG, Defaults.MessageTimestampDifferenceMaxMs,
+ atLeast(0), MEDIUM, MessageTimestampDifferenceMaxMsDoc, KafkaConfig.LogMessageTimestampDifferenceMaxMsProp)
+ .define(LeaderReplicationThrottledReplicasProp, LIST, Defaults.LeaderReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ LeaderReplicationThrottledReplicasDoc, LeaderReplicationThrottledReplicasProp)
+ .define(FollowerReplicationThrottledReplicasProp, LIST, Defaults.FollowerReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ FollowerReplicationThrottledReplicasDoc, FollowerReplicationThrottledReplicasProp)
+ .define(MessageDownConversionEnableProp, BOOLEAN, Defaults.MessageDownConversionEnable, LOW,
+ MessageDownConversionEnableDoc, KafkaConfig.LogMessageDownConversionEnableProp)
+ }
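
Review note: this configDef also tightens validation versus the one10 copy: segment.ms must be at least 1, retention.ms at least -1, message.format.version goes through ApiVersionValidator, and message.timestamp.type is restricted to CreateTime or LogAppendTime. A value that now fails to parse (illustrative; ConfigDef.parse raises its usual ConfigException):

    val p = new java.util.Properties()
    p.put(LogConfig.MessageTimestampTypeProp, "WallClock") // not in ("CreateTime", "LogAppendTime")
    // LogConfig.validate(p) fails in configDef.parse under the ValidString check
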
+
+ def apply(): LogConfig = LogConfig(new Properties())
+
+ def configNames: Seq[String] = configDef.names.asScala.toSeq.sorted
+
+ def serverConfigName(configName: String): Option[String] = configDef.serverConfigName(configName)
+
+ /**
+ * Create a log config instance using the given properties and defaults
+ */
+ def fromProps(defaults: java.util.Map[_ <: Object, _ <: Object], overrides: Properties): LogConfig = {
+ val props = new Properties()
+ defaults.asScala.foreach { case (k, v) => props.put(k, v) }
+ props ++= overrides
+ val overriddenKeys = overrides.keySet.asScala.map(_.asInstanceOf[String]).toSet
+ new LogConfig(props, overriddenKeys)
+ }
+
+ /**
+ * Check that property names are valid
+ */
+ def validateNames(props: Properties) {
+ val names = configNames
+ for(name <- props.asScala.keys)
+ if (!names.contains(name))
+ throw new InvalidConfigurationException(s"Unknown topic config name: $name")
+ }
+
+ /**
+ * Check that the given properties contain only valid log config names and that all values can be parsed and are valid
+ */
+ def validate(props: Properties) {
+ validateNames(props)
+ configDef.parse(props)
+ }
+
+ /**
+ * Map topic config to the broker config with highest priority. Some of these have additional synonyms
+ * that can be obtained using kafka.server.DynamicBrokerConfig#brokerConfigSynonyms
+ */
+ val TopicConfigSynonyms = Map(
+ SegmentBytesProp -> KafkaConfig.LogSegmentBytesProp,
+ SegmentMsProp -> KafkaConfig.LogRollTimeMillisProp,
+ SegmentJitterMsProp -> KafkaConfig.LogRollTimeJitterMillisProp,
+ SegmentIndexBytesProp -> KafkaConfig.LogIndexSizeMaxBytesProp,
+ FlushMessagesProp -> KafkaConfig.LogFlushIntervalMessagesProp,
+ FlushMsProp -> KafkaConfig.LogFlushIntervalMsProp,
+ RetentionBytesProp -> KafkaConfig.LogRetentionBytesProp,
+ RetentionMsProp -> KafkaConfig.LogRetentionTimeMillisProp,
+ MaxMessageBytesProp -> KafkaConfig.MessageMaxBytesProp,
+ IndexIntervalBytesProp -> KafkaConfig.LogIndexIntervalBytesProp,
+ DeleteRetentionMsProp -> KafkaConfig.LogCleanerDeleteRetentionMsProp,
+ MinCompactionLagMsProp -> KafkaConfig.LogCleanerMinCompactionLagMsProp,
+ FileDeleteDelayMsProp -> KafkaConfig.LogDeleteDelayMsProp,
+ MinCleanableDirtyRatioProp -> KafkaConfig.LogCleanerMinCleanRatioProp,
+ CleanupPolicyProp -> KafkaConfig.LogCleanupPolicyProp,
+ UncleanLeaderElectionEnableProp -> KafkaConfig.UncleanLeaderElectionEnableProp,
+ MinInSyncReplicasProp -> KafkaConfig.MinInSyncReplicasProp,
+ CompressionTypeProp -> KafkaConfig.CompressionTypeProp,
+ PreAllocateEnableProp -> KafkaConfig.LogPreAllocateProp,
+ MessageFormatVersionProp -> KafkaConfig.LogMessageFormatVersionProp,
+ MessageTimestampTypeProp -> KafkaConfig.LogMessageTimestampTypeProp,
+ MessageTimestampDifferenceMaxMsProp -> KafkaConfig.LogMessageTimestampDifferenceMaxMsProp,
+ MessageDownConversionEnableProp -> KafkaConfig.LogMessageDownConversionEnableProp
+ )
+
+ def configNamesAndDoc: Seq[(String, String)] = {
+ Option(configDef).fold {
+ configNames.map(n => n -> "")
+ } {
+ configDef =>
+ val keyMap = configDef.configKeys()
+ configNames.map(n => n -> Option(keyMap.get(n)).map(_.documentation).flatMap(Option.apply).getOrElse(""))
+ }
+ }
+}
\ No newline at end of file
diff --git a/app/kafka/manager/utils/zero10/LogConfig.scala b/app/kafka/manager/utils/zero10/LogConfig.scala
new file mode 100644
index 000000000..6c43b66bb
--- /dev/null
+++ b/app/kafka/manager/utils/zero10/LogConfig.scala
@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.manager.utils.zero10
+
+import java.util.{Collections, Locale, Properties}
+
+import scala.collection.JavaConverters._
+import kafka.api.ApiVersion
+import kafka.manager.utils.TopicConfigs
+import kafka.message.BrokerCompressionCodec
+import kafka.server.{KafkaConfig, ThrottledReplicaListValidator}
+import org.apache.kafka.common.errors.InvalidConfigurationException
+import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}
+import org.apache.kafka.common.record.{LegacyRecord, TimestampType}
+import org.apache.kafka.common.utils.Utils
+
+import scala.collection.mutable
+import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList, Validator}
+
+object Defaults {
+ val SegmentSize = kafka.server.Defaults.LogSegmentBytes
+ val SegmentMs = kafka.server.Defaults.LogRollHours * 60 * 60 * 1000L
+ val SegmentJitterMs = kafka.server.Defaults.LogRollJitterHours * 60 * 60 * 1000L
+ val FlushInterval = kafka.server.Defaults.LogFlushIntervalMessages
+ val FlushMs = kafka.server.Defaults.LogFlushSchedulerIntervalMs
+ val RetentionSize = kafka.server.Defaults.LogRetentionBytes
+ val RetentionMs = kafka.server.Defaults.LogRetentionHours * 60 * 60 * 1000L
+ val MaxMessageSize = kafka.server.Defaults.MessageMaxBytes
+ val MaxIndexSize = kafka.server.Defaults.LogIndexSizeMaxBytes
+ val IndexInterval = kafka.server.Defaults.LogIndexIntervalBytes
+ val FileDeleteDelayMs = kafka.server.Defaults.LogDeleteDelayMs
+ val DeleteRetentionMs = kafka.server.Defaults.LogCleanerDeleteRetentionMs
+ val MinCompactionLagMs = kafka.server.Defaults.LogCleanerMinCompactionLagMs
+ val MinCleanableDirtyRatio = kafka.server.Defaults.LogCleanerMinCleanRatio
+ val Compact = kafka.server.Defaults.LogCleanupPolicy
+ val UncleanLeaderElectionEnable = kafka.server.Defaults.UncleanLeaderElectionEnable
+ val MinInSyncReplicas = kafka.server.Defaults.MinInSyncReplicas
+ val CompressionType = kafka.server.Defaults.CompressionType
+ val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable
+ val MessageFormatVersion = kafka.server.Defaults.LogMessageFormatVersion
+ val MessageTimestampType = kafka.server.Defaults.LogMessageTimestampType
+ val MessageTimestampDifferenceMaxMs = kafka.server.Defaults.LogMessageTimestampDifferenceMaxMs
+ val LeaderReplicationThrottledReplicas = Collections.emptyList[String]()
+ val FollowerReplicationThrottledReplicas = Collections.emptyList[String]()
+}
+
+case class LogConfig(props: java.util.Map[_, _]) extends AbstractConfig(LogConfig.configDef, props, false) {
+ /**
+ * Important note: Any configuration parameter that is passed along from KafkaConfig to LogConfig
+ * should also go in kafka.server.KafkaServer.copyKafkaConfigToLog.
+ */
+ val segmentSize = getInt(LogConfig.SegmentBytesProp)
+ val segmentMs = getLong(LogConfig.SegmentMsProp)
+ val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp)
+ val maxIndexSize = getInt(LogConfig.SegmentIndexBytesProp)
+ val flushInterval = getLong(LogConfig.FlushMessagesProp)
+ val flushMs = getLong(LogConfig.FlushMsProp)
+ val retentionSize = getLong(LogConfig.RetentionBytesProp)
+ val retentionMs = getLong(LogConfig.RetentionMsProp)
+ val maxMessageSize = getInt(LogConfig.MaxMessageBytesProp)
+ val indexInterval = getInt(LogConfig.IndexIntervalBytesProp)
+ val fileDeleteDelayMs = getLong(LogConfig.FileDeleteDelayMsProp)
+ val deleteRetentionMs = getLong(LogConfig.DeleteRetentionMsProp)
+ val compactionLagMs = getLong(LogConfig.MinCompactionLagMsProp)
+ val minCleanableRatio = getDouble(LogConfig.MinCleanableDirtyRatioProp)
+ val compact = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Compact)
+ val delete = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Delete)
+ val uncleanLeaderElectionEnable = getBoolean(LogConfig.UncleanLeaderElectionEnableProp)
+ val minInSyncReplicas = getInt(LogConfig.MinInSyncReplicasProp)
+ val compressionType = getString(LogConfig.CompressionTypeProp).toLowerCase(Locale.ROOT)
+ val preallocate = getBoolean(LogConfig.PreAllocateEnableProp)
+ val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp))
+ val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp))
+ val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue
+ val LeaderReplicationThrottledReplicas = getList(LogConfig.LeaderReplicationThrottledReplicasProp)
+ val FollowerReplicationThrottledReplicas = getList(LogConfig.FollowerReplicationThrottledReplicasProp)
+
+ def randomSegmentJitter: Long =
+ if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs)
+}
+
+object LogConfig extends TopicConfigs {
+
+ def main(args: Array[String]) {
+ println(configDef.toHtmlTable)
+ }
+
+ val Delete = "delete"
+ val Compact = "compact"
+
+ val SegmentBytesProp = "segment.bytes"
+ val SegmentMsProp = "segment.ms"
+ val SegmentJitterMsProp = "segment.jitter.ms"
+ val SegmentIndexBytesProp = "segment.index.bytes"
+ val FlushMessagesProp = "flush.messages"
+ val FlushMsProp = "flush.ms"
+ val RetentionBytesProp = "retention.bytes"
+ val RetentionMsProp = "retention.ms"
+ val MaxMessageBytesProp = "max.message.bytes"
+ val IndexIntervalBytesProp = "index.interval.bytes"
+ val DeleteRetentionMsProp = "delete.retention.ms"
+ val MinCompactionLagMsProp = "min.compaction.lag.ms"
+ val FileDeleteDelayMsProp = "file.delete.delay.ms"
+ val MinCleanableDirtyRatioProp = "min.cleanable.dirty.ratio"
+ val CleanupPolicyProp = "cleanup.policy"
+ val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
+ val MinInSyncReplicasProp = "min.insync.replicas"
+ val CompressionTypeProp = "compression.type"
+ val PreAllocateEnableProp = "preallocate"
+ val MessageFormatVersionProp = "message.format.version"
+ val MessageTimestampTypeProp = "message.timestamp.type"
+ val MessageTimestampDifferenceMaxMsProp = "message.timestamp.difference.max.ms"
+ val LeaderReplicationThrottledReplicasProp = "leader.replication.throttled.replicas"
+ val FollowerReplicationThrottledReplicasProp = "follower.replication.throttled.replicas"
+
+ val SegmentSizeDoc = "This configuration controls the segment file size for " +
+ "the log. Retention and cleaning is always done a file at a time so a larger " +
+ "segment size means fewer files but less granular control over retention."
+ val SegmentMsDoc = "This configuration controls the period of time after " +
+ "which Kafka will force the log to roll even if the segment file isn't full " +
+ "to ensure that retention can delete or compact old data."
+ val SegmentJitterMsDoc = "The maximum random jitter subtracted from the scheduled segment roll time to avoid" +
+ " thundering herds of segment rolling"
+ val FlushIntervalDoc = "This setting allows specifying an interval at which we " +
+ "will force an fsync of data written to the log. For example if this was set to 1 " +
+ "we would fsync after every message; if it were 5 we would fsync after every five " +
+ "messages. In general we recommend you not set this and use replication for " +
+ "durability and allow the operating system's background flush capabilities as it " +
+ "is more efficient. This setting can be overridden on a per-topic basis (see the per-topic configuration section)."
+ val FlushMsDoc = "This setting allows specifying a time interval at which we will " +
+ "force an fsync of data written to the log. For example if this was set to 1000 " +
+ "we would fsync after 1000 ms had passed. In general we recommend you not set " +
+ "this and use replication for durability and allow the operating system's background " +
+ "flush capabilities as it is more efficient."
+ val RetentionSizeDoc = "This configuration controls the maximum size a log can grow " +
+ "to before we will discard old log segments to free up space if we are using the " +
+ "\"delete\" retention policy. By default there is no size limit only a time limit."
+ val RetentionMsDoc = "This configuration controls the maximum time we will retain a " +
+ "log before we will discard old log segments to free up space if we are using the " +
+ "\"delete\" retention policy. This represents an SLA on how soon consumers must read " +
+ "their data."
+ val MaxIndexSizeDoc = "This configuration controls the size of the index that maps " +
+ "offsets to file positions. We preallocate this index file and shrink it only after log " +
+ "rolls. You generally should not need to change this setting."
+  val MaxMessageSizeDoc = "This is the largest message size Kafka will allow to be appended. Note that if you increase" +
+ " this size you must also increase your consumer's fetch size so they can fetch messages this large."
+ val IndexIntervalDoc = "This setting controls how frequently Kafka adds an index " +
+ "entry to it's offset index. The default setting ensures that we index a message " +
+ "roughly every 4096 bytes. More indexing allows reads to jump closer to the exact " +
+ "position in the log but makes the index larger. You probably don't need to change " +
+ "this."
+ val FileDeleteDelayMsDoc = "The time to wait before deleting a file from the filesystem"
+ val DeleteRetentionMsDoc = "The amount of time to retain delete tombstone markers " +
+ "for log compacted topics. This setting also gives a bound " +
+ "on the time in which a consumer must complete a read if they begin from offset 0 " +
+ "to ensure that they get a valid snapshot of the final stage (otherwise delete " +
+ "tombstones may be collected before they complete their scan)."
+ val MinCompactionLagMsDoc = "The minimum time a message will remain uncompacted in the log. " +
+ "Only applicable for logs that are being compacted."
+ val MinCleanableRatioDoc = "This configuration controls how frequently the log " +
+ "compactor will attempt to clean the log (assuming log " +
+ "compaction is enabled). By default we will avoid cleaning a log where more than " +
+ "50% of the log has been compacted. This ratio bounds the maximum space wasted in " +
+ "the log by duplicates (at 50% at most 50% of the log could be duplicates). A " +
+ "higher ratio will mean fewer, more efficient cleanings but will mean more wasted " +
+ "space in the log."
+ val CompactDoc = "A string that is either \"delete\" or \"compact\". This string " +
+ "designates the retention policy to use on old log segments. The default policy " +
+ "(\"delete\") will discard old segments when their retention time or size limit has " +
+ "been reached. The \"compact\" setting will enable log " +
+ "compaction on the topic."
+ val UncleanLeaderElectionEnableDoc = "Indicates whether replicas not in the ISR set may be elected as" +
+ " leader as a last resort, even though doing so may result in data loss."
+ val MinInSyncReplicasDoc = KafkaConfig.MinInSyncReplicasDoc
+ val CompressionTypeDoc = "Specify the final compression type for a given topic. This configuration accepts the " +
+ "standard compression codecs ('gzip', 'snappy', lz4). It additionally accepts 'uncompressed' which is equivalent to " +
+ "no compression; and 'producer' which means retain the original compression codec set by the producer."
+ val PreAllocateEnableDoc ="Should pre allocate file when create new segment?"
+ val MessageFormatVersionDoc = KafkaConfig.LogMessageFormatVersionDoc
+ val MessageTimestampTypeDoc = KafkaConfig.LogMessageTimestampTypeDoc
+ val MessageTimestampDifferenceMaxMsDoc = "The maximum difference allowed between the timestamp when a broker receives " +
+ "a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected " +
+ "if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime."
+ val LeaderReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on the leader side. The list should describe a set of " +
+ "replicas in the form [PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle all replicas for this topic."
+ val FollowerReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on the follower side. The list should describe a set of " +
+ "replicas in the form [PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle all replicas for this topic."
+
+ private class LogConfigDef extends ConfigDef {
+
+ private final val serverDefaultConfigNames = mutable.Map[String, String]()
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, validator: Validator,
+ importance: ConfigDef.Importance, doc: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, validator, importance, doc)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, importance: ConfigDef.Importance,
+ documentation: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, importance: ConfigDef.Importance, documentation: String,
+ serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ override def headers = List("Name", "Description", "Type", "Default", "Valid Values", "Server Default Property", "Importance").asJava
+
+ override def getConfigValue(key: ConfigKey, headerName: String): String = {
+ headerName match {
+ case "Server Default Property" => serverDefaultConfigNames.get(key.name).get
+ case _ => super.getConfigValue(key, headerName)
+ }
+ }
+
+ def serverConfigName(configName: String): Option[String] = serverDefaultConfigNames.get(configName)
+ }
+
+ private val configDef: LogConfigDef = {
+ import org.apache.kafka.common.config.ConfigDef.Importance._
+ import org.apache.kafka.common.config.ConfigDef.Range._
+ import org.apache.kafka.common.config.ConfigDef.Type._
+ import org.apache.kafka.common.config.ConfigDef.ValidString._
+
+ new LogConfigDef()
+ .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), MEDIUM,
+ SegmentSizeDoc, KafkaConfig.LogSegmentBytesProp)
+ .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(0), MEDIUM, SegmentMsDoc,
+ KafkaConfig.LogRollTimeMillisProp)
+ .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc,
+ KafkaConfig.LogRollTimeJitterMillisProp)
+ .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc,
+ KafkaConfig.LogIndexSizeMaxBytesProp)
+ .define(FlushMessagesProp, LONG, Defaults.FlushInterval, atLeast(0), MEDIUM, FlushIntervalDoc,
+ KafkaConfig.LogFlushIntervalMessagesProp)
+ .define(FlushMsProp, LONG, Defaults.FlushMs, atLeast(0), MEDIUM, FlushMsDoc,
+ KafkaConfig.LogFlushIntervalMsProp)
+ // can be negative. See kafka.log.LogManager.cleanupSegmentsToMaintainSize
+ .define(RetentionBytesProp, LONG, Defaults.RetentionSize, MEDIUM, RetentionSizeDoc,
+ KafkaConfig.LogRetentionBytesProp)
+ // can be negative. See kafka.log.LogManager.cleanupExpiredSegments
+ .define(RetentionMsProp, LONG, Defaults.RetentionMs, MEDIUM, RetentionMsDoc,
+ KafkaConfig.LogRetentionTimeMillisProp)
+ .define(MaxMessageBytesProp, INT, Defaults.MaxMessageSize, atLeast(0), MEDIUM, MaxMessageSizeDoc,
+ KafkaConfig.MessageMaxBytesProp)
+ .define(IndexIntervalBytesProp, INT, Defaults.IndexInterval, atLeast(0), MEDIUM, IndexIntervalDoc,
+ KafkaConfig.LogIndexIntervalBytesProp)
+ .define(DeleteRetentionMsProp, LONG, Defaults.DeleteRetentionMs, atLeast(0), MEDIUM,
+ DeleteRetentionMsDoc, KafkaConfig.LogCleanerDeleteRetentionMsProp)
+ .define(MinCompactionLagMsProp, LONG, Defaults.MinCompactionLagMs, atLeast(0), MEDIUM, MinCompactionLagMsDoc,
+ KafkaConfig.LogCleanerMinCompactionLagMsProp)
+ .define(FileDeleteDelayMsProp, LONG, Defaults.FileDeleteDelayMs, atLeast(0), MEDIUM, FileDeleteDelayMsDoc,
+ KafkaConfig.LogDeleteDelayMsProp)
+ .define(MinCleanableDirtyRatioProp, DOUBLE, Defaults.MinCleanableDirtyRatio, between(0, 1), MEDIUM,
+ MinCleanableRatioDoc, KafkaConfig.LogCleanerMinCleanRatioProp)
+ .define(CleanupPolicyProp, LIST, Defaults.Compact, ValidList.in(LogConfig.Compact, LogConfig.Delete), MEDIUM, CompactDoc,
+ KafkaConfig.LogCleanupPolicyProp)
+ .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable,
+ MEDIUM, UncleanLeaderElectionEnableDoc, KafkaConfig.UncleanLeaderElectionEnableProp)
+ .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), MEDIUM, MinInSyncReplicasDoc,
+ KafkaConfig.MinInSyncReplicasProp)
+ .define(CompressionTypeProp, STRING, Defaults.CompressionType, in(BrokerCompressionCodec.brokerCompressionOptions:_*),
+ MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp)
+ .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc,
+ KafkaConfig.LogPreAllocateProp)
+ .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, MEDIUM, MessageFormatVersionDoc,
+ KafkaConfig.LogMessageFormatVersionProp)
+ .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, MEDIUM, MessageTimestampTypeDoc,
+ KafkaConfig.LogMessageTimestampTypeProp)
+ .define(MessageTimestampDifferenceMaxMsProp, LONG, Defaults.MessageTimestampDifferenceMaxMs,
+ atLeast(0), MEDIUM, MessageTimestampDifferenceMaxMsDoc, KafkaConfig.LogMessageTimestampDifferenceMaxMsProp)
+ .define(LeaderReplicationThrottledReplicasProp, LIST, Defaults.LeaderReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ LeaderReplicationThrottledReplicasDoc, LeaderReplicationThrottledReplicasProp)
+ .define(FollowerReplicationThrottledReplicasProp, LIST, Defaults.FollowerReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ FollowerReplicationThrottledReplicasDoc, FollowerReplicationThrottledReplicasProp)
+ }
+
+ def apply(): LogConfig = LogConfig(new Properties())
+
+ def configNames: Seq[String] = configDef.names.asScala.toSeq.sorted
+
+ def serverConfigName(configName: String): Option[String] = configDef.serverConfigName(configName)
+
+ /**
+ * Create a log config instance using the given properties and defaults
+ */
+ def fromProps(defaults: java.util.Map[_ <: Object, _ <: Object], overrides: Properties): LogConfig = {
+ val props = new Properties()
+ props.putAll(defaults)
+ props.putAll(overrides)
+ LogConfig(props)
+ }
+
+ /**
+ * Check that property names are valid
+ */
+ def validateNames(props: Properties) {
+ val names = configNames
+ for(name <- props.asScala.keys)
+ if (!names.contains(name))
+ throw new InvalidConfigurationException(s"Unknown Log configuration $name.")
+ }
+
+ /**
+ * Check that the given properties contain only valid log config names and that all values can be parsed and are valid
+ */
+ def validate(props: Properties) {
+ validateNames(props)
+ configDef.parse(props)
+ }
+
+ def configNamesAndDoc: Seq[(String, String)] = {
+ Option(configDef).fold {
+ configNames.map(n => n -> "")
+ } {
+ configDef =>
+ val keyMap = configDef.configKeys()
+ configNames.map(n => n -> Option(keyMap.get(n)).map(_.documentation).flatMap(Option.apply).getOrElse(""))
+ }
+ }
+}
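
For orientation, a minimal usage sketch of the `fromProps`/`validateNames` helpers added above; the key and value choices here are illustrative, not taken from the patch:

```scala
// Sketch only: layering per-topic overrides on broker-wide defaults.
import java.util.Properties

val defaults = new Properties()
defaults.put("retention.ms", "604800000")   // broker default: 7 days

val overrides = new Properties()
overrides.put("cleanup.policy", "compact")  // per-topic override

LogConfig.validateNames(overrides)          // throws InvalidConfigurationException on unknown keys
val merged = LogConfig.fromProps(defaults, overrides)
```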
diff --git a/app/kafka/manager/utils/zero11/LogConfig.scala b/app/kafka/manager/utils/zero11/LogConfig.scala
new file mode 100644
index 000000000..6ceb0e5f7
--- /dev/null
+++ b/app/kafka/manager/utils/zero11/LogConfig.scala
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.manager.utils.zero11
+
+import java.util.{Collections, Locale, Properties}
+
+import scala.collection.JavaConverters._
+import kafka.api.ApiVersion
+import kafka.manager.utils.TopicConfigs
+import kafka.message.BrokerCompressionCodec
+import kafka.server.{KafkaConfig, ThrottledReplicaListValidator}
+import org.apache.kafka.common.errors.InvalidConfigurationException
+import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, TopicConfig}
+import org.apache.kafka.common.record.{LegacyRecord, TimestampType}
+import org.apache.kafka.common.utils.Utils
+
+import scala.collection.mutable
+import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList, Validator}
+
+object Defaults {
+ val SegmentSize = kafka.server.Defaults.LogSegmentBytes
+ val SegmentMs = kafka.server.Defaults.LogRollHours * 60 * 60 * 1000L
+ val SegmentJitterMs = kafka.server.Defaults.LogRollJitterHours * 60 * 60 * 1000L
+ val FlushInterval = kafka.server.Defaults.LogFlushIntervalMessages
+ val FlushMs = kafka.server.Defaults.LogFlushSchedulerIntervalMs
+ val RetentionSize = kafka.server.Defaults.LogRetentionBytes
+ val RetentionMs = kafka.server.Defaults.LogRetentionHours * 60 * 60 * 1000L
+ val MaxMessageSize = kafka.server.Defaults.MessageMaxBytes
+ val MaxIndexSize = kafka.server.Defaults.LogIndexSizeMaxBytes
+ val IndexInterval = kafka.server.Defaults.LogIndexIntervalBytes
+ val FileDeleteDelayMs = kafka.server.Defaults.LogDeleteDelayMs
+ val DeleteRetentionMs = kafka.server.Defaults.LogCleanerDeleteRetentionMs
+ val MinCompactionLagMs = kafka.server.Defaults.LogCleanerMinCompactionLagMs
+ val MinCleanableDirtyRatio = kafka.server.Defaults.LogCleanerMinCleanRatio
+ val Compact = kafka.server.Defaults.LogCleanupPolicy
+ val UncleanLeaderElectionEnable = kafka.server.Defaults.UncleanLeaderElectionEnable
+ val MinInSyncReplicas = kafka.server.Defaults.MinInSyncReplicas
+ val CompressionType = kafka.server.Defaults.CompressionType
+ val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable
+ val MessageFormatVersion = kafka.server.Defaults.LogMessageFormatVersion
+ val MessageTimestampType = kafka.server.Defaults.LogMessageTimestampType
+ val MessageTimestampDifferenceMaxMs = kafka.server.Defaults.LogMessageTimestampDifferenceMaxMs
+ val LeaderReplicationThrottledReplicas = Collections.emptyList[String]()
+ val FollowerReplicationThrottledReplicas = Collections.emptyList[String]()
+ val MaxIdMapSnapshots = kafka.server.Defaults.MaxIdMapSnapshots
+}
+
+case class LogConfig(props: java.util.Map[_, _]) extends AbstractConfig(LogConfig.configDef, props, false) {
+ /**
+ * Important note: Any configuration parameter that is passed along from KafkaConfig to LogConfig
+ * should also go in kafka.server.KafkaServer.copyKafkaConfigToLog.
+ */
+ val segmentSize = getInt(LogConfig.SegmentBytesProp)
+ val segmentMs = getLong(LogConfig.SegmentMsProp)
+ val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp)
+ val maxIndexSize = getInt(LogConfig.SegmentIndexBytesProp)
+ val flushInterval = getLong(LogConfig.FlushMessagesProp)
+ val flushMs = getLong(LogConfig.FlushMsProp)
+ val retentionSize = getLong(LogConfig.RetentionBytesProp)
+ val retentionMs = getLong(LogConfig.RetentionMsProp)
+ val maxMessageSize = getInt(LogConfig.MaxMessageBytesProp)
+ val indexInterval = getInt(LogConfig.IndexIntervalBytesProp)
+ val fileDeleteDelayMs = getLong(LogConfig.FileDeleteDelayMsProp)
+ val deleteRetentionMs = getLong(LogConfig.DeleteRetentionMsProp)
+ val compactionLagMs = getLong(LogConfig.MinCompactionLagMsProp)
+ val minCleanableRatio = getDouble(LogConfig.MinCleanableDirtyRatioProp)
+ val compact = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Compact)
+ val delete = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Delete)
+ val uncleanLeaderElectionEnable = getBoolean(LogConfig.UncleanLeaderElectionEnableProp)
+ val minInSyncReplicas = getInt(LogConfig.MinInSyncReplicasProp)
+ val compressionType = getString(LogConfig.CompressionTypeProp).toLowerCase(Locale.ROOT)
+ val preallocate = getBoolean(LogConfig.PreAllocateEnableProp)
+ val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp))
+ val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp))
+ val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue
+ val LeaderReplicationThrottledReplicas = getList(LogConfig.LeaderReplicationThrottledReplicasProp)
+ val FollowerReplicationThrottledReplicas = getList(LogConfig.FollowerReplicationThrottledReplicasProp)
+
+ def randomSegmentJitter: Long =
+ if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs)
+}
+
+object LogConfig extends TopicConfigs {
+
+ def main(args: Array[String]) {
+ println(configDef.toHtmlTable)
+ }
+
+ val SegmentBytesProp = TopicConfig.SEGMENT_BYTES_CONFIG
+ val SegmentMsProp = TopicConfig.SEGMENT_MS_CONFIG
+ val SegmentJitterMsProp = TopicConfig.SEGMENT_JITTER_MS_CONFIG
+ val SegmentIndexBytesProp = TopicConfig.SEGMENT_INDEX_BYTES_CONFIG
+ val FlushMessagesProp = TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG
+ val FlushMsProp = TopicConfig.FLUSH_MS_CONFIG
+ val RetentionBytesProp = TopicConfig.RETENTION_BYTES_CONFIG
+ val RetentionMsProp = TopicConfig.RETENTION_MS_CONFIG
+ val MaxMessageBytesProp = TopicConfig.MAX_MESSAGE_BYTES_CONFIG
+ val IndexIntervalBytesProp = TopicConfig.INDEX_INTERVAL_BYTES_CONFIG
+ val DeleteRetentionMsProp = TopicConfig.DELETE_RETENTION_MS_CONFIG
+ val MinCompactionLagMsProp = TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG
+ val FileDeleteDelayMsProp = TopicConfig.FILE_DELETE_DELAY_MS_CONFIG
+ val MinCleanableDirtyRatioProp = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG
+ val CleanupPolicyProp = TopicConfig.CLEANUP_POLICY_CONFIG
+ val Delete = TopicConfig.CLEANUP_POLICY_DELETE
+ val Compact = TopicConfig.CLEANUP_POLICY_COMPACT
+ val UncleanLeaderElectionEnableProp = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG
+ val MinInSyncReplicasProp = TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG
+ val CompressionTypeProp = TopicConfig.COMPRESSION_TYPE_CONFIG
+ val PreAllocateEnableProp = TopicConfig.PREALLOCATE_CONFIG
+ val MessageFormatVersionProp = TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG
+ val MessageTimestampTypeProp = TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG
+ val MessageTimestampDifferenceMaxMsProp = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG
+
+ // Leave these out of TopicConfig for now as they are replication quota configs
+ val LeaderReplicationThrottledReplicasProp = "leader.replication.throttled.replicas"
+ val FollowerReplicationThrottledReplicasProp = "follower.replication.throttled.replicas"
+
+ val SegmentSizeDoc = TopicConfig.SEGMENT_BYTES_DOC
+ val SegmentMsDoc = TopicConfig.SEGMENT_MS_DOC
+ val SegmentJitterMsDoc = TopicConfig.SEGMENT_JITTER_MS_DOC
+ val MaxIndexSizeDoc = TopicConfig.SEGMENT_INDEX_BYTES_DOC
+ val FlushIntervalDoc = TopicConfig.FLUSH_MESSAGES_INTERVAL_DOC
+ val FlushMsDoc = TopicConfig.FLUSH_MS_DOC
+ val RetentionSizeDoc = TopicConfig.RETENTION_BYTES_DOC
+ val RetentionMsDoc = TopicConfig.RETENTION_MS_DOC
+ val MaxMessageSizeDoc = TopicConfig.MAX_MESSAGE_BYTES_DOC
+ val IndexIntervalDoc = TopicConfig.INDEX_INTERVAL_BYTES_DOCS
+ val FileDeleteDelayMsDoc = TopicConfig.FILE_DELETE_DELAY_MS_DOC
+ val DeleteRetentionMsDoc = TopicConfig.DELETE_RETENTION_MS_DOC
+ val MinCompactionLagMsDoc = TopicConfig.MIN_COMPACTION_LAG_MS_DOC
+ val MinCleanableRatioDoc = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_DOC
+ val CompactDoc = TopicConfig.CLEANUP_POLICY_DOC
+ val UncleanLeaderElectionEnableDoc = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_DOC
+ val MinInSyncReplicasDoc = TopicConfig.MIN_IN_SYNC_REPLICAS_DOC
+ val CompressionTypeDoc = TopicConfig.COMPRESSION_TYPE_DOC
+ val PreAllocateEnableDoc = TopicConfig.PREALLOCATE_DOC
+ val MessageFormatVersionDoc = TopicConfig.MESSAGE_FORMAT_VERSION_DOC
+ val MessageTimestampTypeDoc = TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC
+ val MessageTimestampDifferenceMaxMsDoc = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC
+
+ val LeaderReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
+ "the leader side. The list should describe a set of replicas in the form " +
+ "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
+ "all replicas for this topic."
+ val FollowerReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " +
+ "the follower side. The list should describe a set of " + "replicas in the form " +
+ "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " +
+ "all replicas for this topic."
+
+ private class LogConfigDef extends ConfigDef {
+
+ private final val serverDefaultConfigNames = mutable.Map[String, String]()
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, validator: Validator,
+ importance: ConfigDef.Importance, doc: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, validator, importance, doc)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, defaultValue: Any, importance: ConfigDef.Importance,
+ documentation: String, serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, defaultValue, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ def define(name: String, defType: ConfigDef.Type, importance: ConfigDef.Importance, documentation: String,
+ serverDefaultConfigName: String): LogConfigDef = {
+ super.define(name, defType, importance, documentation)
+ serverDefaultConfigNames.put(name, serverDefaultConfigName)
+ this
+ }
+
+ override def headers = List("Name", "Description", "Type", "Default", "Valid Values", "Server Default Property", "Importance").asJava
+
+ override def getConfigValue(key: ConfigKey, headerName: String): String = {
+ headerName match {
+ case "Server Default Property" => serverDefaultConfigNames.get(key.name).get
+ case _ => super.getConfigValue(key, headerName)
+ }
+ }
+
+ def serverConfigName(configName: String): Option[String] = serverDefaultConfigNames.get(configName)
+ }
+
+ private val configDef: LogConfigDef = {
+ import org.apache.kafka.common.config.ConfigDef.Importance._
+ import org.apache.kafka.common.config.ConfigDef.Range._
+ import org.apache.kafka.common.config.ConfigDef.Type._
+ import org.apache.kafka.common.config.ConfigDef.ValidString._
+
+ new LogConfigDef()
+ .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), MEDIUM,
+ SegmentSizeDoc, KafkaConfig.LogSegmentBytesProp)
+ .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(0), MEDIUM, SegmentMsDoc,
+ KafkaConfig.LogRollTimeMillisProp)
+ .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc,
+ KafkaConfig.LogRollTimeJitterMillisProp)
+ .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc,
+ KafkaConfig.LogIndexSizeMaxBytesProp)
+ .define(FlushMessagesProp, LONG, Defaults.FlushInterval, atLeast(0), MEDIUM, FlushIntervalDoc,
+ KafkaConfig.LogFlushIntervalMessagesProp)
+ .define(FlushMsProp, LONG, Defaults.FlushMs, atLeast(0), MEDIUM, FlushMsDoc,
+ KafkaConfig.LogFlushIntervalMsProp)
+ // can be negative. See kafka.log.LogManager.cleanupSegmentsToMaintainSize
+ .define(RetentionBytesProp, LONG, Defaults.RetentionSize, MEDIUM, RetentionSizeDoc,
+ KafkaConfig.LogRetentionBytesProp)
+ // can be negative. See kafka.log.LogManager.cleanupExpiredSegments
+ .define(RetentionMsProp, LONG, Defaults.RetentionMs, MEDIUM, RetentionMsDoc,
+ KafkaConfig.LogRetentionTimeMillisProp)
+ .define(MaxMessageBytesProp, INT, Defaults.MaxMessageSize, atLeast(0), MEDIUM, MaxMessageSizeDoc,
+ KafkaConfig.MessageMaxBytesProp)
+ .define(IndexIntervalBytesProp, INT, Defaults.IndexInterval, atLeast(0), MEDIUM, IndexIntervalDoc,
+ KafkaConfig.LogIndexIntervalBytesProp)
+ .define(DeleteRetentionMsProp, LONG, Defaults.DeleteRetentionMs, atLeast(0), MEDIUM,
+ DeleteRetentionMsDoc, KafkaConfig.LogCleanerDeleteRetentionMsProp)
+ .define(MinCompactionLagMsProp, LONG, Defaults.MinCompactionLagMs, atLeast(0), MEDIUM, MinCompactionLagMsDoc,
+ KafkaConfig.LogCleanerMinCompactionLagMsProp)
+ .define(FileDeleteDelayMsProp, LONG, Defaults.FileDeleteDelayMs, atLeast(0), MEDIUM, FileDeleteDelayMsDoc,
+ KafkaConfig.LogDeleteDelayMsProp)
+ .define(MinCleanableDirtyRatioProp, DOUBLE, Defaults.MinCleanableDirtyRatio, between(0, 1), MEDIUM,
+ MinCleanableRatioDoc, KafkaConfig.LogCleanerMinCleanRatioProp)
+ .define(CleanupPolicyProp, LIST, Defaults.Compact, ValidList.in(LogConfig.Compact, LogConfig.Delete), MEDIUM, CompactDoc,
+ KafkaConfig.LogCleanupPolicyProp)
+ .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable,
+ MEDIUM, UncleanLeaderElectionEnableDoc, KafkaConfig.UncleanLeaderElectionEnableProp)
+ .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), MEDIUM, MinInSyncReplicasDoc,
+ KafkaConfig.MinInSyncReplicasProp)
+ .define(CompressionTypeProp, STRING, Defaults.CompressionType, in(BrokerCompressionCodec.brokerCompressionOptions:_*),
+ MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp)
+ .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc,
+ KafkaConfig.LogPreAllocateProp)
+ .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, MEDIUM, MessageFormatVersionDoc,
+ KafkaConfig.LogMessageFormatVersionProp)
+ .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, MEDIUM, MessageTimestampTypeDoc,
+ KafkaConfig.LogMessageTimestampTypeProp)
+ .define(MessageTimestampDifferenceMaxMsProp, LONG, Defaults.MessageTimestampDifferenceMaxMs,
+ atLeast(0), MEDIUM, MessageTimestampDifferenceMaxMsDoc, KafkaConfig.LogMessageTimestampDifferenceMaxMsProp)
+ .define(LeaderReplicationThrottledReplicasProp, LIST, Defaults.LeaderReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ LeaderReplicationThrottledReplicasDoc, LeaderReplicationThrottledReplicasProp)
+ .define(FollowerReplicationThrottledReplicasProp, LIST, Defaults.FollowerReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM,
+ FollowerReplicationThrottledReplicasDoc, FollowerReplicationThrottledReplicasProp)
+ }
+
+ def apply(): LogConfig = LogConfig(new Properties())
+
+ def configNames: Seq[String] = configDef.names.asScala.toSeq.sorted
+
+ def serverConfigName(configName: String): Option[String] = configDef.serverConfigName(configName)
+
+ /**
+ * Create a log config instance using the given properties and defaults
+ */
+ def fromProps(defaults: java.util.Map[_ <: Object, _ <: Object], overrides: Properties): LogConfig = {
+ val props = new Properties()
+ props.putAll(defaults)
+ props.putAll(overrides)
+ LogConfig(props)
+ }
+
+ /**
+ * Check that property names are valid
+ */
+ def validateNames(props: Properties) {
+ val names = configNames
+ for(name <- props.asScala.keys)
+ if (!names.contains(name))
+ throw new InvalidConfigurationException(s"Unknown topic config name: $name")
+ }
+
+ /**
+ * Check that the given properties contain only valid log config names and that all values can be parsed and are valid
+ */
+ def validate(props: Properties) {
+ validateNames(props)
+ configDef.parse(props)
+ }
+
+ def configNamesAndDoc: Seq[(String, String)] = {
+ Option(configDef).fold {
+ configNames.map(n => n -> "")
+ } {
+ configDef =>
+ val keyMap = configDef.configKeys()
+ configNames.map(n => n -> Option(keyMap.get(n)).map(_.documentation).flatMap(Option.apply).getOrElse(""))
+ }
+ }
+}
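
The per-version LogConfig objects expose `serverConfigName` so callers can map a topic-level override back to the broker property it defaults from. A quick sketch; the expected values assume the standard `KafkaConfig` property names:

```scala
// Resolving the "Server Default Property" registered via define(...) above.
LogConfig.serverConfigName("retention.ms")    // Some("log.retention.ms"), assuming standard names
LogConfig.serverConfigName("cleanup.policy")  // Some("log.cleanup.policy")
LogConfig.serverConfigName("no.such.config")  // None
```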
diff --git a/app/kafka/manager/utils/zero81/LogConfig.scala b/app/kafka/manager/utils/zero81/LogConfig.scala
index a4089dc11..c391a7b77 100644
--- a/app/kafka/manager/utils/zero81/LogConfig.scala
+++ b/app/kafka/manager/utils/zero81/LogConfig.scala
@@ -74,7 +74,7 @@ object LogConfig extends TopicConfigs {
val MinCleanableDirtyRatioProp = "min.cleanable.dirty.ratio"
val CleanupPolicyProp = "cleanup.policy"
- val ConfigNames = Set(SegmentBytesProp,
+ val ConfigNames = Seq(SegmentBytesProp,
SegmentMsProp,
SegmentIndexBytesProp,
FlushMessagesProp,
@@ -122,9 +122,9 @@ object LogConfig extends TopicConfigs {
* Check that property names are valid
*/
def validateNames(props: Properties) {
- import scala.collection.JavaConversions._
- for (name <- props.keys)
- require(LogConfig.ConfigNames.contains(name), "Unknown configuration \"%s\".".format(name))
+ import scala.collection.JavaConverters._
+ for (name <- props.keys.asScala)
+ require(LogConfig.ConfigNames.asJava.contains(name), "Unknown configuration \"%s\".".format(name))
}
/**
@@ -134,4 +134,8 @@ object LogConfig extends TopicConfigs {
validateNames(props)
LogConfig.fromProps(LogConfig().toProps, props) // check that we can parse the values
}
+
+ def configNamesAndDoc: Seq[(String, String)] = {
+ configNames.map(n => n -> "")
+ }
}
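
The zero81 hunk above shows the `JavaConversions` to `JavaConverters` migration required for Scala 2.12; a reduced sketch of the same pattern, with illustrative config names:

```scala
// Conversions become explicit .asScala / .asJava calls instead of silent implicits.
import java.util.Properties
import scala.collection.JavaConverters._

val props = new Properties()
props.put("cleanup.policy", "compact")

val configNames = Seq("cleanup.policy", "retention.ms")
for (name <- props.keys.asScala)   // java.util.Enumeration viewed as a Scala iterator
  require(configNames.asJava.contains(name), s"""Unknown configuration "$name".""")
```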
diff --git a/app/kafka/manager/utils/zero82/LogConfig.scala b/app/kafka/manager/utils/zero82/LogConfig.scala
index 461d81efd..db5452f05 100644
--- a/app/kafka/manager/utils/zero82/LogConfig.scala
+++ b/app/kafka/manager/utils/zero82/LogConfig.scala
@@ -18,6 +18,7 @@
package kafka.manager.utils.zero82
import java.util.Properties
+
import kafka.manager.utils.TopicConfigs
/**
@@ -131,7 +132,7 @@ object LogConfig extends TopicConfigs {
val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
val MinInSyncReplicasProp = "min.insync.replicas"
- val ConfigNames = Set(SegmentBytesProp,
+ val ConfigNames = Seq(SegmentBytesProp,
SegmentMsProp,
SegmentJitterMsProp,
SegmentIndexBytesProp,
@@ -215,4 +216,7 @@ object LogConfig extends TopicConfigs {
}
}
+ def configNamesAndDoc: Seq[(String, String)] = {
+ configNames.map(n => n -> "")
+ }
}
\ No newline at end of file
diff --git a/app/kafka/manager/utils/zero90/LogConfig.scala b/app/kafka/manager/utils/zero90/LogConfig.scala
index ce8f9d2c1..46a460cd8 100644
--- a/app/kafka/manager/utils/zero90/LogConfig.scala
+++ b/app/kafka/manager/utils/zero90/LogConfig.scala
@@ -18,11 +18,12 @@
package kafka.manager.utils.zero90
import java.util.Properties
+
import kafka.manager.utils.TopicConfigs
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}
import kafka.message.BrokerCompressionCodec
-import kafka.message.Message
+import org.apache.kafka.common.record.LegacyRecord
object Defaults {
val SegmentSize = kafka.server.Defaults.LogSegmentBytes
@@ -133,7 +134,7 @@ object LogConfig extends TopicConfigs {
import ConfigDef.Importance._
new ConfigDef()
- .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(Message.MinMessageOverhead), MEDIUM, SegmentSizeDoc)
+ .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), MEDIUM, SegmentSizeDoc)
.define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(0), MEDIUM, SegmentMsDoc)
.define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc)
.define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc)
@@ -161,9 +162,9 @@ object LogConfig extends TopicConfigs {
def apply(): LogConfig = LogConfig(new Properties())
- val configNames : Set[String] = {
+ val configNames : Seq[String] = {
import scala.collection.JavaConverters._
- configDef.names().asScala.toSet
+ configDef.names.asScala.toSeq.sorted
}
@@ -181,9 +182,9 @@ object LogConfig extends TopicConfigs {
* Check that property names are valid
*/
def validateNames(props: Properties) {
- import scala.collection.JavaConversions._
+ import scala.collection.JavaConverters._
val names = configDef.names()
- for(name <- props.keys)
+ for(name <- props.keys.asScala)
require(names.contains(name), "Unknown configuration \"%s\".".format(name))
}
@@ -195,4 +196,13 @@ object LogConfig extends TopicConfigs {
configDef.parse(props)
}
+ def configNamesAndDoc: Seq[(String, String)] = {
+ Option(configDef).fold {
+ configNames.map(n => n -> "")
+ } {
+ configDef =>
+ val keyMap = configDef.configKeys()
+ configNames.map(n => n -> Option(keyMap.get(n)).map(_.documentation).flatMap(Option.apply).getOrElse(""))
+ }
+ }
}
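
The new `configNamesAndDoc` pairs each config name with the documentation held in the `ConfigDef`, falling back to an empty string when a key carries no doc. A usage sketch, assuming `"segment.bytes"` is among the defined names:

```scala
// Option(...) in configNamesAndDoc guards against null documentation fields.
val docs: Seq[(String, String)] = LogConfig.configNamesAndDoc
docs.find(_._1 == "segment.bytes").foreach { case (name, doc) =>
  println(s"$name: ${doc.take(60)}...")
}
```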
diff --git a/app/loader/KafkaManagerLoader.scala b/app/loader/KafkaManagerLoader.scala
index d89e2f1f3..33728c168 100644
--- a/app/loader/KafkaManagerLoader.scala
+++ b/app/loader/KafkaManagerLoader.scala
@@ -5,45 +5,47 @@
package loader
-import controllers.KafkaManagerContext
+import controllers.{AssetsComponents, BasicAuthenticationFilter, KafkaManagerContext}
import features.ApplicationFeatures
import models.navigation.Menus
-import play.api.ApplicationLoader
+import play.api.{Application, ApplicationLoader, BuiltInComponentsFromContext, LoggerConfigurator}
import play.api.ApplicationLoader.Context
-import play.api.BuiltInComponentsFromContext
import play.api.i18n.I18nComponents
+import play.api.mvc.Filter
import play.api.routing.Router
import router.Routes
-import controllers.BasicAuthenticationFilter
+
+import scala.concurrent.ExecutionContext
/**
* Created by hiral on 12/2/15.
*/
class KafkaManagerLoader extends ApplicationLoader {
- def load(context: Context) = {
+ def load(context: Context): Application = {
+ LoggerConfigurator(context.environment.classLoader).foreach {
+ _.configure(context.environment, context.initialConfiguration, Map.empty)
+ }
new ApplicationComponents(context).application
}
}
-class ApplicationComponents(context: Context) extends BuiltInComponentsFromContext(context) with I18nComponents {
- private[this] implicit val applicationFeatures = ApplicationFeatures.getApplicationFeatures(context.initialConfiguration.underlying)
- private[this] implicit val menus = new Menus
- private[this] val kafkaManagerContext = new KafkaManagerContext(applicationLifecycle, context.initialConfiguration)
- private[this] lazy val applicationC = new controllers.Application(messagesApi, kafkaManagerContext)
- private[this] lazy val clusterC = new controllers.Cluster(messagesApi, kafkaManagerContext)
- private[this] lazy val topicC = new controllers.Topic(messagesApi, kafkaManagerContext)
- private[this] lazy val logKafkaC = new controllers.Logkafka(messagesApi, kafkaManagerContext)
- private[this] lazy val consumerC = new controllers.Consumer(messagesApi, kafkaManagerContext)
- private[this] lazy val preferredReplicaElectionC= new controllers.PreferredReplicaElection(messagesApi, kafkaManagerContext)
- private[this] lazy val reassignPartitionsC = new controllers.ReassignPartitions(messagesApi, kafkaManagerContext)
- private[this] lazy val kafkaStateCheckC = new controllers.api.KafkaStateCheck(messagesApi, kafkaManagerContext)
- private[this] lazy val assetsC = new controllers.Assets(httpErrorHandler)
- private[this] lazy val webJarsAssetsC = new controllers.WebJarAssets(httpErrorHandler, context.initialConfiguration, context.environment)
- private[this] lazy val apiHealthC = new controllers.ApiHealth(messagesApi)
-
+class ApplicationComponents(context: Context) extends BuiltInComponentsFromContext(context) with I18nComponents with AssetsComponents {
+ implicit val applicationFeatures: ApplicationFeatures = ApplicationFeatures.getApplicationFeatures(context.initialConfiguration.underlying)
+ implicit val menus: Menus = new Menus
+ implicit val ec: ExecutionContext = controllerComponents.executionContext
+ val kafkaManagerContext = new KafkaManagerContext(applicationLifecycle, context.initialConfiguration)
+ private[this] val applicationC = new controllers.Application(controllerComponents, kafkaManagerContext)
+ private[this] lazy val clusterC = new controllers.Cluster(controllerComponents, kafkaManagerContext)
+ private[this] lazy val topicC = new controllers.Topic(controllerComponents, kafkaManagerContext)
+ private[this] lazy val logKafkaC = new controllers.Logkafka(controllerComponents, kafkaManagerContext)
+ private[this] lazy val consumerC = new controllers.Consumer(controllerComponents, kafkaManagerContext)
+ private[this] lazy val preferredReplicaElectionC= new controllers.PreferredReplicaElection(controllerComponents, kafkaManagerContext)
+ private[this] lazy val reassignPartitionsC = new controllers.ReassignPartitions(controllerComponents, kafkaManagerContext)
+ lazy val kafkaStateCheckC = new controllers.api.KafkaStateCheck(controllerComponents, kafkaManagerContext)
+ lazy val apiHealthC = new controllers.ApiHealth(controllerComponents)
- override lazy val httpFilters = Seq(BasicAuthenticationFilter(context.initialConfiguration))
+ override lazy val httpFilters: Seq[Filter] = Seq(BasicAuthenticationFilter(context.initialConfiguration))
override val router: Router = new Routes(
@@ -56,8 +58,7 @@ class ApplicationComponents(context: Context) extends BuiltInComponentsFromConte
preferredReplicaElectionC,
reassignPartitionsC,
kafkaStateCheckC,
- assetsC,
- webJarsAssetsC,
+ assets,
apiHealthC
- ).withPrefix(context.initialConfiguration.getString("play.http.context").orNull)
+ ).withPrefix(context.initialConfiguration.getOptional[String]("play.http.context").orNull)
}
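
The loader changes follow the standard Play 2.6 compile-time DI shape: configure logging explicitly (nothing does it for you without Guice), then build components from the context. A stripped-down sketch with hypothetical names (`MyLoader`, `MyComponents`):

```scala
import play.api.ApplicationLoader.Context
import play.api.routing.Router
import play.api.{Application, ApplicationLoader, BuiltInComponentsFromContext, LoggerConfigurator}
import play.filters.HttpFiltersComponents

class MyLoader extends ApplicationLoader {
  def load(context: Context): Application = {
    // Wire logging before any component touches a Logger.
    LoggerConfigurator(context.environment.classLoader)
      .foreach(_.configure(context.environment, context.initialConfiguration, Map.empty))
    new MyComponents(context).application
  }
}

class MyComponents(context: Context)
  extends BuiltInComponentsFromContext(context)
  with HttpFiltersComponents {
  // Router.empty stands in for the generated router wired in the real loader.
  override val router: Router = Router.empty
}
```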
diff --git a/app/models/form/TopicOperation.scala b/app/models/form/TopicOperation.scala
index cd6180158..8452b6d70 100644
--- a/app/models/form/TopicOperation.scala
+++ b/app/models/form/TopicOperation.scala
@@ -11,7 +11,7 @@ package models.form
sealed trait TopicOperation
-case class TConfig(name: String, value: Option[String])
+case class TConfig(name: String, value: Option[String], help: String)
case class CreateTopic(topic: String, partitions: Int, replication: Int, configs: List[TConfig]) extends TopicOperation
case class DeleteTopic(topic: String) extends TopicOperation
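
With the extra field, topic config forms can surface the documentation pulled from `configNamesAndDoc`; an illustrative construction (values are examples only):

```scala
// The new help field carries per-config documentation alongside the name/value pair.
val cfg = TConfig(
  name  = "retention.ms",
  value = Some("604800000"),
  help  = "This configuration controls the maximum time we will retain a log..."
)
```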
diff --git a/app/models/navigation/QuickRoutes.scala b/app/models/navigation/QuickRoutes.scala
index e785f7ec9..3250d0e06 100644
--- a/app/models/navigation/QuickRoutes.scala
+++ b/app/models/navigation/QuickRoutes.scala
@@ -14,9 +14,9 @@ object QuickRoutes {
import models.navigation.BreadCrumbs._
val baseRoutes : Map[String, () => Call] = Map(
- "Clusters" -> controllers.routes.Application.index,
- "List" -> controllers.routes.Application.index,
- "Add Cluster" -> controllers.routes.Cluster.addCluster
+ "Clusters" -> { () => controllers.routes.Application.index() },
+ "List" -> { () => controllers.routes.Application.index() },
+ "Add Cluster" -> { () => controllers.routes.Cluster.addCluster() }
)
val clusterRoutes : Map[String, String => Call] = Map(
"Update Cluster" -> controllers.routes.Cluster.updateCluster,
diff --git a/app/views/broker/brokerList.scala.html b/app/views/broker/brokerList.scala.html
index 582a03d0c..aa8169aa2 100644
--- a/app/views/broker/brokerList.scala.html
+++ b/app/views/broker/brokerList.scala.html
@@ -5,7 +5,7 @@
@import kafka.manager.model.ActorModel.BrokerIdentity
@import scalaz.{\/}
@(cluster:String, errorOrBrokers: kafka.manager.ApiError \/ kafka.manager.BrokerListExtended
-)(implicit af: features.ApplicationFeatures, messages: play.api.i18n.Messages, menus: models.navigation.Menus)
+)(implicit af: features.ApplicationFeatures, messages: play.api.i18n.Messages, menus: models.navigation.Menus, request: RequestHeader)
@theMenu = {
@views.html.navigation.clusterMenu(cluster,"Brokers","",menus.clusterMenus(cluster)(
@@ -18,7 +18,7 @@
@views.html.common.brokerMetrics(bl.combinedMetric)
} else {
- Please enable JMX polling
here.
+ Please enable JMX polling
here.
}
}
@@ -28,21 +28,25 @@
menu = theMenu,
breadcrumbs=views.html.navigation.breadCrumbs(models.navigation.BreadCrumbs.withViewAndCluster("Brokers",cluster))) {
-
-
+
+
+
@errorOrBrokers.fold( views.html.errors.onApiError(_), views.html.broker.brokerListContent(cluster,_) )
+
-
-
Combined Metrics
+
+
+
@errorOrBrokers.fold( views.html.errors.onApiError(_), bl => renderBrokerMetrics(bl))
+
diff --git a/app/views/broker/brokerListContent.scala.html b/app/views/broker/brokerListContent.scala.html
index 149b19e97..3770a6ea2 100644
--- a/app/views/broker/brokerListContent.scala.html
+++ b/app/views/broker/brokerListContent.scala.html
@@ -3,7 +3,7 @@
* See accompanying LICENSE file.
*@
@import kafka.manager.model.ActorModel.BrokerIdentity
-@(cluster:String, brokerListExtended: kafka.manager.BrokerListExtended)(implicit messages: play.api.i18n.Messages)
+@(cluster:String, brokerListExtended: kafka.manager.BrokerListExtended)(implicit messages: play.api.i18n.Messages, request:RequestHeader)
@@ -22,7 +22,7 @@
@for(broker <- brokerListExtended.list) {
- @broker.id |
+ @broker.id |
@broker.host |
@broker.endpointsString |
@broker.jmxPort |
diff --git a/app/views/broker/brokerView.scala.html b/app/views/broker/brokerView.scala.html
index 44286423a..62b24d992 100644
--- a/app/views/broker/brokerView.scala.html
+++ b/app/views/broker/brokerView.scala.html
@@ -4,7 +4,7 @@
*@
@import scalaz.{\/}
@(cluster:String, brokerId: Int, errorOrBrokerView: kafka.manager.ApiError \/ kafka.manager.model.ActorModel.BVView
-)(implicit af: features.ApplicationFeatures, messages: play.api.i18n.Messages, menus: models.navigation.Menus)
+)(implicit af: features.ApplicationFeatures, messages: play.api.i18n.Messages, menus: models.navigation.Menus, request:RequestHeader)
@theMenu = {
@views.html.navigation.clusterMenu(cluster,"Brokers","",menus.clusterMenus(cluster)(
@@ -29,11 +29,13 @@
breadcrumbs=views.html.navigation.breadCrumbs(models.navigation.BreadCrumbs.withNamedViewAndCluster("Broker View",cluster,brokerId.toString)),
scripts=brokerScripts) {
-
-
-
Broker Id @brokerId
-
+
+
+
@errorOrBrokerView.fold[Html](views.html.errors.onApiError(_), views.html.broker.brokerViewContent(cluster, brokerId, _))
+
}
diff --git a/app/views/broker/brokerViewContent.scala.html b/app/views/broker/brokerViewContent.scala.html
index b57c5f8b7..2d08fe314 100644
--- a/app/views/broker/brokerViewContent.scala.html
+++ b/app/views/broker/brokerViewContent.scala.html
@@ -2,22 +2,23 @@
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*@
-@(cluster: String, brokerId: Int, brokerView :kafka.manager.model.ActorModel.BVView)(implicit messages: play.api.i18n.Messages)
+@(cluster: String, brokerId: Int, brokerView :kafka.manager.model.ActorModel.BVView)(implicit messages: play.api.i18n.Messages, request:RequestHeader)
@renderBrokerMetrics = {
@if(brokerView.clusterContext.clusterFeatures.features(kafka.manager.features.KMJMXMetricsFeature)) {
@views.html.common.brokerMetrics(brokerView.metrics)
} else {
- Please enable JMX polling
here.
+ Please enable JMX polling
here.
}
}
-
-
Summary
+
+
+
# of Topics | @brokerView.numTopics |
@@ -30,19 +31,23 @@
}
+
-
-
Metrics
+
+
+
@renderBrokerMetrics
+
-
-
Per Topic Detail
+
diff --git a/app/views/cluster/addCluster.scala.html b/app/views/cluster/addCluster.scala.html
index 6042e274d..acf42ee84 100644
--- a/app/views/cluster/addCluster.scala.html
+++ b/app/views/cluster/addCluster.scala.html
@@ -2,17 +2,16 @@
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*@
-@(addClusterForm: Form[kafka.manager.model.ClusterConfig])(implicit messages: play.api.i18n.Messages, menus: models.navigation.Menus)
+@(addClusterForm: Form[kafka.manager.model.ClusterConfig])(implicit messages: play.api.i18n.Messages, menus: models.navigation.Menus, request:RequestHeader)
-@import b3.vertical.fieldConstructor
@import controllers.routes
@theMenu = {
@views.html.navigation.defaultMenu(views.html.navigation.menuNav("Cluster","Add Cluster",menus.indexMenu))
}
-@checkboxWithLink(field: play.api.data.Field) = {
-@b3.inputFormGroup(field, withFeedback = false, withLabelFor = false, b3.Args.withDefault(Seq(), 'disabled -> false)) { fieldInfo =>
+@checkboxWithLink(field: play.api.data.Field)(implicit fc: b4.B4FieldConstructor, msgsProv: MessagesProvider) = {
+@b4.inputFormGroup(field, withLabelFor = false, views.html.bs.Args.withDefault(Seq(), 'disabled -> false)) { fieldInfo =>