Kafka Source Code Analysis, Part 1




Kafka Server Startup

The broker entry point is the kafka.Kafka object. Its main method loads server.properties, applies any --override flags on top, installs a shutdown hook, and then blocks until the broker exits:

package kafka

import java.util.Properties

import joptsimple.OptionParser
import kafka.server.{KafkaServer, KafkaServerStartable}
import kafka.utils.{CommandLineUtils, Logging}
import org.apache.kafka.common.utils.Utils

import scala.collection.JavaConverters._

// Kafka mixes in the Logging trait (log4j-backed), so the methods below can
// write directly to the broker's log files.
object Kafka extends Logging {

  def getPropsFromArgs(args: Array[String]): Properties = {
    val optionParser = new OptionParser
    // --override: an optional property that should override values set in the server.properties file
    val overrideOpt = optionParser.accepts("override", "Optional property that should override values set in server.properties file")
      .withRequiredArg()
      .ofType(classOf[String])

    if (args.length == 0) {
      CommandLineUtils.printUsageAndDie(optionParser, "USAGE: java [options] %s server.properties [--override property=value]*".format(classOf[KafkaServer].getSimpleName()))
    }

    // load the server.properties file passed as the first argument
    val props = Utils.loadProps(args(0))

    if (args.length > 1) {
      val options = optionParser.parse(args.slice(1, args.length): _*)

      if (options.nonOptionArguments().size() > 0) {
        CommandLineUtils.printUsageAndDie(optionParser, "Found non argument parameters: " + options.nonOptionArguments().toArray.mkString(","))
      }

      // apply the --override key=value pairs on top of the values loaded from server.properties
      props.putAll(CommandLineUtils.parseKeyValueArgs(options.valuesOf(overrideOpt).asScala))
    }
    props
  }

  def main(args: Array[String]): Unit = {
    try {
      // build the server configuration
      val serverProps = getPropsFromArgs(args)
      // wrap it in a startable server
      val kafkaServerStartable = KafkaServerStartable.fromProps(serverProps)

      // attach shutdown handler to catch control-c
      Runtime.getRuntime().addShutdownHook(new Thread() {
        override def run() = {
          kafkaServerStartable.shutdown
        }
      })

      kafkaServerStartable.startup
      kafkaServerStartable.awaitShutdown
    }
    catch {
      case e: Throwable =>
        fatal(e)
        System.exit(1)
    }
    System.exit(0)
  }
}
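To make the --override mechanics concrete, here is a minimal, self-contained sketch of the precedence rule (OverridePrecedenceDemo is illustrative, not part of Kafka; the real splitting is done by CommandLineUtils.parseKeyValueArgs): a flag parsed by joptsimple overwrites the value that came from the properties file.

import java.util.Properties
import joptsimple.OptionParser
import scala.collection.JavaConverters._

object OverridePrecedenceDemo {
  def main(args: Array[String]): Unit = {
    val parser = new OptionParser
    val overrideOpt = parser.accepts("override").withRequiredArg().ofType(classOf[String])

    val props = new Properties()
    props.setProperty("num.network.threads", "3") // pretend this came from server.properties

    // simulate: kafka-server-start.sh ... --override num.network.threads=8
    val options = parser.parse("--override", "num.network.threads=8")
    options.valuesOf(overrideOpt).asScala.foreach { kv =>
      val Array(k, v) = kv.split("=", 2)
      props.setProperty(k, v)                     // the override wins
    }
    println(props.getProperty("num.network.threads")) // prints 8
  }
}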



KafkaServerStartable is a thin lifecycle wrapper around KafkaServer; it starts the metrics reporters, translates fatal errors into process exit codes, and exposes startup/shutdown/awaitShutdown:

package kafka.server

import java.util.Properties

import kafka.metrics.KafkaMetricsReporter
import kafka.utils.{Logging, VerifiableProperties}

object KafkaServerStartable {
  def fromProps(serverProps: Properties) = {
    val reporters = KafkaMetricsReporter.startReporters(new VerifiableProperties(serverProps))
    new KafkaServerStartable(KafkaConfig.fromProps(serverProps), reporters)
  }
}

class KafkaServerStartable(val serverConfig: KafkaConfig, reporters: Seq[KafkaMetricsReporter]) extends Logging {
  private val server = new KafkaServer(serverConfig, kafkaMetricsReporters = reporters)

  def this(serverConfig: KafkaConfig) = this(serverConfig, Seq.empty)

  def startup() {
    try {
      server.startup()
    }
    catch {
      case e: Throwable =>
        fatal("Fatal error during KafkaServerStartable startup. Prepare to shutdown", e)
        // KafkaServer already calls shutdown() internally, so this is purely for logging & the exit code
        System.exit(1)
    }
  }

  def shutdown() {
    try {
      server.shutdown()
    }
    catch {
      case e: Throwable =>
        fatal("Fatal error during KafkaServerStable shutdown. Prepare to halt", e)
        // Calling exit() can lead to deadlock as exit() can be called multiple times. Force exit.
        Runtime.getRuntime.halt(1)
    }
  }

  /**
   * Allow setting broker state from the startable.
   * This is needed when a custom kafka server startable wants to emit new states that it introduces.
   */
  def setServerState(newState: Byte) {
    server.brokerState.newState(newState)
  }

  def awaitShutdown() =
    server.awaitShutdown
}
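Because fromProps only needs a Properties object, the same lifecycle can be driven programmatically. A minimal sketch, mirroring Kafka.main (the property values are illustrative, and it assumes a ZooKeeper ensemble reachable at localhost:2181):

import java.util.Properties
import kafka.server.KafkaServerStartable

object EmbeddedBroker {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.setProperty("zookeeper.connect", "localhost:2181")
    props.setProperty("broker.id", "0")
    props.setProperty("log.dirs", "/tmp/kafka-demo-logs")

    val startable = KafkaServerStartable.fromProps(props)
    sys.addShutdownHook(startable.shutdown()) // mirror the control-c hook
    startable.startup()
    startable.awaitShutdown()
  }
}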



KafkaServer is the heart of the broker: it owns every major subsystem (log manager, socket server, replica manager, controller, group coordinator, ...) and drives their startup and shutdown order:

package kafka.server

import java.io.{File, IOException}
import java.net.SocketTimeoutException
import java.util
import java.util.concurrent._
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}

import com.yammer.metrics.core.Gauge
import kafka.admin.AdminUtils
import kafka.api.KAFKA_0_9_0
import kafka.cluster.{Broker, EndPoint}
import kafka.common.{GenerateBrokerIdException, InconsistentBrokerIdException}
import kafka.controller.{ControllerStats, KafkaController}
import kafka.coordinator.GroupCoordinator
import kafka.log.{CleanerConfig, LogConfig, LogManager}
import kafka.metrics.{KafkaMetricsGroup, KafkaMetricsReporter}
import kafka.network.{BlockingChannel, SocketServer}
import kafka.security.CredentialProvider
import kafka.security.auth.Authorizer
import kafka.utils._
import org.I0Itec.zkclient.ZkClient
import org.apache.kafka.clients.{ClientRequest, ManualMetadataUpdater, NetworkClient}
import org.apache.kafka.common.internals.ClusterResourceListeners
import org.apache.kafka.common.metrics.{JmxReporter, Metrics, _}
import org.apache.kafka.common.network._
import org.apache.kafka.common.protocol.{ApiKeys, Errors, SecurityProtocol}
import org.apache.kafka.common.requests.{ControlledShutdownRequest, ControlledShutdownResponse}
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.utils.{AppInfoParser, Time}
import org.apache.kafka.common.{ClusterResource, Node}

import scala.collection.JavaConverters._
import scala.collection.{Map, mutable}

object KafkaServer {
  // Copy the subset of properties that are relevant to Logs
  // I'm listing out individual properties here since the names are slightly different in each Config class...
  private[kafka] def copyKafkaConfigToLog(kafkaConfig: KafkaConfig): java.util.Map[String, Object] = {
    val logProps = new util.HashMap[String, Object]()
    logProps.put(LogConfig.SegmentBytesProp, kafkaConfig.logSegmentBytes)
    logProps.put(LogConfig.SegmentMsProp, kafkaConfig.logRollTimeMillis)
    logProps.put(LogConfig.SegmentJitterMsProp, kafkaConfig.logRollTimeJitterMillis)
    logProps.put(LogConfig.SegmentIndexBytesProp, kafkaConfig.logIndexSizeMaxBytes)
    logProps.put(LogConfig.FlushMessagesProp, kafkaConfig.logFlushIntervalMessages)
    logProps.put(LogConfig.FlushMsProp, kafkaConfig.logFlushIntervalMs)
    logProps.put(LogConfig.RetentionBytesProp, kafkaConfig.logRetentionBytes)
    logProps.put(LogConfig.RetentionMsProp, kafkaConfig.logRetentionTimeMillis: java.lang.Long)
    logProps.put(LogConfig.MaxMessageBytesProp, kafkaConfig.messageMaxBytes)
    logProps.put(LogConfig.IndexIntervalBytesProp, kafkaConfig.logIndexIntervalBytes)
    logProps.put(LogConfig.DeleteRetentionMsProp, kafkaConfig.logCleanerDeleteRetentionMs)
    logProps.put(LogConfig.MinCompactionLagMsProp, kafkaConfig.logCleanerMinCompactionLagMs)
    logProps.put(LogConfig.FileDeleteDelayMsProp, kafkaConfig.logDeleteDelayMs)
    logProps.put(LogConfig.MinCleanableDirtyRatioProp, kafkaConfig.logCleanerMinCleanRatio)
    logProps.put(LogConfig.CleanupPolicyProp, kafkaConfig.logCleanupPolicy)
    logProps.put(LogConfig.MinInSyncReplicasProp, kafkaConfig.minInSyncReplicas)
    logProps.put(LogConfig.CompressionTypeProp, kafkaConfig.compressionType)
    logProps.put(LogConfig.UncleanLeaderElectionEnableProp, kafkaConfig.uncleanLeaderElectionEnable)
    logProps.put(LogConfig.PreAllocateEnableProp, kafkaConfig.logPreAllocateEnable)
    logProps.put(LogConfig.MessageFormatVersionProp, kafkaConfig.logMessageFormatVersion.version)
    logProps.put(LogConfig.MessageTimestampTypeProp, kafkaConfig.logMessageTimestampType.name)
    logProps.put(LogConfig.MessageTimestampDifferenceMaxMsProp, kafkaConfig.logMessageTimestampDifferenceMaxMs)
    logProps
  }

  private[server] def metricConfig(kafkaConfig: KafkaConfig): MetricConfig = {
    new MetricConfig()
      .samples(kafkaConfig.metricNumSamples)
      .recordLevel(Sensor.RecordingLevel.forName(kafkaConfig.metricRecordingLevel))
      .timeWindow(kafkaConfig.metricSampleWindowMs, TimeUnit.MILLISECONDS)
  }
}

/**
 * Represents the lifecycle of a single Kafka broker. Handles all functionality required
 * to start up and shutdown a single Kafka node.
 */
class KafkaServer(val config: KafkaConfig, time: Time = Time.SYSTEM, threadNamePrefix: Option[String] = None,
                  kafkaMetricsReporters: Seq[KafkaMetricsReporter] = List()) extends Logging with KafkaMetricsGroup {

  private val startupComplete = new AtomicBoolean(false)
  private val isShuttingDown = new AtomicBoolean(false)
  private val isStartingUp = new AtomicBoolean(false)

  private var shutdownLatch = new CountDownLatch(1)

  private val jmxPrefix: String = "kafka.server"

  var metrics: Metrics = null

  val brokerState: BrokerState = new BrokerState

  var apis: KafkaApis = null
  var authorizer: Option[Authorizer] = None
  var socketServer: SocketServer = null
  var requestHandlerPool: KafkaRequestHandlerPool = null

  var logManager: LogManager = null

  var replicaManager: ReplicaManager = null
  var adminManager: AdminManager = null

  var dynamicConfigHandlers: Map[String, ConfigHandler] = null
  var dynamicConfigManager: DynamicConfigManager = null
  var credentialProvider: CredentialProvider = null

  var groupCoordinator: GroupCoordinator = null

  var kafkaController: KafkaController = null

  val kafkaScheduler = new KafkaScheduler(config.backgroundThreads)

  var kafkaHealthcheck: KafkaHealthcheck = null
  var metadataCache: MetadataCache = null
  var quotaManagers: QuotaFactory.QuotaManagers = null

  var zkUtils: ZkUtils = null

  val correlationId: AtomicInteger = new AtomicInteger(0)
  val brokerMetaPropsFile = "meta.properties"
  val brokerMetadataCheckpoints = config.logDirs.map(logDir =>
    (logDir, new BrokerMetadataCheckpoint(new File(logDir + File.separator + brokerMetaPropsFile)))).toMap

  private var _clusterId: String = null

  def clusterId: String = _clusterId

  newGauge(
    "BrokerState",
    new Gauge[Int] {
      def value = brokerState.currentState
    }
  )

  newGauge(
    "ClusterId",
    new Gauge[String] {
      def value = clusterId
    }
  )

  newGauge(
    "yammer-metrics-count",
    new Gauge[Int] {
      def value = {
        com.yammer.metrics.Metrics.defaultRegistry().allMetrics().size()
      }
    }
  )

  /**
   * Start up API for bringing up a single instance of the Kafka server.
   * Instantiates the LogManager, the SocketServer and the request handlers - KafkaRequestHandlers
   */
  def startup() {
    try {
      info("starting")

      if (isShuttingDown.get)
        throw new IllegalStateException("Kafka server is still shutting down, cannot re-start!")

      if (startupComplete.get)
        return

      val canStartup = isStartingUp.compareAndSet(false, true)
      if (canStartup) {
        brokerState.newState(Starting)

        /* start scheduler */
        kafkaScheduler.startup()

        /* setup zookeeper */
        zkUtils = initZk()

        /* Get or create cluster_id */
        _clusterId = getOrGenerateClusterId(zkUtils)
        info(s"Cluster ID = $clusterId")

        /* generate brokerId */
        config.brokerId = getBrokerId
        this.logIdent = "[Kafka Server " + config.brokerId + "], "

        /* create and configure metrics */
        val reporters = config.getConfiguredInstances(KafkaConfig.MetricReporterClassesProp, classOf[MetricsReporter],
          Map[String, AnyRef](KafkaConfig.BrokerIdProp -> (config.brokerId.toString)).asJava)
        reporters.add(new JmxReporter(jmxPrefix))
        val metricConfig = KafkaServer.metricConfig(config)
        metrics = new Metrics(metricConfig, reporters, time, true)

        quotaManagers = QuotaFactory.instantiate(config, metrics, time)
        notifyClusterListeners(kafkaMetricsReporters ++ reporters.asScala)

        /* start log manager */
        logManager = createLogManager(zkUtils.zkClient, brokerState)
        logManager.startup()

        metadataCache = new MetadataCache(config.brokerId)
        credentialProvider = new CredentialProvider(config.saslEnabledMechanisms)

        socketServer = new SocketServer(config, metrics, time, credentialProvider)
        socketServer.startup()

        /* start replica manager */
        replicaManager = new ReplicaManager(config, metrics, time, zkUtils, kafkaScheduler, logManager,
          isShuttingDown, quotaManagers.follower)
        replicaManager.startup()

        /* start kafka controller */
        kafkaController = new KafkaController(config, zkUtils, brokerState, time, metrics, threadNamePrefix)
        kafkaController.startup()

        adminManager = new AdminManager(config, metrics, metadataCache, zkUtils)

        /* start group coordinator */
        // Hardcode Time.SYSTEM for now as some Streams tests fail otherwise, it would be good to fix the underlying issue
        groupCoordinator = GroupCoordinator(config, zkUtils, replicaManager, Time.SYSTEM)
        groupCoordinator.startup()

        /* Get the authorizer and initialize it if one is specified.*/
        authorizer = Option(config.authorizerClassName).filter(_.nonEmpty).map { authorizerClassName =>
          val authZ = CoreUtils.createObject[Authorizer](authorizerClassName)
          authZ.configure(config.originals())
          authZ
        }

        /* start processing requests */
        apis = new KafkaApis(socketServer.requestChannel, replicaManager, adminManager, groupCoordinator,
          kafkaController, zkUtils, config.brokerId, config, metadataCache, metrics, authorizer, quotaManagers,
          clusterId, time)

        requestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.requestChannel, apis, time,
          config.numIoThreads)

        Mx4jLoader.maybeLoad()

        /* start dynamic config manager */
        dynamicConfigHandlers = Map[String, ConfigHandler](ConfigType.Topic -> new TopicConfigHandler(logManager, config, quotaManagers),
                                                           ConfigType.Client -> new ClientIdConfigHandler(quotaManagers),
                                                           ConfigType.User -> new UserConfigHandler(quotaManagers, credentialProvider),
                                                           ConfigType.Broker -> new BrokerConfigHandler(config, quotaManagers))

        // Create the config manager. start listening to notifications
        dynamicConfigManager = new DynamicConfigManager(zkUtils, dynamicConfigHandlers)
        dynamicConfigManager.startup()

        /* tell everyone we are alive */
        val listeners = config.advertisedListeners.map { endpoint =>
          if (endpoint.port == 0)
            endpoint.copy(port = socketServer.boundPort(endpoint.listenerName))
          else
            endpoint
        }
        kafkaHealthcheck = new KafkaHealthcheck(config.brokerId, listeners, zkUtils, config.rack,
          config.interBrokerProtocolVersion)
        kafkaHealthcheck.startup()

        // Now that the broker id is successfully registered via KafkaHealthcheck, checkpoint it
        checkpointBrokerId(config.brokerId)

        /* register broker metrics */
        registerStats()

        brokerState.newState(RunningAsBroker)
        shutdownLatch = new CountDownLatch(1)
        startupComplete.set(true)
        isStartingUp.set(false)
        AppInfoParser.registerAppInfo(jmxPrefix, config.brokerId.toString)
        info("started")
      }
    }
    catch {
      case e: Throwable =>
        fatal("Fatal error during KafkaServer startup. Prepare to shutdown", e)
        isStartingUp.set(false)
        shutdown()
        throw e
    }
  }

  def notifyClusterListeners(clusterListeners: Seq[AnyRef]): Unit = {
    val clusterResourceListeners = new ClusterResourceListeners
    clusterResourceListeners.maybeAddAll(clusterListeners.asJava)
    clusterResourceListeners.onUpdate(new ClusterResource(clusterId))
  }

  private def initZk(): ZkUtils = {
    info(s"Connecting to zookeeper on ${config.zkConnect}")

    val chrootIndex = config.zkConnect.indexOf("/")
    val chrootOption = {
      if (chrootIndex > 0) Some(config.zkConnect.substring(chrootIndex))
      else None
    }

    val secureAclsEnabled = config.zkEnableSecureAcls
    val isZkSecurityEnabled = JaasUtils.isZkSecurityEnabled()

    if (secureAclsEnabled && !isZkSecurityEnabled)
      throw new java.lang.SecurityException(s"${KafkaConfig.ZkEnableSecureAclsProp} is true, but the verification of the JAAS login file failed.")

    chrootOption.foreach { chroot =>
      val zkConnForChrootCreation = config.zkConnect.substring(0, chrootIndex)
      val zkClientForChrootCreation = ZkUtils(zkConnForChrootCreation,
                                              sessionTimeout = config.zkSessionTimeoutMs,
                                              connectionTimeout = config.zkConnectionTimeoutMs,
                                              secureAclsEnabled)
      zkClientForChrootCreation.makeSurePersistentPathExists(chroot)
      info(s"Created zookeeper path $chroot")
      zkClientForChrootCreation.zkClient.close()
    }

    val zkUtils = ZkUtils(config.zkConnect,
                          sessionTimeout = config.zkSessionTimeoutMs,
                          connectionTimeout = config.zkConnectionTimeoutMs,
                          secureAclsEnabled)
    zkUtils.setupCommonPaths()
    zkUtils
  }

  def getOrGenerateClusterId(zkUtils: ZkUtils): String = {
    zkUtils.getClusterId.getOrElse(zkUtils.createOrGetClusterId(CoreUtils.generateUuidAsBase64))
  }

  /**
   *  Forces some dynamic jmx beans to be registered on server startup.
   */
  private def registerStats() {
    BrokerTopicStats.getBrokerAllTopicsStats()
    ControllerStats.uncleanLeaderElectionRate
    ControllerStats.leaderElectionTimer
  }

  /**
   *  Performs controlled shutdown
   */
  private def controlledShutdown() {

    def node(broker: Broker): Node = {
      val brokerEndPoint = broker.getBrokerEndPoint(config.interBrokerListenerName)
      new Node(brokerEndPoint.id, brokerEndPoint.host, brokerEndPoint.port)
    }

    val socketTimeoutMs = config.controllerSocketTimeoutMs

    def networkClientControlledShutdown(retries: Int): Boolean = {
      val metadataUpdater = new ManualMetadataUpdater()
      val networkClient = {
        val channelBuilder = ChannelBuilders.clientChannelBuilder(
          config.interBrokerSecurityProtocol,
          LoginType.SERVER,
          config.values,
          config.saslMechanismInterBrokerProtocol,
          config.saslInterBrokerHandshakeRequestEnable)
        val selector = new Selector(
          NetworkReceive.UNLIMITED,
          config.connectionsMaxIdleMs,
          metrics,
          time,
          "kafka-server-controlled-shutdown",
          Map.empty.asJava,
          false,
          channelBuilder
        )
        new NetworkClient(
          selector,
          metadataUpdater,
          config.brokerId.toString,
          1,
          0,
          Selectable.USE_DEFAULT_BUFFER_SIZE,
          Selectable.USE_DEFAULT_BUFFER_SIZE,
          config.requestTimeoutMs,
          time,
          false)
      }

      var shutdownSucceeded: Boolean = false

      try {
        var remainingRetries = retries
        var prevController: Broker = null
        var ioException = false

        while (!shutdownSucceeded && remainingRetries > 0) {
          remainingRetries = remainingRetries - 1

          import NetworkClientBlockingOps._

          // 1. Find the controller and establish a connection to it.

          // Get the current controller info. This is to ensure we use the most recent info to issue the
          // controlled shutdown request
          val controllerId = zkUtils.getController()
          zkUtils.getBrokerInfo(controllerId) match {
            case Some(broker) =>
              // if this is the first attempt, if the controller has changed or if an exception was thrown in a previous
              // attempt, connect to the most recent controller
              if (ioException || broker != prevController) {

                ioException = false

                if (prevController != null)
                  networkClient.close(node(prevController).idString)

                prevController = broker
                metadataUpdater.setNodes(Seq(node(prevController)).asJava)
              }
            case None => //ignore and try again
          }

          // 2. issue a controlled shutdown to the controller
          if (prevController != null) {
            try {

              if (!networkClient.blockingReady(node(prevController), socketTimeoutMs)(time))
                throw new SocketTimeoutException(s"Failed to connect within $socketTimeoutMs ms")

              // send the controlled shutdown request
              val controlledShutdownRequest = new ControlledShutdownRequest.Builder(config.brokerId)
              val request = networkClient.newClientRequest(node(prevController).idString, controlledShutdownRequest,
                time.milliseconds(), true)
              val clientResponse = networkClient.blockingSendAndReceive(request)(time)

              val shutdownResponse = clientResponse.responseBody.asInstanceOf[ControlledShutdownResponse]
              if (shutdownResponse.errorCode == Errors.NONE.code && shutdownResponse.partitionsRemaining.isEmpty) {
                shutdownSucceeded = true
                info("Controlled shutdown succeeded")
              }
              else {
                info("Remaining partitions to move: %s".format(shutdownResponse.partitionsRemaining.asScala.mkString(",")))
                info("Error code from controller: %d".format(shutdownResponse.errorCode))
              }
            }
            catch {
              case ioe: IOException =>
                ioException = true
                warn("Error during controlled shutdown, possibly because leader movement took longer than the configured socket.timeout.ms: %s".format(ioe.getMessage))
                // ignore and try again
            }
          }
          if (!shutdownSucceeded) {
            Thread.sleep(config.controlledShutdownRetryBackoffMs)
            warn("Retrying controlled shutdown after the previous attempt failed...")
          }
        }
      }
      finally
        networkClient.close()

      shutdownSucceeded
    }

    def blockingChannelControlledShutdown(retries: Int): Boolean = {
      var remainingRetries = retries
      var channel: BlockingChannel = null
      var prevController: Broker = null
      var shutdownSucceeded: Boolean = false
      try {
        while (!shutdownSucceeded && remainingRetries > 0) {
          remainingRetries = remainingRetries - 1

          // 1. Find the controller and establish a connection to it.

          // Get the current controller info. This is to ensure we use the most recent info to issue the
          // controlled shutdown request
          val controllerId = zkUtils.getController()
          zkUtils.getBrokerInfo(controllerId) match {
            case Some(broker) =>
              if (channel == null || prevController == null || !prevController.equals(broker)) {
                // if this is the first attempt or if the controller has changed, create a channel to the most recent
                // controller
                if (channel != null)
                  channel.disconnect()

                val brokerEndPoint = broker.getBrokerEndPoint(config.interBrokerListenerName)
                channel = new BlockingChannel(brokerEndPoint.host,
                  brokerEndPoint.port,
                  BlockingChannel.UseDefaultBufferSize,
                  BlockingChannel.UseDefaultBufferSize,
                  config.controllerSocketTimeoutMs)
                channel.connect()
                prevController = broker
              }
            case None => //ignore and try again
          }

          // 2. issue a controlled shutdown to the controller
          if (channel != null) {
            var response: NetworkReceive = null
            try {
              // send the controlled shutdown request
              val request = new kafka.api.ControlledShutdownRequest(0, correlationId.getAndIncrement, None, config.brokerId)
              channel.send(request)

              response = channel.receive()
              val shutdownResponse = kafka.api.ControlledShutdownResponse.readFrom(response.payload())
              if (shutdownResponse.errorCode == Errors.NONE.code && shutdownResponse.partitionsRemaining != null &&
                shutdownResponse.partitionsRemaining.isEmpty) {
                shutdownSucceeded = true
                info("Controlled shutdown succeeded")
              }
              else {
                info("Remaining partitions to move: %s".format(shutdownResponse.partitionsRemaining.mkString(",")))
                info("Error code from controller: %d".format(shutdownResponse.errorCode))
              }
            }
            catch {
              case ioe: java.io.IOException =>
                channel.disconnect()
                channel = null
                warn("Error during controlled shutdown, possibly because leader movement took longer than the configured socket.timeout.ms: %s".format(ioe.getMessage))
                // ignore and try again
            }
          }
          if (!shutdownSucceeded) {
            Thread.sleep(config.controlledShutdownRetryBackoffMs)
            warn("Retrying controlled shutdown after the previous attempt failed...")
          }
        }
      }
      finally {
        if (channel != null) {
          channel.disconnect()
          channel = null
        }
      }
      shutdownSucceeded
    }

    if (startupComplete.get() && config.controlledShutdownEnable) {
      // We request the controller to do a controlled shutdown. On failure, we backoff for a configured period
      // of time and try again for a configured number of retries. If all the attempt fails, we simply force
      // the shutdown.
      info("Starting controlled shutdown")

      brokerState.newState(PendingControlledShutdown)

      val shutdownSucceeded =
        // Before 0.9.0.0, `ControlledShutdownRequest` did not contain `client_id` and it's a mandatory field in
        // `RequestHeader`, which is used by `NetworkClient`
        if (config.interBrokerProtocolVersion >= KAFKA_0_9_0)
          networkClientControlledShutdown(config.controlledShutdownMaxRetries.intValue)
        else blockingChannelControlledShutdown(config.controlledShutdownMaxRetries.intValue)

      if (!shutdownSucceeded)
        warn("Proceeding to do an unclean shutdown as all the controlled shutdown attempts failed")
    }
  }

  /**
   * Shutdown API for shutting down a single instance of the Kafka server.
   * Shuts down the LogManager, the SocketServer and the log cleaner scheduler thread
   */
  def shutdown() {
    try {
      info("shutting down")

      if (isStartingUp.get)
        throw new IllegalStateException("Kafka server is still starting up, cannot shut down!")

      // To ensure correct behavior under concurrent calls, we need to check `shutdownLatch` first since it gets updated
      // last in the `if` block. If the order is reversed, we could shutdown twice or leave `isShuttingDown` set to
      // `true` at the end of this method.
      if (shutdownLatch.getCount > 0 && isShuttingDown.compareAndSet(false, true)) {
        CoreUtils.swallow(controlledShutdown())
        brokerState.newState(BrokerShuttingDown)

        if (socketServer != null)
          CoreUtils.swallow(socketServer.shutdown())
        if (requestHandlerPool != null)
          CoreUtils.swallow(requestHandlerPool.shutdown())
        CoreUtils.swallow(kafkaScheduler.shutdown())

        if (apis != null)
          CoreUtils.swallow(apis.close())
        CoreUtils.swallow(authorizer.foreach(_.close()))

        if (replicaManager != null)
          CoreUtils.swallow(replicaManager.shutdown())
        if (adminManager != null)
          CoreUtils.swallow(adminManager.shutdown())

        if (groupCoordinator != null)
          CoreUtils.swallow(groupCoordinator.shutdown())

        if (logManager != null)
          CoreUtils.swallow(logManager.shutdown())

        if (kafkaController != null)
          CoreUtils.swallow(kafkaController.shutdown())

        if (zkUtils != null)
          CoreUtils.swallow(zkUtils.close())

        if (metrics != null)
          CoreUtils.swallow(metrics.close())

        brokerState.newState(NotRunning)

        startupComplete.set(false)
        isShuttingDown.set(false)
        CoreUtils.swallow(AppInfoParser.unregisterAppInfo(jmxPrefix, config.brokerId.toString))
        shutdownLatch.countDown()
        info("shut down completed")
      }
    }
    catch {
      case e: Throwable =>
        fatal("Fatal error during KafkaServer shutdown.", e)
        isShuttingDown.set(false)
        throw e
    }
  }

  /**
   * After calling shutdown(), use this API to wait until the shutdown is complete
   */
  def awaitShutdown(): Unit = shutdownLatch.await()

  def getLogManager(): LogManager = logManager

  def boundPort(listenerName: ListenerName): Int = socketServer.boundPort(listenerName)

  private def createLogManager(zkClient: ZkClient, brokerState: BrokerState): LogManager = {
    val defaultProps = KafkaServer.copyKafkaConfigToLog(config)
    val defaultLogConfig = LogConfig(defaultProps)

    val configs = AdminUtils.fetchAllTopicConfigs(zkUtils).map { case (topic, configs) =>
      topic -> LogConfig.fromProps(defaultProps, configs)
    }
    // read the log configurations from zookeeper
    val cleanerConfig = CleanerConfig(numThreads = config.logCleanerThreads,
                                      dedupeBufferSize = config.logCleanerDedupeBufferSize,
                                      dedupeBufferLoadFactor = config.logCleanerDedupeBufferLoadFactor,
                                      ioBufferSize = config.logCleanerIoBufferSize,
                                      maxMessageSize = config.messageMaxBytes,
                                      maxIoBytesPerSecond = config.logCleanerIoMaxBytesPerSecond,
                                      backOffMs = config.logCleanerBackoffMs,
                                      enableCleaner = config.logCleanerEnable)
    new LogManager(logDirs = config.logDirs.map(new File(_)).toArray,
                   topicConfigs = configs,
                   defaultConfig = defaultLogConfig,
                   cleanerConfig = cleanerConfig,
                   ioThreads = config.numRecoveryThreadsPerDataDir,
                   flushCheckMs = config.logFlushSchedulerIntervalMs,
                   flushCheckpointMs = config.logFlushOffsetCheckpointIntervalMs,
                   retentionCheckMs = config.logCleanupIntervalMs,
                   scheduler = kafkaScheduler,
                   brokerState = brokerState,
                   time = time)
  }

  /**
    * Generates new brokerId if enabled or reads from meta.properties based on following conditions
    * <ol>
    * <li> config has no broker.id provided and broker id generation is enabled, generates a broker.id based on Zookeeper's sequence
    * <li> stored broker.id in meta.properties doesn't match in all the log.dirs throws InconsistentBrokerIdException
    * <li> config has broker.id and meta.properties contains broker.id if they don't match throws InconsistentBrokerIdException
    * <li> config has broker.id and there is no meta.properties file, creates new meta.properties and stores broker.id
    * <ol>
    *
    * @return A brokerId.
    */
  private def getBrokerId: Int = {
    var brokerId = config.brokerId
    val brokerIdSet = mutable.HashSet[Int]()

    for (logDir <- config.logDirs) {
      val brokerMetadataOpt = brokerMetadataCheckpoints(logDir).read()
      brokerMetadataOpt.foreach { brokerMetadata =>
        brokerIdSet.add(brokerMetadata.brokerId)
      }
    }

    if (brokerIdSet.size > 1)
      throw new InconsistentBrokerIdException(
        s"Failed to match broker.id across log.dirs. This could happen if multiple brokers shared a log directory (log.dirs) " +
        s"or partial data was manually copied from another broker. Found $brokerIdSet")
    else if (brokerId >= 0 && brokerIdSet.size == 1 && brokerIdSet.last != brokerId)
      throw new InconsistentBrokerIdException(
        s"Configured broker.id $brokerId doesn't match stored broker.id ${brokerIdSet.last} in meta.properties. " +
        s"If you moved your data, make sure your configured broker.id matches. " +
        s"If you intend to create a new broker, you should remove all data in your data directories (log.dirs).")
    else if (brokerIdSet.isEmpty && brokerId < 0 && config.brokerIdGenerationEnable)  // generate a new brokerId from Zookeeper
      brokerId = generateBrokerId
    else if (brokerIdSet.size == 1) // pick broker.id from meta.properties
      brokerId = brokerIdSet.last

    brokerId
  }

  private def checkpointBrokerId(brokerId: Int) {
    var logDirsWithoutMetaProps: List[String] = List()

    for (logDir <- config.logDirs) {
      val brokerMetadataOpt = brokerMetadataCheckpoints(logDir).read()
      if (brokerMetadataOpt.isEmpty)
        logDirsWithoutMetaProps ++= List(logDir)
    }

    for (logDir <- logDirsWithoutMetaProps) {
      val checkpoint = brokerMetadataCheckpoints(logDir)
      checkpoint.write(BrokerMetadata(brokerId))
    }
  }

  private def generateBrokerId: Int = {
    try {
      zkUtils.getBrokerSequenceId(config.maxReservedBrokerId)
    } catch {
      case e: Exception =>
        error("Failed to generate broker.id due to ", e)
        throw new GenerateBrokerIdException("Failed to generate broker.id", e)
    }
  }
}
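The broker.id resolution in getBrokerId is essentially a four-way decision table over the configured id and the ids checkpointed in meta.properties. Here is a simplified, self-contained restatement (resolveBrokerId is illustrative, not the Kafka API; the real code throws InconsistentBrokerIdException and reads one meta.properties per log.dir):

// Simplified restatement of getBrokerId's rules.
def resolveBrokerId(configured: Int,            // broker.id from server.properties, -1 if unset
                    stored: Set[Int],           // broker.ids found in meta.properties across log.dirs
                    generationEnabled: Boolean, // broker.id.generation.enable
                    generate: () => Int): Int = // ZooKeeper sequence-based generator
  if (stored.size > 1)
    throw new IllegalStateException(s"broker.id differs across log.dirs: $stored")
  else if (configured >= 0 && stored.size == 1 && stored.head != configured)
    throw new IllegalStateException(s"configured broker.id $configured != stored ${stored.head}")
  else if (stored.isEmpty && configured < 0 && generationEnabled)
    generate()      // mint a new id from ZooKeeper
  else if (stored.size == 1)
    stored.head     // reuse the checkpointed id
  else
    configured      // first start with an explicit broker.id; checkpointed afterwards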


The network layer lives in kafka.network.SocketServer: one Acceptor thread per listener hands accepted sockets to a pool of Processor threads, each with its own selector:

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.network

import java.io.IOException
import java.net._
import java.nio.channels._
import java.nio.channels.{Selector => NSelector}
import java.util
import java.util.concurrent._
import java.util.concurrent.atomic._

import com.yammer.metrics.core.Gauge
import kafka.cluster.{BrokerEndPoint, EndPoint}
import kafka.common.KafkaException
import kafka.metrics.KafkaMetricsGroup
import kafka.security.CredentialProvider
import kafka.server.KafkaConfig
import kafka.utils._
import org.apache.kafka.common.errors.InvalidRequestException
import org.apache.kafka.common.metrics._
import org.apache.kafka.common.network.{ChannelBuilders, KafkaChannel, ListenerName, LoginType, Mode, Selectable, Selector => KSelector}
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.kafka.common.protocol.types.SchemaException
import org.apache.kafka.common.utils.{Time, Utils}

import scala.collection._
import JavaConverters._
import scala.util.control.{ControlThrowable, NonFatal}

/**
 * An NIO socket server. The threading model is
 *   1 Acceptor thread that handles new connections
 *   Acceptor has N Processor threads that each have their own selector and read requests from sockets
 *   M Handler threads that handle requests and produce responses back to the processor threads for writing.
 */
class SocketServer(val config: KafkaConfig, val metrics: Metrics, val time: Time, val credentialProvider: CredentialProvider) extends Logging with KafkaMetricsGroup {

  private val endpoints = config.listeners.map(l => l.listenerName -> l).toMap
  private val numProcessorThreads = config.numNetworkThreads
  private val maxQueuedRequests = config.queuedMaxRequests
  private val totalProcessorThreads = numProcessorThreads * endpoints.size

  private val maxConnectionsPerIp = config.maxConnectionsPerIp
  private val maxConnectionsPerIpOverrides = config.maxConnectionsPerIpOverrides

  this.logIdent = "[Socket Server on Broker " + config.brokerId + "], "

  val requestChannel = new RequestChannel(totalProcessorThreads, maxQueuedRequests)
  private val processors = new Array[Processor](totalProcessorThreads)

  private[network] val acceptors = mutable.Map[EndPoint, Acceptor]()
  private var connectionQuotas: ConnectionQuotas = _

  private val allMetricNames = (0 until totalProcessorThreads).map { i =>
    val tags = new util.HashMap[String, String]()
    tags.put("networkProcessor", i.toString)
    metrics.metricName("io-wait-ratio", "socket-server-metrics", tags)
  }

  /**
   * Start the socket server
   */
  def startup() {
    this.synchronized {

      connectionQuotas = new ConnectionQuotas(maxConnectionsPerIp, maxConnectionsPerIpOverrides)

      val sendBufferSize = config.socketSendBufferBytes
      val recvBufferSize = config.socketReceiveBufferBytes
      val brokerId = config.brokerId

      var processorBeginIndex = 0
      config.listeners.foreach { endpoint =>
        val listenerName = endpoint.listenerName
        val securityProtocol = endpoint.securityProtocol
        val processorEndIndex = processorBeginIndex + numProcessorThreads

        for (i <- processorBeginIndex until processorEndIndex)
          processors(i) = newProcessor(i, connectionQuotas, listenerName, securityProtocol)

        val acceptor = new Acceptor(endpoint, sendBufferSize, recvBufferSize, brokerId,
          processors.slice(processorBeginIndex, processorEndIndex), connectionQuotas)
        acceptors.put(endpoint, acceptor)
        Utils.newThread(s"kafka-socket-acceptor-$listenerName-$securityProtocol-${endpoint.port}", acceptor, false).start()
        acceptor.awaitStartup()

        processorBeginIndex = processorEndIndex
      }
    }

    newGauge("NetworkProcessorAvgIdlePercent",
      new Gauge[Double] {
        def value = allMetricNames.map { metricName =>
          Option(metrics.metric(metricName)).fold(0.0)(_.value)
        }.sum / totalProcessorThreads
      }
    )

    info("Started " + acceptors.size + " acceptor threads")
  }

  // register the processor threads for notification of responses
  requestChannel.addResponseListener(id => processors(id).wakeup())

  /**
   * Shutdown the socket server
   */
  def shutdown() = {
    info("Shutting down")
    this.synchronized {
      acceptors.values.foreach(_.shutdown)
      processors.foreach(_.shutdown)
    }
    info("Shutdown completed")
  }

  def boundPort(listenerName: ListenerName): Int = {
    try {
      acceptors(endpoints(listenerName)).serverChannel.socket.getLocalPort
    } catch {
      case e: Exception => throw new KafkaException("Tried to check server's port before server was started or checked for port of non-existing protocol", e)
    }
  }

  /* `protected` for test usage */
  protected[network] def newProcessor(id: Int, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
                                      securityProtocol: SecurityProtocol): Processor = {
    new Processor(id,
      time,
      config.socketRequestMaxBytes,
      requestChannel,
      connectionQuotas,
      config.connectionsMaxIdleMs,
      listenerName,
      securityProtocol,
      config.values,
      metrics,
      credentialProvider
    )
  }

  /* For test usage */
  private[network] def connectionCount(address: InetAddress): Int =
    Option(connectionQuotas).fold(0)(_.get(address))

  /* For test usage */
  private[network] def processor(index: Int): Processor = processors(index)

}

/**
 * A base class with some helper variables and methods
 */
private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQuotas) extends Runnable with Logging {

  private val startupLatch = new CountDownLatch(1)

  // `shutdown()` is invoked before `startupComplete` and `shutdownComplete` if an exception is thrown in the constructor
  // (e.g. if the address is already in use). We want `shutdown` to proceed in such cases, so we first assign an open
  // latch and then replace it in `startupComplete()`.
  @volatile private var shutdownLatch = new CountDownLatch(0)

  private val alive = new AtomicBoolean(true)

  def wakeup(): Unit

  /**
   * Initiates a graceful shutdown by signaling to stop and waiting for the shutdown to complete
   */
  def shutdown(): Unit = {
    alive.set(false)
    wakeup()
    shutdownLatch.await()
  }

  /**
   * Wait for the thread to completely start up
   */
  def awaitStartup(): Unit = startupLatch.await

  /**
   * Record that the thread startup is complete
   */
  protected def startupComplete(): Unit = {
    // Replace the open latch with a closed one
    shutdownLatch = new CountDownLatch(1)
    startupLatch.countDown()
  }

  /**
   * Record that the thread shutdown is complete
   */
  protected def shutdownComplete(): Unit = shutdownLatch.countDown()

  /**
   * Is the server still running?
   */
  protected def isRunning: Boolean = alive.get

  /**
   * Close the connection identified by `connectionId` and decrement the connection count.
   */
  def close(selector: KSelector, connectionId: String): Unit = {
    val channel = selector.channel(connectionId)
    if (channel != null) {
      debug(s"Closing selector connection $connectionId")
      val address = channel.socketAddress
      if (address != null)
        connectionQuotas.dec(address)
      selector.close(connectionId)
    }
  }

  /**
   * Close `channel` and decrement the connection count.
   */
  def close(channel: SocketChannel): Unit = {
    if (channel != null) {
      debug("Closing connection from " + channel.socket.getRemoteSocketAddress())
      connectionQuotas.dec(channel.socket.getInetAddress)
      swallowError(channel.socket().close())
      swallowError(channel.close())
    }
  }
}

/**
 * Thread that accepts and configures new connections. There is one of these per endpoint.
 */
private[kafka] class Acceptor(val endPoint: EndPoint,
                              val sendBufferSize: Int,
                              val recvBufferSize: Int,
                              brokerId: Int,
                              processors: Array[Processor],
                              connectionQuotas: ConnectionQuotas) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup {

  private val nioSelector = NSelector.open()
  val serverChannel = openServerSocket(endPoint.host, endPoint.port)

  this.synchronized {
    processors.foreach { processor =>
      Utils.newThread(s"kafka-network-thread-$brokerId-${endPoint.listenerName}-${endPoint.securityProtocol}-${processor.id}",
        processor, false).start()
    }
  }

  /**
   * Accept loop that checks for new connection attempts
   */
  def run() {
    serverChannel.register(nioSelector, SelectionKey.OP_ACCEPT)
    startupComplete()
    try {
      var currentProcessor = 0
      while (isRunning) {
        try {
          val ready = nioSelector.select(500)
          if (ready > 0) {
            info(this + "...................ready:" + ready)
            val keys = nioSelector.selectedKeys()
            val iter = keys.iterator()
            while (iter.hasNext && isRunning) {
              try {
                val key = iter.next
                iter.remove()
                if (key.isAcceptable)
                  accept(key, processors(currentProcessor))
                else
                  throw new IllegalStateException("Unrecognized key state for acceptor thread.")

                // round robin to the next processor thread
                currentProcessor = (currentProcessor + 1) % processors.length
              } catch {
                case e: Throwable => error("Error while accepting connection", e)
              }
            }
          }
        }
        catch {
          // We catch all the throwables to prevent the acceptor thread from exiting on exceptions due
          // to a select operation on a specific channel or a bad request. We don't want
          // the broker to stop responding to requests from other clients in these scenarios.
          case e: ControlThrowable => throw e
          case e: Throwable => error("Error occurred", e)
        }
      }
    } finally {
      debug("Closing server socket and selector.")
      swallowError(serverChannel.close())
      swallowError(nioSelector.close())
      shutdownComplete()
    }
  }

  /*
   * Create a server socket to listen for connections on.
   */
  private def openServerSocket(host: String, port: Int): ServerSocketChannel = {
    val socketAddress =
      if (host == null || host.trim.isEmpty)
        new InetSocketAddress(port)
      else
        new InetSocketAddress(host, port)
    val serverChannel = ServerSocketChannel.open()
    serverChannel.configureBlocking(false)
    if (recvBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
      serverChannel.socket().setReceiveBufferSize(recvBufferSize)

    try {
      serverChannel.socket.bind(socketAddress)
      info("Awaiting socket connections on %s:%d.".format(socketAddress.getHostString, serverChannel.socket.getLocalPort))
    } catch {
      case e: SocketException =>
        throw new KafkaException("Socket server failed to bind to %s:%d: %s.".format(socketAddress.getHostString, port, e.getMessage), e)
    }
    serverChannel
  }

  /*
   * Accept a new connection
   */
  def accept(key: SelectionKey, processor: Processor) {
    val serverSocketChannel = key.channel().asInstanceOf[ServerSocketChannel]
    val socketChannel = serverSocketChannel.accept()
    try {
      connectionQuotas.inc(socketChannel.socket().getInetAddress)
      socketChannel.configureBlocking(false)
      socketChannel.socket().setTcpNoDelay(true)
      socketChannel.socket().setKeepAlive(true)
      if (sendBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
        socketChannel.socket().setSendBufferSize(sendBufferSize)

      debug("Accepted connection from %s on %s and assigned it to processor %d, sendBufferSize [actual|requested]: [%d|%d] recvBufferSize [actual|requested]: [%d|%d]"
            .format(socketChannel.socket.getRemoteSocketAddress, socketChannel.socket.getLocalSocketAddress, processor.id,
                  socketChannel.socket.getSendBufferSize, sendBufferSize,
                  socketChannel.socket.getReceiveBufferSize, recvBufferSize))

      processor.accept(socketChannel)
    } catch {
      case e: TooManyConnectionsException =>
        info("Rejected connection from %s, address already has the configured maximum of %d connections.".format(e.ip, e.count))
        close(socketChannel)
    }
  }

  /**
   * Wakeup the thread for selection.
   */
  @Override
  def wakeup = nioSelector.wakeup()

}

/**
 * Thread that processes all requests from a single connection. There are N of these running in parallel
 * each of which has its own selector
 */
private[kafka] class Processor(val id: Int,
                               time: Time,
                               maxRequestSize: Int,
                               requestChannel: RequestChannel,
                               connectionQuotas: ConnectionQuotas,
                               connectionsMaxIdleMs: Long,
                               listenerName: ListenerName,
                               securityProtocol: SecurityProtocol,
                               channelConfigs: java.util.Map[String, _],
                               metrics: Metrics,
                               credentialProvider: CredentialProvider) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup {

  private object ConnectionId {
    def fromString(s: String): Option[ConnectionId] = s.split("-") match {
      case Array(local, remote) => BrokerEndPoint.parseHostPort(local).flatMap { case (localHost, localPort) =>
        BrokerEndPoint.parseHostPort(remote).map { case (remoteHost, remotePort) =>
          ConnectionId(localHost, localPort, remoteHost, remotePort)
        }
      }
      case _ => None
    }
  }

  private case class ConnectionId(localHost: String, localPort: Int, remoteHost: String, remotePort: Int) {
    override def toString: String = s"$localHost:$localPort-$remoteHost:$remotePort"
  }

  private val newConnections = new ConcurrentLinkedQueue[SocketChannel]()
  private val inflightResponses = mutable.Map[String, RequestChannel.Response]()
  private val metricTags = Map("networkProcessor" -> id.toString).asJava

  newGauge("IdlePercent",
    new Gauge[Double] {
      def value = {
        Option(metrics.metric(metrics.metricName("io-wait-ratio", "socket-server-metrics", metricTags))).fold(0.0)(_.value)
      }
    },
    metricTags.asScala
  )

  private val selector = new KSelector(
    maxRequestSize,
    connectionsMaxIdleMs,
    metrics,
    time,
    "socket-server",
    metricTags,
    false,
    ChannelBuilders.serverChannelBuilder(securityProtocol, channelConfigs, credentialProvider.credentialCache))

  override def run() {
    startupComplete()
    while (isRunning) {
      try {
        // setup any new connections that have been queued up
        configureNewConnections()
        // register any new responses for writing
        processNewResponses()
        poll()
        processCompletedReceives()
        processCompletedSends()
        processDisconnected()
      } catch {
        // We catch all the throwables here to prevent the processor thread from exiting. We do this because
        // letting a processor exit might cause a bigger impact on the broker. Usually the exceptions thrown would
        // be either associated with a specific socket channel or a bad request. We just ignore the bad socket channel
        // or request. This behavior might need to be reviewed if we see an exception that need the entire broker to stop.
        case e: ControlThrowable => throw e
        case e: Throwable =>
          error("Processor got uncaught exception.", e)
      }
    }

    debug("Closing selector - processor " + id)
    swallowError(closeAll())
    shutdownComplete()
  }

  private def processNewResponses() {
    var curr = requestChannel.receiveResponse(id)
    while (curr != null) {
      try {
        curr.responseAction match {
          case RequestChannel.NoOpAction =>
            // There is no response to send to the client, we need to read more pipelined requests
            // that are sitting in the server's socket buffer
            curr.request.updateRequestMetrics
            trace("Socket server received empty response to send, registering for read: " + curr)
            val channelId = curr.request.connectionId
            if (selector.channel(channelId) != null || selector.closingChannel(channelId) != null)
              selector.unmute(channelId)
          case RequestChannel.SendAction =>
            sendResponse(curr)
          case RequestChannel.CloseConnectionAction =>
            curr.request.updateRequestMetrics
            trace("Closing socket connection actively according to the response code.")
            close(selector, curr.request.connectionId)
        }
      } finally {
        curr = requestChannel.receiveResponse(id)
      }
    }
  }

  /* `protected` for test usage */
  protected[network] def sendResponse(response: RequestChannel.Response) {
    trace(s"Socket server received response to send, registering for write and sending data: $response")
    val channel = selector.channel(response.responseSend.destination)
    // `channel` can be null if the selector closed the connection because it was idle for too long
    if (channel == null) {
      warn(s"Attempting to send response via channel for which there is no open connection, connection id $id")
      response.request.updateRequestMetrics()
    }
    else {
      selector.send(response.responseSend)
      inflightResponses += (response.request.connectionId -> response)
    }
  }

  private def poll() {
    try selector.poll(300)
    catch {
      case e @ (_: IllegalStateException | _: IOException) =>
        error(s"Closing processor $id due to illegal state or IO exception")
        swallow(closeAll())
        shutdownComplete()
        throw e
    }
  }

  private def processCompletedReceives() {
    selector.completedReceives.asScala.foreach { receive =>
      try {
        val openChannel = selector.channel(receive.source)
        val session = {
          // Only methods that are safe to call on a disconnected channel should be invoked on 'channel'.
          val channel = if (openChannel != null) openChannel else selector.closingChannel(receive.source)
          RequestChannel.Session(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, channel.principal.getName), channel.socketAddress)
        }
        val req = RequestChannel.Request(processor = id, connectionId = receive.source, session = session,
          buffer = receive.payload, startTimeMs = time.milliseconds, listenerName = listenerName,
          securityProtocol = securityProtocol)
        requestChannel.sendRequest(req)
        selector.mute(receive.source)
      } catch {
        case e @ (_: InvalidRequestException | _: SchemaException) =>
          // note that even though we got an exception, we can assume that receive.source is valid.
          // Issues with constructing a valid receive object were handled earlier
          error(s"Closing socket for ${receive.source} because of error", e)
          close(selector, receive.source)
      }
    }
  }

  private def processCompletedSends() {
    selector.completedSends.asScala.foreach { send =>
      val resp = inflightResponses.remove(send.destination).getOrElse {
        throw new IllegalStateException(s"Send for ${send.destination} completed, but not in `inflightResponses`")
      }
      resp.request.updateRequestMetrics()
      selector.unmute(send.destination)
    }
  }

  private def processDisconnected() {
    selector.disconnected.asScala.foreach { connectionId =>
      val remoteHost = ConnectionId.fromString(connectionId).getOrElse {
        throw new IllegalStateException(s"connectionId has unexpected format: $connectionId")
      }.remoteHost
      inflightResponses.remove(connectionId).foreach(_.request.updateRequestMetrics())
      // the channel has been closed by the selector but the quotas still need to be updated
      connectionQuotas.dec(InetAddress.getByName(remoteHost))
    }
  }

  /**
   * Queue up a new connection for reading
   */
  def accept(socketChannel: SocketChannel) {
    newConnections.add(socketChannel)
    wakeup()
  }

  /**
   * Register any new connections that have been queued up
   */
  private def configureNewConnections() {
    while (!newConnections.isEmpty) {
      val channel = newConnections.poll()
      try {
        debug(s"Processor $id listening to new connection from ${channel.socket.getRemoteSocketAddress}")
        val localHost = channel.socket().getLocalAddress.getHostAddress
        val localPort = channel.socket().getLocalPort
        val remoteHost = channel.socket().getInetAddress.getHostAddress
        val remotePort = channel.socket().getPort
        val connectionId = ConnectionId(localHost, localPort, remoteHost, remotePort).toString
        selector.register(connectionId, channel)
      } catch {
        // We explicitly catch all non fatal exceptions and close the socket to avoid a socket leak. The other
        // throwables will be caught in processor and logged as uncaught exceptions.
        case NonFatal(e) =>
          val remoteAddress = channel.getRemoteAddress
          // need to close the channel here to avoid a socket leak.
          close(channel)
          error(s"Processor $id closed connection from $remoteAddress", e)
      }
    }
  }

  /**
   * Close the selector and all open connections
   */
  private def closeAll() {
    selector.channels.asScala.foreach { channel =>
      close(selector, channel.id)
    }
    selector.close()
  }

  /* For test usage */
  private[network] def channel(connectionId: String): Option[KafkaChannel] =
    Option(selector.channel(connectionId))

  /**
   * Wakeup the thread for selection.
   */
  @Override
  def wakeup = selector.wakeup()

}

class ConnectionQuotas(val defaultMax: Int, overrideQuotas: Map[String, Int]) {

  private val overrides = overrideQuotas.map { case (host, count) => (InetAddress.getByName(host), count) }
  private val counts = mutable.Map[InetAddress, Int]()

  def inc(address: InetAddress) {
    counts.synchronized {
      val count = counts.getOrElseUpdate(address, 0)
      counts.put(address, count + 1)
      val max = overrides.getOrElse(address, defaultMax)
      if (count >= max)
        throw new TooManyConnectionsException(address, max)
    }
  }

  def dec(address: InetAddress) {
    counts.synchronized {
      val count = counts.getOrElse(address,
        throw new IllegalArgumentException(s"Attempted to decrease connection count for address with no connections, address: $address"))
      if (count == 1)
        counts.remove(address)
      else
        counts.put(address, count - 1)
    }
  }

  def get(address: InetAddress): Int = counts.synchronized {
    counts.getOrElse(address, 0)
  }

}

class TooManyConnectionsException(val ip: InetAddress, val count: Int)
  extends KafkaException("Too many connections from %s (maximum = %d)".format(ip, count))
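Putting the threading model in numbers: SocketServer creates one Acceptor per listener plus num.network.threads Processors per listener (totalProcessorThreads = numProcessorThreads * endpoints.size above), and KafkaServer adds a shared pool of num.io.threads KafkaRequestHandlers. A back-of-the-envelope sketch (illustrative arithmetic only, not a Kafka API):

// Thread count for the request path, per the code above.
def requestPathThreads(listeners: Int, numNetworkThreads: Int, numIoThreads: Int): Int =
  listeners * (1 + numNetworkThreads) + numIoThreads

// e.g. 2 listeners, num.network.threads=3, num.io.threads=8 => 2*(1+3) + 8 = 16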