Kafka source code analysis


Related source files: MemoryRecordsBuilder.java, ProduceResponse.java


The log-append class: Log.scala

The append method, which is where the message timestamp is assigned:

def append(records: MemoryRecords, assignOffsets: Boolean = true): LogAppendInfo = {
  val appendInfo = analyzeAndValidateRecords(records)

  // if we have any valid messages, append them to the log
  if (appendInfo.shallowCount == 0)
    return appendInfo

  // trim any invalid bytes or partial messages before appending it to the on-disk log
  var validRecords = trimInvalidBytes(records, appendInfo)

  try {
    // they are valid, insert them in the log
    lock synchronized {
      if (assignOffsets) {
        // assign offsets to the message set
        val offset = new LongRef(nextOffsetMetadata.messageOffset)
        appendInfo.firstOffset = offset.value
        val now = time.milliseconds
        val validateAndOffsetAssignResult = try {
          LogValidator.validateMessagesAndAssignOffsets(validRecords,
                                                        offset,
                                                        now,
                                                        appendInfo.sourceCodec,
                                                        appendInfo.targetCodec,
                                                        config.compact,
                                                        config.messageFormatVersion.messageFormatVersion,
                                                        config.messageTimestampType,
                                                        config.messageTimestampDifferenceMaxMs)
        } catch {
          case e: IOException => throw new KafkaException("Error in validating messages while appending to log '%s'".format(name), e)
        }
        validRecords = validateAndOffsetAssignResult.validatedRecords
        appendInfo.maxTimestamp = validateAndOffsetAssignResult.maxTimestamp
        appendInfo.offsetOfMaxTimestamp = validateAndOffsetAssignResult.shallowOffsetOfMaxTimestamp
        appendInfo.lastOffset = offset.value - 1
        if (config.messageTimestampType == TimestampType.LOG_APPEND_TIME)
          appendInfo.logAppendTime = now
        debug("timestampType=" + config.messageTimestampType + " logAppendTime=" + appendInfo.logAppendTime)

        // re-validate message sizes if there's a possibility that they have changed (due to re-compression or message
        // format conversion)
        if (validateAndOffsetAssignResult.messageSizeMaybeChanged) {
          for (logEntry <- validRecords.shallowEntries.asScala) {
            if (logEntry.sizeInBytes > config.maxMessageSize) {
              // we record the original message set size instead of the trimmed size
              // to be consistent with pre-compression bytesRejectedRate recording
              BrokerTopicStats.getBrokerTopicStats(topicPartition.topic).bytesRejectedRate.mark(records.sizeInBytes)
              BrokerTopicStats.getBrokerAllTopicsStats.bytesRejectedRate.mark(records.sizeInBytes)
              throw new RecordTooLargeException("Message size is %d bytes which exceeds the maximum configured message size of %d."
                .format(logEntry.sizeInBytes, config.maxMessageSize))
            }
          }
        }
      } else {
        // we are taking the offsets we are given
        if (!appendInfo.offsetsMonotonic || appendInfo.firstOffset < nextOffsetMetadata.messageOffset)
          throw new IllegalArgumentException("Out of order offsets found in " + records.deepEntries.asScala.map(_.offset))
      }

      // check whether the message set size exceeds config.segmentSize
      if (validRecords.sizeInBytes > config.segmentSize) {
        throw new RecordBatchTooLargeException("Message set size is %d bytes which exceeds the maximum configured segment size of %d."
          .format(validRecords.sizeInBytes, config.segmentSize))
      }

      // maybe roll the log if this segment is full
      val segment = maybeRoll(messagesSize = validRecords.sizeInBytes,
        maxTimestampInMessages = appendInfo.maxTimestamp,
        maxOffsetInMessages = appendInfo.lastOffset)

      // now append to the log
      segment.append(firstOffset = appendInfo.firstOffset,
        largestOffset = appendInfo.lastOffset,
        largestTimestamp = appendInfo.maxTimestamp,
        shallowOffsetOfMaxTimestamp = appendInfo.offsetOfMaxTimestamp,
        records = validRecords)

      // increment the log end offset
      updateLogEndOffset(appendInfo.lastOffset + 1)

      trace("Appended message set to log %s with first offset: %d, next offset: %d, and messages: %s"
        .format(this.name, appendInfo.firstOffset, nextOffsetMetadata.messageOffset, validRecords))

      if (unflushedMessages >= config.flushInterval)
        flush()

      appendInfo
    }
  } catch {
    case e: IOException => throw new KafkaStorageException("I/O exception in append to log '%s'".format(name), e)
  }
}
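The key point for the timestamp is how now = time.milliseconds interacts with the topic's timestamp configuration. Below is a minimal sketch, not the actual LogValidator code, of the rule that validateMessagesAndAssignOffsets applies: with LOG_APPEND_TIME the broker clock overwrites the producer-supplied timestamp (which is why appendInfo.logAppendTime = now above), while with CREATE_TIME the producer timestamp is kept but rejected if it drifts from now by more than message.timestamp.difference.max.ms (config.messageTimestampDifferenceMaxMs). The object, method, and exception choices in the sketch are invented for illustration and are not part of the Kafka code base.

// Minimal sketch of the broker-side timestamp rule; names are hypothetical.
object TimestampRuleSketch {
  sealed trait TimestampType
  case object LogAppendTime extends TimestampType
  case object CreateTime extends TimestampType

  def resolveTimestamp(producerTimestamp: Long,
                       now: Long,
                       timestampType: TimestampType,
                       maxDiffMs: Long): Long = timestampType match {
    case LogAppendTime =>
      // broker time wins: corresponds to the appendInfo.logAppendTime = now branch above
      now
    case CreateTime =>
      // keep the producer's CreateTime, but reject it if it is too far from the broker clock
      if (math.abs(producerTimestamp - now) > maxDiffMs)
        throw new IllegalArgumentException(
          s"Timestamp $producerTimestamp is out of range of now=$now (maxDiffMs=$maxDiffMs)")
      producerTimestamp
  }
}

This also explains why appendInfo.maxTimestamp is taken from validateAndOffsetAssignResult rather than from the incoming records: validation may have rewritten every timestamp before the set is handed to segment.append.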

