Spark 2.1: NettyBlockRpcServer uses streams to open blocks


NettyBlockRpcServer handles the RPC calls of the block transfer service: for an OpenBlocks request it registers one transport-layer chunk per requested block and replies with a StreamHandle; for an UploadBlock request it writes the uploaded bytes into the local BlockManager.

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.language.existentials
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.BlockDataManager
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.client.{RpcResponseCallback, TransportClient}
import org.apache.spark.network.server.{OneForOneStreamManager, RpcHandler, StreamManager}
import org.apache.spark.network.shuffle.protocol.{BlockTransferMessage, OpenBlocks, StreamHandle, UploadBlock}
import org.apache.spark.serializer.Serializer
import org.apache.spark.storage.{BlockId, StorageLevel}

/**
 * Serves requests to open blocks by simply registering one chunk per block requested.
 * Handles opening and uploading arbitrary BlockManager blocks.
 *
 * Opened blocks are registered with the "one-for-one" strategy, meaning each Transport-layer Chunk
 * is equivalent to one Spark-level shuffle block.
 */
class NettyBlockRpcServer(
    appId: String,
    serializer: Serializer,
    blockManager: BlockDataManager)
  extends RpcHandler with Logging {

  private val streamManager = new OneForOneStreamManager()

  override def receive(
      client: TransportClient,
      rpcMessage: ByteBuffer,
      responseContext: RpcResponseCallback): Unit = {
    val message = BlockTransferMessage.Decoder.fromByteBuffer(rpcMessage)
    logTrace(s"Received request: $message")

    message match {
      case openBlocks: OpenBlocks =>
        val blocks: Seq[ManagedBuffer] =
          openBlocks.blockIds.map(BlockId.apply).map(blockManager.getBlockData)
        val streamId = streamManager.registerStream(appId, blocks.iterator.asJava)
        logTrace(s"Registered streamId $streamId with ${blocks.size} buffers")
        responseContext.onSuccess(new StreamHandle(streamId, blocks.size).toByteBuffer)

      case uploadBlock: UploadBlock =>
        // StorageLevel and ClassTag are serialized as bytes using our JavaSerializer.
        val (level: StorageLevel, classTag: ClassTag[_]) = {
          serializer
            .newInstance()
            .deserialize(ByteBuffer.wrap(uploadBlock.metadata))
            .asInstanceOf[(StorageLevel, ClassTag[_])]
        }
        val data = new NioManagedBuffer(ByteBuffer.wrap(uploadBlock.blockData))
        val blockId = BlockId(uploadBlock.blockId)
        blockManager.putBlockData(blockId, data, level, classTag)
        responseContext.onSuccess(ByteBuffer.allocate(0))
    }
  }

  override def getStreamManager(): StreamManager = streamManager
}
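The "one-for-one" flow is easiest to see from the client side. Below is a minimal sketch of the round trip (modeled on what Spark's OneForOneBlockFetcher does, but not its actual code; fetchBlocks and the ids passed to it are hypothetical): send OpenBlocks over RPC, decode the returned StreamHandle, then fetch one chunk per block.

import java.nio.ByteBuffer

import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.client.{ChunkReceivedCallback, RpcResponseCallback, TransportClient}
import org.apache.spark.network.shuffle.protocol.{BlockTransferMessage, OpenBlocks, StreamHandle}

// Hypothetical helper: one OpenBlocks RPC, then numChunks chunk fetches.
def fetchBlocks(client: TransportClient, appId: String, execId: String,
                blockIds: Array[String]): Unit = {
  val openMessage = new OpenBlocks(appId, execId, blockIds)
  client.sendRpc(openMessage.toByteBuffer, new RpcResponseCallback {
    override def onSuccess(response: ByteBuffer): Unit = {
      // The server replied with StreamHandle(streamId, numChunks).
      val handle = BlockTransferMessage.Decoder.fromByteBuffer(response)
        .asInstanceOf[StreamHandle]
      // One chunk per requested block, fetched in order.
      for (i <- 0 until handle.numChunks) {
        client.fetchChunk(handle.streamId, i, new ChunkReceivedCallback {
          override def onSuccess(chunkIndex: Int, buffer: ManagedBuffer): Unit =
            println(s"Got block ${blockIds(chunkIndex)}: ${buffer.size()} bytes")
          override def onFailure(chunkIndex: Int, e: Throwable): Unit =
            e.printStackTrace()
        })
      }
    }
    override def onFailure(e: Throwable): Unit = e.printStackTrace()
  })
}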
On the server side, the stream itself is registered via OneForOneStreamManager.registerStream:

  /**
   * Registers a stream of ManagedBuffers which are served as individual chunks one at a time to
   * callers. Each ManagedBuffer will be release()'d after it is transferred on the wire. If a
   * client connection is closed before the iterator is fully drained, then the remaining buffers
   * will all be release()'d.
   *
   * If an app ID is provided, only callers who've authenticated with the given app ID will be
   * allowed to fetch from this stream.
   */
  public long registerStream(String appId, Iterator<ManagedBuffer> buffers) {
    long myStreamId = nextStreamId.getAndIncrement();
    streams.put(myStreamId, new StreamState(appId, buffers));
    return myStreamId;
  }
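As a standalone illustration of that contract (a hypothetical sketch, not from Spark's sources; the "app-1" id is made up): register a couple of in-memory buffers under one stream id, then fetch them strictly in order, one at a time.

import java.nio.ByteBuffer

import scala.collection.JavaConverters._

import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.server.OneForOneStreamManager

val manager = new OneForOneStreamManager()
val buffers: Seq[ManagedBuffer] = Seq(
  new NioManagedBuffer(ByteBuffer.wrap(Array[Byte](1, 2, 3))),
  new NioManagedBuffer(ByteBuffer.wrap(Array[Byte](4, 5))))

// One streamId covers all buffers; each buffer becomes exactly one chunk.
val streamId = manager.registerStream("app-1", buffers.iterator.asJava)

// Chunks must be requested one at a time and in order (enforced by curChunk below).
val chunk0 = manager.getChunk(streamId, 0)  // ok
val chunk1 = manager.getChunk(streamId, 1)  // ok; last chunk, so the stream is removed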

StreamState

  /** State of a single stream. */
  private static class StreamState {
    final String appId;
    final Iterator<ManagedBuffer> buffers;

    // The channel associated to the stream
    Channel associatedChannel = null;

    // Used to keep track of the index of the buffer that the user has retrieved, just to ensure
    // that the caller only requests each chunk one at a time, in order.
    int curChunk = 0;

    StreamState(String appId, Iterator<ManagedBuffer> buffers) {
      this.appId = appId;
      this.buffers = Preconditions.checkNotNull(buffers);
    }
  }

OneForOneStreamManager

/**
 * StreamManager which allows registration of an Iterator<ManagedBuffer>, which are
 * individually fetched as chunks by the client. Each registered buffer is one chunk.
 */
public class OneForOneStreamManager extends StreamManager {
  private static final Logger logger = LoggerFactory.getLogger(OneForOneStreamManager.class);

  private final AtomicLong nextStreamId;
  private final ConcurrentHashMap<Long, StreamState> streams;

  public OneForOneStreamManager() {
    // For debugging purposes, start with a random stream id to help identifying different streams.
    // This does not need to be globally unique, only unique to this class.
    nextStreamId = new AtomicLong((long) new Random().nextInt(Integer.MAX_VALUE) * 1000);
    streams = new ConcurrentHashMap<>();
  }

  @Override
  public void registerChannel(Channel channel, long streamId) {
    if (streams.containsKey(streamId)) {
      streams.get(streamId).associatedChannel = channel;
    }
  }

  @Override
  public ManagedBuffer getChunk(long streamId, int chunkIndex) {
    StreamState state = streams.get(streamId);
    if (chunkIndex != state.curChunk) {
      throw new IllegalStateException(String.format(
        "Received out-of-order chunk index %s (expected %s)", chunkIndex, state.curChunk));
    } else if (!state.buffers.hasNext()) {
      throw new IllegalStateException(String.format(
        "Requested chunk index beyond end %s", chunkIndex));
    }
    state.curChunk += 1;
    ManagedBuffer nextChunk = state.buffers.next();

    if (!state.buffers.hasNext()) {
      logger.trace("Removing stream id {}", streamId);
      streams.remove(streamId);
    }

    return nextChunk;
  }

  @Override
  public void connectionTerminated(Channel channel) {
    // Close all streams which have been associated with the channel.
    for (Map.Entry<Long, StreamState> entry: streams.entrySet()) {
      StreamState state = entry.getValue();
      if (state.associatedChannel == channel) {
        streams.remove(entry.getKey());

        // Release all remaining buffers.
        while (state.buffers.hasNext()) {
          state.buffers.next().release();
        }
      }
    }
  }

  @Override
  public void checkAuthorization(TransportClient client, long streamId) {
    if (client.getClientId() != null) {
      StreamState state = streams.get(streamId);
      Preconditions.checkArgument(state != null, "Unknown stream ID.");
      if (!client.getClientId().equals(state.appId)) {
        throw new SecurityException(String.format(
          "Client %s not authorized to read stream %d (app %s).",
          client.getClientId(),
          streamId,
          state.appId));
      }
    }
  }

  // registerStream(...) and the StreamState inner class are shown above.
}
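One more hypothetical sketch to show the cleanup path: if the channel registered for a stream dies before the iterator is drained, connectionTerminated releases every remaining buffer. Netty's EmbeddedChannel is used here as a stand-in for a real connection, and the "app-1" id is again made up.

import java.nio.ByteBuffer

import scala.collection.JavaConverters._

import io.netty.channel.embedded.EmbeddedChannel
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.server.OneForOneStreamManager

val manager = new OneForOneStreamManager()
val buffers: Seq[ManagedBuffer] =
  (1 to 3).map(i => new NioManagedBuffer(ByteBuffer.allocate(i)))
val streamId = manager.registerStream("app-1", buffers.iterator.asJava)

val channel = new EmbeddedChannel()
manager.registerChannel(channel, streamId)  // tie the stream to this channel

manager.getChunk(streamId, 0)               // the client fetched only the first chunk...
manager.connectionTerminated(channel)       // ...then disconnected: chunks 1 and 2 are release()'d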