spark 2.0 OneForOneStreamManager
来源:互联网 发布:mac默认系统 编辑:程序博客网 时间:2024/06/16 12:09
OneForOneStreamManager只能按顺序一个接一个的读取一个流的数据块。它借助于内部的StreamState类,StreamState类里存放了客户端编号,ManagedBuffer的迭代器,以及当前读的块编号。要读的块编号必须为当前块编号加1。
/** * StreamManager which allows registration of an Iterator<ManagedBuffer>, which are * individually fetched as chunks by the client. Each registered buffer is one chunk. */public class OneForOneStreamManager extends StreamManager { private final Logger logger = LoggerFactory.getLogger(OneForOneStreamManager.class); private final AtomicLong nextStreamId; private final ConcurrentHashMap<Long, StreamState> streams; /** State of a single stream. */ private static class StreamState { final String appId; final Iterator<ManagedBuffer> buffers; // The channel associated to the stream Channel associatedChannel = null; // Used to keep track of the index of the buffer that the user has retrieved, just to ensure // that the caller only requests each chunk one at a time, in order. int curChunk = 0; StreamState(String appId, Iterator<ManagedBuffer> buffers) { this.appId = appId; this.buffers = Preconditions.checkNotNull(buffers); } } public OneForOneStreamManager() { // For debugging purposes, start with a random stream id to help identifying different streams. // This does not need to be globally unique, only unique to this class. 
nextStreamId = new AtomicLong((long) new Random().nextInt(Integer.MAX_VALUE) * 1000); streams = new ConcurrentHashMap<>(); } @Override public void registerChannel(Channel channel, long streamId) { if (streams.containsKey(streamId)) { streams.get(streamId).associatedChannel = channel; } } @Override public ManagedBuffer getChunk(long streamId, int chunkIndex) { StreamState state = streams.get(streamId); if (chunkIndex != state.curChunk) { throw new IllegalStateException(String.format( "Received out-of-order chunk index %s (expected %s)", chunkIndex, state.curChunk)); } else if (!state.buffers.hasNext()) { throw new IllegalStateException(String.format( "Requested chunk index beyond end %s", chunkIndex)); } state.curChunk += 1; ManagedBuffer nextChunk = state.buffers.next(); if (!state.buffers.hasNext()) { logger.trace("Removing stream id {}", streamId); streams.remove(streamId); } return nextChunk; } @Override public void connectionTerminated(Channel channel) { // Close all streams which have been associated with the channel. for (Map.Entry<Long, StreamState> entry: streams.entrySet()) { StreamState state = entry.getValue(); if (state.associatedChannel == channel) { streams.remove(entry.getKey()); // Release all remaining buffers. while (state.buffers.hasNext()) { state.buffers.next().release(); } } } } @Override public void checkAuthorization(TransportClient client, long streamId) { if (client.getClientId() != null) { StreamState state = streams.get(streamId); Preconditions.checkArgument(state != null, "Unknown stream ID."); if (!client.getClientId().equals(state.appId)) { throw new SecurityException(String.format( "Client %s not authorized to read stream %d (app %s).", client.getClientId(), streamId, state.appId)); } } } /** * Registers a stream of ManagedBuffers which are served as individual chunks one at a time to * callers. Each ManagedBuffer will be release()'d after it is transferred on the wire. 
If a * client connection is closed before the iterator is fully drained, then the remaining buffers * will all be release()'d. * * If an app ID is provided, only callers who've authenticated with the given app ID will be * allowed to fetch from this stream. */ public long registerStream(String appId, Iterator<ManagedBuffer> buffers) { long myStreamId = nextStreamId.getAndIncrement(); streams.put(myStreamId, new StreamState(appId, buffers)); return myStreamId; }}
0 0
- spark 2.0 OneForOneStreamManager
- spark 2.0 spark Inbox消息类型
- 使用spark-sql-perf评测spark 2.0
- spark 2.0 新特性
- Spark 2.0技术预览
- Spark 2.0 On Yarn
- spark 2.0 TransportConf常量
- spark 2.0 RpcHandler
- spark 2.0 RpcEndpointRef
- spark 2.0 RpcEnv
- spark 2.0 RpcEnvFileServer
- spark 2.0 RpcEnv NettyRpcEnv
- spark 2.0 StreamManager
- spark 2.0 MapStatus
- spark-2.0.2变动
- spark Streaming 2.0 InputInfoTracker
- spark streaming 2.0 RateLimiter
- spark core 2.0 SortShuffleManager
- JS 实现别踩白块功能
- Gradle环境下添加JNI编译
- 常用adb命令
- Xcode磁盘空间清理
- RNN 教程-part4,用python实现LSTM/GRU
- spark 2.0 OneForOneStreamManager
- 删除排序链表的重复节点
- Mysql 如何备份与还原数据库(在Mysql Workbench)
- java util
- linux 下shell中if的“-e,-d,-f”是什么意思
- hadoop MMSEG4 分词实例
- 如何配置https站点
- 使用ab进行页面的压力测试
- GDB学习整理2--gdb常用命令