sender分析之Selector
来源:互联网 发布:mwap网站js弹广告 编辑:程序博客网 时间:2024/05/21 06:40
Selector 是 Kafka 基于 Java NIO 自己实现的一个异步非阻塞网络 I/O 组件,使用一条单独的线程管理多条网络连接上的连接、读、写操作
一 核心字段
java.nio.channels.Selector nioSelector: 用来监听网络I/O事件
Map<String, KafkaChannel> channels: 维护了NodeId和KafkaChannel之间的映射关系,KafkaChannel是针对SocketChannel的进一步封装
List<Send> completedSends: 保存那些已经完全发送出去的请求
List<String> failedSends: 保存那些发送失败的请求对应的 NodeId
List<NetworkReceive> completedReceives: 保存已经完全接收到的请求
Map<KafkaChannel,Deque<NetworkReceive>> stagedReceives: 暂存一次OP_READ事件处理过程中读取到的全部请求,当一次OP_READ事件处理完成之后,会将stagedReceives集合中的请求保存到completedReceives集合中
List<String> disconnected: 记录poll过程中断开的连接
List<String> connected:记录poll过程中新建的连接
ChannelBuilder channelBuilder: 用于创建KafkaChannel的工具,根据不同配置创建不同的TransportLayer的子类,然后创建KafkaChannel
int maxReceiveSize: 能够接收的单个请求的最大字节数
二 重要方法
2.1 connect
public voidconnect(Stringid, InetSocketAddressaddress, int sendBufferSize, int receiveBufferSize) throws IOException {
if (this.channels.containsKey(id))
throw new IllegalStateException("There isalready a connection for id "+ id);
// 创建SocketChannel
SocketChannel socketChannel= SocketChannel.open();
// 设置成非阻塞模式
socketChannel.configureBlocking(false);
// 获取Socket对象
Socket socket= socketChannel.socket();
socket.setKeepAlive(true);//设置为长连接
// 设置发送buffer的大小
if (sendBufferSize!= Selectable.USE_DEFAULT_BUFFER_SIZE)
socket.setSendBufferSize(sendBufferSize);
// 设置接收buffe的大小
if (receiveBufferSize!= Selectable.USE_DEFAULT_BUFFER_SIZE)
socket.setReceiveBufferSize(receiveBufferSize);
socket.setTcpNoDelay(true);
boolean connected;
try {
// 因为是非阻塞式的,所以SocketChannel.connect方法是发起一个连接,connect方法在连接正式建立
// 之前就可能返回,在后面会通过Selector.finishConnect方法确认连接是否真正的建立
connected = socketChannel.connect(address);
} catch (UnresolvedAddressExceptione) {
socketChannel.close();
throw new IOException("Can't resolve address:"+ address, e);
} catch (IOExceptione) {
socketChannel.close();
throw e;
}
// 将这个SocketChannel注册到nioSelector上,并关注OP_CONNECT事件
SelectionKey key= socketChannel.register(nioSelector,SelectionKey.OP_CONNECT);
// 创建KafkaChannel
KafkaChannel channel= channelBuilder.buildChannel(id,key, maxReceiveSize);
// 将kafkachannel注册到key
key.attach(channel);
// 将NodeId和KafkaChannel绑定,放到channels中管理
this.channels.put(id,channel);
if (connected) {
// OP_CONNECTwon't trigger for immediately connected channels
log.debug("Immediately connected tonode {}",channel.id());
immediatelyConnectedKeys.add(key);
key.interestOps(0);
}
}
2.2 poll 真正执行网络IO,它会调用nioSelector.select方法等待I/O事件发生
public void poll(long timeout) throws IOException { if (timeout < 0) throw new IllegalArgumentException("timeout should be >= 0"); clear(); // 将上一次poll的结果清除掉 if (hasStagedReceives() || !immediatelyConnectedKeys.isEmpty()) timeout = 0; /* check ready keys */ long startSelect = time.nanoseconds(); // 调用nioSelect.select方法等待I/O事件发生 int readyKeys = select(timeout); long endSelect = time.nanoseconds(); this.sensors.selectTime.record(endSelect - startSelect, time.milliseconds()); // 处理I/O事件 if (readyKeys > 0 || !immediatelyConnectedKeys.isEmpty()) { pollSelectionKeys(this.nioSelector.selectedKeys(), false, endSelect); pollSelectionKeys(immediatelyConnectedKeys, true, endSelect); } // 将stagedReceives复制到completedReceives addToCompletedReceives(); long endIo = time.nanoseconds(); this.sensors.ioTime.record(endIo - endSelect, time.milliseconds()); // 关闭长期空闲连接 maybeCloseOldestConnection(endSelect);}
2.3 pollSelectionKeys 处理OP_CONNECT、OP_READ、OP_WRITE事件,并且会检测连接状态
private void pollSelectionKeys(Iterable<SelectionKey> selectionKeys, boolean isImmediatelyConnected, long currentTimeNanos) { Iterator<SelectionKey> iterator = selectionKeys.iterator(); // 遍历SelectionKey while (iterator.hasNext()) { // 获取每一个SelectionKey SelectionKey key = iterator.next(); // 从Iterator中移除 iterator.remove(); // 之前创建连接时,将kafkachannel注册到key上,就是为了在这里获取 KafkaChannel channel = channel(key); sensors.maybeRegisterConnectionMetrics(channel.id()); if (idleExpiryManager != null) idleExpiryManager.update(channel.id(), currentTimeNanos); try { // 对connect方法返回true或者OP_CONNECTION事件进行处理 if (isImmediatelyConnected || key.isConnectable()) { // 会检测SocketChannel是否建立完成,建立后会取消对OP_CONNECT事件的关注,开始 // 关注OP_READ事件 if (channel.finishConnect()) { // 添加到已连接的集合中 this.connected.add(channel.id()); this.sensors.connectionCreated.record(); SocketChannel socketChannel = (SocketChannel) key.channel(); log.debug("Created socket with SO_RCVBUF = {}, SO_SNDBUF = {}, SO_TIMEOUT = {} to node {}", socketChannel.socket().getReceiveBufferSize(), socketChannel.socket().getSendBufferSize(), socketChannel.socket().getSoTimeout(), channel.id()); } else continue; } // 调用KafkaChannel的prepare方法进行身份验证 if (channel.isConnected() && !channel.ready()) channel.prepare(); // 处理OP_READ事件 if (channel.ready() && key.isReadable() && !hasStagedReceive(channel)) { NetworkReceive networkReceive; while ((networkReceive = channel.read()) != null) // read方法读到一个完整的NetworkReceive,则将其添加到stagedReceives中保存 // 如读取不到一个完整的NetworkReceive,则返回null,下次处理OP_READ事件时,继续读取 // 直到读取到一个完整的NetworkReceive addToStagedReceives(channel, networkReceive); } // 处理OP_WRITE事件 if (channel.ready() && key.isWritable()) { Send send = channel.write(); // 上面的write方法将KafkaChannel的send字段发送出去,如果发送未完成,则返回null // 如果发送完成则返回Send,并添加到completedSends集合中带后续处理 if (send != null) { this.completedSends.add(send); this.sensors.recordBytesSent(channel.id(), send.size()); } } // 如果key无效。则关闭KafkaChannel.并且添加这个channel到断开的连接的集合中 if (!key.isValid()) { close(channel); 
this.disconnected.add(channel.id()); } } catch (Exception e) { String desc = channel.socketDescription(); if (e instanceof IOException) log.debug("Connection with {} disconnected", desc, e); else log.warn("Unexpected error from {}; closing connection", desc, e); close(channel); this.disconnected.add(channel.id()); } }}
2.4 具体的读写操作交给了KafkaChannel
private boolean send(Send send) throws IOException { // 如果send在一次write调用时没有发送完,SelectionKey的OP_WRITE事件还没有取消,还会继续监听此channel的op_write事件 // 直到整个send请求发送完毕才取消 send.writeTo(transportLayer); // 判断发送是否完成是通过ByteBuffer中是否还有剩余的字节来判断的 if (send.completed()) transportLayer.removeInterestOps(SelectionKey.OP_WRITE); return send.completed();}
public NetworkReceive read() throws IOException { NetworkReceive result = null; // 初始化NetworkReceive if (receive == null) { receive = new NetworkReceive(maxReceiveSize, id); } // 从TransportLayer中读取数据到NetworkReceive对象中,如果没有读完一个完整的NetworkReceive // 则下次触发OP_READ事件时将继续填充此NetworkReceive对象;如果读完了则将此receive置为空,下次 // 触发读操作的时候,创建新的NetworkReceive对象 receive(receive); if (receive.complete()) { receive.payload().rewind(); result = receive; receive = null; } return result;}
- sender分析之Selector
- Kafka源码分析之Sender
- sender分析之创建请求
- Kafka源码分析之Sender
- storyboard之 prepareForSegue:sender:
- storyboard之 prepareForSegue:sender:
- storyboard之 prepareForSegue:sender:
- storyboard之 prepareForSegue:sender:
- storyboard之 prepareForSegue:sender:
- qt学习之sender
- jQuery代码分析之二Selector Engine
- sender
- Sender
- 《Java 源码分析》:Java NIO 之 Selector(第一部分Selector.open())
- 《Java 源码分析》:Java NIO 之 Selector(第二部分selector.select())
- jQuery源码分析之jQuery(selector,context)详解
- ddpush 学习之路 12 Sender.java
- iOS学习之——prepareForSegue:sender:
- 关键字之标识符
- HAL
- 跑马灯不生效的问题
- Android jsoup 按比例显示图片
- ruby on rails 使用 rails ajax render partial html, 替换view render,提高效率
- sender分析之Selector
- SpringMVC绑定json数组,参考链接http://www.th7.cn/web/ajax/201603/158081.shtml
- python基础知识小总结
- CI框架表单验证
- Quartz使用总结
- Mybatis 向指定表中批量插入数据
- NetworkClient分析
- 使用wireshark常用的过滤命令
- LeetCode-algorithms 49. Group Anagrams