1.netty的ChannelHandler设计,使用了接口的适配器模式
ChannelHandler
ChannelHandlerAdapter
ChannelInboundHandler
ChannelInboundHandlerAdapter
ChannelOutboundHandler
ChannelOutboundHandlerAdapter
2.channelPipeline形成的ChannelHandler的链
一般这么写
// Typical Netty server bootstrap: parentGroup accepts connections,
// childGroup handles I/O for the accepted child channels.
ServerBootstrap b = new ServerBootstrap();
EventLoopGroup parentGroup = new NioEventLoopGroup();
EventLoopGroup childGroup = new NioEventLoopGroup();
b.group(parentGroup, childGroup).channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<Channel>() {
// Called once per accepted channel to populate its pipeline.
@Override
protected void initChannel(Channel ch) throws Exception {
ChannelPipeline pipeline = ch.pipeline();
// Handlers are appended in order; inbound events flow first-to-last.
pipeline.addLast(handler1,handler2,handler3);
}
});
// Bind port 8080, wait for the bind, then block until the channel closes.
b.bind(8080).sync().channel().closeFuture().sync();
pipeline.addLast方法
// DefaultChannelPipeline.addLast (excerpt from Netty, kept verbatim):
// wraps the handler in an AbstractChannelHandlerContext and links it in
// just before the tail sentinel, then arranges for handlerAdded(...) to run.
public final ChannelPipeline addLast(EventExecutorGroup group, String name, ChannelHandler handler) {
final AbstractChannelHandlerContext newCtx;
synchronized (this) {
// Rejects re-adding a non-@Sharable handler that is already in a pipeline.
checkMultiplicity(handler);
// filterName generates a unique name when 'name' is null.
newCtx = newContext(group, filterName(name, handler), handler);
addLast0(newCtx);
// If the registered is false it means that the channel was not registered on an eventloop yet.
// In this case we add the context to the pipeline and add a task that will call
// ChannelHandler.handlerAdded(...) once the channel is registered.
if (!registered) {
newCtx.setAddPending();
callHandlerCallbackLater(newCtx, true);
return this;
}
EventExecutor executor = newCtx.executor();
if (!executor.inEventLoop()) {
// Not on the context's event loop: schedule handlerAdded(...) there.
newCtx.setAddPending();
executor.execute(new Runnable() {
@Override
public void run() {
callHandlerAdded0(newCtx);
}
});
return this;
}
}
// Already on the event loop: invoke handlerAdded(...) directly.
callHandlerAdded0(newCtx);
return this;
}
// Splices newCtx into the doubly linked context list just before the tail
// sentinel (order of the four pointer writes matters).
private void addLast0(AbstractChannelHandlerContext newCtx) {
AbstractChannelHandlerContext prev = tail.prev;
newCtx.prev = prev;
newCtx.next = tail;
prev.next = newCtx;
tail.prev = newCtx;
}
可以看出在DefaultChannelPipeline中有两个属性
final AbstractChannelHandlerContext head;
final AbstractChannelHandlerContext tail;
tail = new TailContext(this);
head = new HeadContext(this);
而AbstractChannelHandlerContext也有两个属性
volatile AbstractChannelHandlerContext next;
volatile AbstractChannelHandlerContext prev;
也就是说,加入的ChannelHandler被包装成AbstractChannelHandlerContext,在DefaultChannelPipeline中形成了一个双向链表
如何传递的?
在ChannelInboundHandlerAdapter中的方法默认实现都是ctx.fireChannelXXX,比如 ctx.fireChannelRead(msg);
// AbstractChannelHandlerContext.fireChannelRead (excerpt, pseudo-signature):
// forwards the read event to the NEXT inbound context after this one.
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(Object){
invokeChannelRead(findContextInbound(), msg);
return this;
}
// Invokes channelRead on 'next', making sure it runs on next's EventExecutor.
static void invokeChannelRead(final AbstractChannelHandlerContext next, Object msg) {
// touch(...) records the message for resource-leak detection; msg must not be null.
final Object m = next.pipeline.touch(ObjectUtil.checkNotNull(msg, "msg"), next);
EventExecutor executor = next.executor();
if (executor.inEventLoop()) {
next.invokeChannelRead(m);
} else {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelRead(m);// pass the event on to the next handler
}
});
}
}
// Find the next inbound handler context after this one (skips outbound-only contexts).
private AbstractChannelHandlerContext findContextInbound() {
AbstractChannelHandlerContext ctx = this;
do {
ctx = ctx.next;
} while (!ctx.inbound);
return ctx;
}
也就是说在我们自己的ChannelHandler中的方法处理完自己的逻辑后,必须调用ctx.fireChannelXXX才能传递到下一个handler处理
这也就是责任链模式的一种实现,有点类似J2EE里面的Filter
3.从哪开始使用channelPipeline处理读到的数据的
io.netty.channel.nio.NioEventLoop.processSelectedKey(SelectionKey, AbstractNioChannel){
......
if ((readyOps & (SelectionKey.OP_READ | SelectionKey.OP_ACCEPT)) != 0 || readyOps == 0) {
unsafe.read();
}
......
}
// NioByteUnsafe.read (excerpt, pseudo-signature): the read loop that pulls
// bytes off the socket and feeds each buffer into the pipeline.
io.netty.channel.nio.AbstractNioByteChannel.NioByteUnsafe.read(){
final ChannelConfig config = config();
final ChannelPipeline pipeline = pipeline();
final ByteBufAllocator allocator = config.getAllocator();
// allocHandle estimates the size of the next receive buffer and decides
// whether to keep looping (continueReading()).
final RecvByteBufAllocator.Handle allocHandle = recvBufAllocHandle();
allocHandle.reset(config);
ByteBuf byteBuf = null;
boolean close = false;
try {
do {
byteBuf = allocHandle.allocate(allocator);
allocHandle.lastBytesRead(doReadBytes(byteBuf));
if (allocHandle.lastBytesRead() <= 0) {
// nothing was read. release the buffer.
byteBuf.release();
byteBuf = null;
// A negative read count signals EOF from the peer.
close = allocHandle.lastBytesRead() < 0;
if (close) {
// There is nothing left to read as we received an EOF.
readPending = false;
}
break;
}
allocHandle.incMessagesRead(1);
readPending = false;
pipeline.fireChannelRead(byteBuf);// use the pipeline to process the bytes just read
byteBuf = null;
} while (allocHandle.continueReading());
allocHandle.readComplete();
pipeline.fireChannelReadComplete();
if (close) {
closeOnRead(pipeline);
}
} catch (Throwable t) {
handleReadException(pipeline, byteBuf, t, close, allocHandle);
} finally {
// Check if there is a readPending which was not processed yet.
// This could be for two reasons:
// * The user called Channel.read() or ChannelHandlerContext.read() in channelRead(...) method
// * The user called Channel.read() or ChannelHandlerContext.read() in channelReadComplete(...) method
//
// See https://github.com/netty/netty/issues/2254
if (!readPending && !config.isAutoRead()) {
removeReadOp();
}
}
}
// DefaultChannelPipeline.fireChannelRead (excerpt, pseudo-signature):
// entry point that starts the inbound traversal at the head context.
io.netty.channel.DefaultChannelPipeline.fireChannelRead(Object){
AbstractChannelHandlerContext.invokeChannelRead(head, msg);// start from head
return this;
}
4.channelHandler里面的代码是在哪执行的,在pipeline.add的时候可以传入一个EventExecutorGroup作为执行的线程池,
如果没有传则使用Channel的EventLoop执行
// Same invokeChannelRead as quoted above, repeated here to show WHERE handler
// code executes: on next.executor() — the group passed to addLast, if any.
static void AbstractChannelHandlerContext.invokeChannelRead(final AbstractChannelHandlerContext next, Object msg) {
final Object m = next.pipeline.touch(ObjectUtil.checkNotNull(msg, "msg"), next);
EventExecutor executor = next.executor();
if (executor.inEventLoop()) {
// Already on the right thread: call straight through.
next.invokeChannelRead(m);
} else {
// Otherwise submit the call to the context's executor.
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelRead(m);
}
});
}
}
// AbstractChannelHandlerContext.executor (excerpt, pseudo-signature):
// returns the executor supplied at addLast(group, ...) time, or falls back
// to the channel's own EventLoop when none was given.
io.netty.channel.AbstractChannelHandlerContext.executor(){
if (executor == null) {
return channel().eventLoop();
} else {
return executor;
}
}