org.apache.tomcat.util.net.NioEndpoint is the class that accepts and handles network connections; it contains several important inner classes (Acceptor, Poller, PollerEvent, SocketProcessor).
org.apache.tomcat.util.net.NioEndpoint#startInternal starts the important threads:
public void startInternal() throws Exception {
if (!running) {
running = true;
paused = false;
// These object caches (processorCache, eventCache, nioChannels) all default to size 128
processorCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE, socketProperties.getProcessorCache());
eventCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE, socketProperties.getEventCache());
// The read and write buffers of pooled NioChannels default to 8192 bytes each
nioChannels = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE, socketProperties.getBufferPool());
// Create worker collection
if ( getExecutor() == null ) {
createExecutor();
}
// Initialize the connection limit (maxConnections defaults to 10000)
initializeConnectionLatch();
// Start poller threads
// getPollerThreadCount() defaults to Math.min(2, available processors), i.e. 2 on most machines
pollers = new Poller[getPollerThreadCount()];
for (int i=0; i<pollers.length; i++) {
pollers[i] = new Poller();
Thread pollerThread = new Thread(pollers[i], getName() + "-ClientPoller-"+i);
pollerThread.setPriority(threadPriority);
pollerThread.setDaemon(true);
pollerThread.start();
}
startAcceptorThreads();
}
}
The pollers array is sized by getPollerThreadCount(), so by default two Poller threads are created.
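The three SynchronizedStack caches created in startInternal() are object pools: they recycle SocketProcessor, PollerEvent, and NioChannel instances so that steady-state request handling allocates almost no garbage. A rough sketch of the pop-or-allocate contract (illustrative only; Tomcat's actual SynchronizedStack is a synchronized, array-backed stack):

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.atomic.AtomicInteger;

// Minimal object-pool sketch: pop() returns null when empty, which tells the
// caller to allocate a fresh instance; push() recycles instances up to a cap.
public class SimplePool<T> {
    private final ConcurrentLinkedDeque<T> stack = new ConcurrentLinkedDeque<>();
    private final AtomicInteger size = new AtomicInteger();
    private final int limit;

    public SimplePool(int limit) { this.limit = limit; }

    public T pop() {
        T item = stack.pollFirst();
        if (item != null) size.decrementAndGet();
        return item; // null means "allocate a new one"
    }

    public boolean push(T item) {
        if (size.incrementAndGet() > limit) {
            size.decrementAndGet();
            return false; // pool full: let the instance be garbage collected
        }
        stack.offerFirst(item);
        return true;
    }
}

This is exactly the usage pattern seen later: nioChannels.pop() followed by new NioChannel(...) when null, and eventCache.push(pe) / processorCache.push(this) on the way back.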
org.apache.tomcat.util.net.NioEndpoint.Acceptor#run accepts incoming connection requests:
protected class Acceptor extends AbstractEndpoint.Acceptor {
// Accept loop for incoming socket connections
@Override
public void run() {
int errorDelay = 0;
// Loop until we receive a shutdown command
while (running) {
// Loop if endpoint is paused
while (paused && running) {
state = AcceptorState.PAUSED;
try {
Thread.sleep(50);
} catch (InterruptedException e) {
// Ignore
}
}
if (!running) {
break;
}
state = AcceptorState.RUNNING;
try {
//if we have reached max connections, wait
countUpOrAwaitConnection();
SocketChannel socket = null;
try {
// Accept the next incoming connection from the server socket
socket = serverSock.accept();
} catch (IOException ioe) {
//we didn't get a socket
countDownConnection();
// Introduce delay if necessary
errorDelay = handleExceptionWithDelay(errorDelay);
// re-throw
throw ioe;
}
// Successful accept, reset the error delay
errorDelay = 0;
// setSocketOptions() will add channel to the poller
// if successful
if (running && !paused) {
// setSocketOptions() wraps the SocketChannel and hands it to a Poller, which queues it as a PollerEvent
if (!setSocketOptions(socket)) {
countDownConnection();
closeSocket(socket);
}
} else {
countDownConnection();
closeSocket(socket);
}
} catch (SocketTimeoutException sx) {
// Ignore: Normal condition
} catch (IOException x) {
if (running) {
log.error(sm.getString("endpoint.accept.fail"), x);
}
} catch (OutOfMemoryError oom) {
try {
oomParachuteData = null;
releaseCaches();
log.error("", oom);
}catch ( Throwable oomt ) {
try {
try {
System.err.println(oomParachuteMsg);
oomt.printStackTrace();
}catch (Throwable letsHopeWeDontGetHere){
ExceptionUtils.handleThrowable(letsHopeWeDontGetHere);
}
}catch (Throwable letsHopeWeDontGetHere){
ExceptionUtils.handleThrowable(letsHopeWeDontGetHere);
}
}
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
log.error(sm.getString("endpoint.accept.fail"), t);
}
}
state = AcceptorState.ENDED;
}
}
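countUpOrAwaitConnection() is what enforces the maxConnections limit mentioned above: once 10000 connections are in flight the Acceptor blocks before calling accept(), and countDownConnection() frees a slot when a connection closes or the accept fails. Tomcat implements this with its own AQS-based LimitLatch; behaviorally it is close to a semaphore, as in this sketch (the method names mirror the endpoint's, but this is not Tomcat code):

import java.util.concurrent.Semaphore;

// Behavioral sketch of the connection limit: maxConnections permits,
// acquired before accept() and released on close/failure.
public class ConnectionLimiter {
    private final Semaphore permits;

    public ConnectionLimiter(int maxConnections) {
        permits = new Semaphore(maxConnections);
    }

    public void countUpOrAwaitConnection() throws InterruptedException {
        permits.acquire(); // blocks once maxConnections connections are open
    }

    public void countDownConnection() {
        permits.release();
    }
}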
org.apache.tomcat.util.net.NioEndpoint#setSocketOptions
protected boolean setSocketOptions(SocketChannel socket) {
// Process the connection
try {
//disable blocking, APR style, we are gonna be polling it
socket.configureBlocking(false);
Socket sock = socket.socket();
socketProperties.setProperties(sock);
// The nioChannels cache (capacity 128) starts empty, so for early connections pop() returns null and a fresh NioChannel is created below; recycled channels are reused later
NioChannel channel = nioChannels.pop();
if ( channel == null ) {
// SSL setup
if (sslContext != null) {
SSLEngine engine = createSSLEngine();
int appbufsize = engine.getSession().getApplicationBufferSize();
NioBufferHandler bufhandler = new NioBufferHandler(Math.max(appbufsize,socketProperties.getAppReadBufSize()),
Math.max(appbufsize,socketProperties.getAppWriteBufSize()),
socketProperties.getDirectBuffer());
channel = new SecureNioChannel(socket, engine, bufhandler, selectorPool);
} else {
// Normal (non-TLS) TCP setup: read and write buffers default to 8192 bytes each
NioBufferHandler bufhandler = new NioBufferHandler(socketProperties.getAppReadBufSize(),
socketProperties.getAppWriteBufSize(),
socketProperties.getDirectBuffer());
channel = new NioChannel(socket, bufhandler);
}
} else {
channel.setIOChannel(socket);
if ( channel instanceof SecureNioChannel ) {
SSLEngine engine = createSSLEngine();
((SecureNioChannel)channel).reset(engine);
} else {
channel.reset();
}
}
// Register the channel with a Poller; getPoller0() round-robins across the pollers
getPoller0().register(channel);
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
try {
log.error("",t);
} catch (Throwable tt) {
ExceptionUtils.handleThrowable(tt);
}
// Tell to close the socket
return false;
}
return true;
}
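For reference, the essence of this method in plain NIO calls is: switch the accepted channel to non-blocking mode (a Selector drives it from now on) and apply per-socket options. The sketch below uses illustrative hard-coded values rather than connector configuration, and SO_RCVBUF/SO_SNDBUF are kernel-level buffers that merely stand in for the socketProperties plumbing:

import java.io.IOException;
import java.net.StandardSocketOptions;
import java.nio.channels.SocketChannel;

public class SocketSetup {
    // What setSocketOptions() boils down to before registration.
    static void configure(SocketChannel socket) throws IOException {
        socket.configureBlocking(false);                           // polled, never blocking
        socket.setOption(StandardSocketOptions.TCP_NODELAY, true); // tcpNoDelay
        socket.setOption(StandardSocketOptions.SO_RCVBUF, 8192);   // kernel receive buffer
        socket.setOption(StandardSocketOptions.SO_SNDBUF, 8192);   // kernel send buffer
    }
}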
org.apache.tomcat.util.net.NioEndpoint.Poller#register
public void register(final NioChannel socket) {
socket.setPoller(this);
KeyAttachment ka = new KeyAttachment(socket);
ka.setPoller(this);
ka.setTimeout(getSocketProperties().getSoTimeout());
ka.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
ka.setSecure(isSSLEnabled());
PollerEvent r = eventCache.pop();
ka.interestOps(SelectionKey.OP_READ);//this is what OP_REGISTER turns into.
if (r == null)
r = new PollerEvent(socket, ka, OP_REGISTER);
else
r.reset(socket, ka, OP_REGISTER);
// Queue the event on the Poller
addEvent(r);
}
org.apache.tomcat.util.net.NioEndpoint.Poller#addEvent
private void addEvent(PollerEvent event) {
events.offer(event);
if ( wakeupCounter.incrementAndGet() == 0 ) selector.wakeup();
}
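This queue-plus-counter handshake is worth spelling out. The Poller sets wakeupCounter to -1 just before a blocking select(), so the first addEvent() after that sees incrementAndGet() == 0 and pays for the relatively expensive selector.wakeup() syscall; every later producer sees a positive value and skips it. A self-contained sketch of both sides (class and method names are invented for illustration):

import java.io.IOException;
import java.nio.channels.Selector;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;

public class WakeupPattern {
    private final Queue<Runnable> events = new ConcurrentLinkedQueue<>();
    private final AtomicLong wakeupCounter = new AtomicLong();
    private final Selector selector;

    public WakeupPattern() throws IOException {
        selector = Selector.open();
    }

    // Producer side (the Acceptor): only the -1 -> 0 transition wakes the selector.
    public void addEvent(Runnable event) {
        events.offer(event);
        if (wakeupCounter.incrementAndGet() == 0) selector.wakeup();
    }

    // Consumer side (the Poller): a positive counter means events arrived since
    // the last pass, so poll without blocking; otherwise block with a timeout.
    public void selectLoopIteration(long timeoutMs) throws IOException {
        if (wakeupCounter.getAndSet(-1) > 0) {
            selector.selectNow();
        } else {
            selector.select(timeoutMs);
        }
        wakeupCounter.set(0);
        Runnable e;
        while ((e = events.poll()) != null) e.run();
    }
}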
At this point the Acceptor's job is done.
org.apache.tomcat.util.net.NioEndpoint.Poller#run polls registered channels for ready events:
public void run() {
// Loop until destroy() is called
while (true) {
try {
// Loop if endpoint is paused
while (paused && (!close) ) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
// Ignore
}
}
boolean hasEvents = false;
// Time to terminate?
if (close) {
// Drain any pending PollerEvents before shutting down
events();
timeout(0, false);
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString(
"endpoint.nio.selectorCloseFail"), ioe);
}
break;
} else {
hasEvents = events();
}
try {
if ( !close ) {
// Whether select() returns immediately depends on whether new events were queued
if (wakeupCounter.getAndSet(-1) > 0) {
//if we are here, means we have other stuff to do
//do a non blocking select
keyCount = selector.selectNow();
} else {
// Block for up to selectorTimeout (1 second by default)
keyCount = selector.select(selectorTimeout);
}
wakeupCounter.set(0);
}
if (close) {
events();
timeout(0, false);
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString(
"endpoint.nio.selectorCloseFail"), ioe);
}
break;
}
} catch (Throwable x) {
ExceptionUtils.handleThrowable(x);
log.error("",x);
continue;
}
//either we timed out or we woke up, process events first
if ( keyCount == 0 ) hasEvents = (hasEvents | events());
Iterator<SelectionKey> iterator = keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event.
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
KeyAttachment attachment = (KeyAttachment)sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (attachment == null) {
iterator.remove();
} else {
attachment.access();
iterator.remove();
// Dispatch the ready key for processing
processKey(sk, attachment);
}
}//while
//process timeouts
timeout(keyCount,hasEvents);
if ( oomParachute > 0 && oomParachuteData == null ) checkParachute();
} catch (OutOfMemoryError oom) {
try {
oomParachuteData = null;
releaseCaches();
log.error("", oom);
}catch ( Throwable oomt ) {
try {
System.err.println(oomParachuteMsg);
oomt.printStackTrace();
}catch (Throwable letsHopeWeDontGetHere){
ExceptionUtils.handleThrowable(letsHopeWeDontGetHere);
}
}
}
}//while
stopLatch.countDown();
}
org.apache.tomcat.util.net.NioEndpoint.Poller#events first drains the event queue, registering each queued socket with this Poller's Selector (each Poller owns exactly one Selector; the endpoint has multiple Pollers):
public boolean events() {
boolean result = false;
PollerEvent pe = null;
for (int i = 0, size = events.size(); i < size && (pe = events.poll()) != null; i++ ) {
result = true;
try {
// Run the PollerEvent (registers the channel or updates interestOps)
pe.run();
pe.reset();
if (running && !paused) {
eventCache.push(pe);
}
} catch ( Throwable x ) {
log.error("",x);
}
}
return result;
}
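Note that the loop snapshots events.size() before draining: events enqueued while the drain runs (including events the loop itself re-queues) wait for the next call, so a single events() invocation cannot spin indefinitely. The idiom in isolation (illustrative, not Tomcat code):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class BoundedDrain {
    private final Queue<Runnable> events = new ConcurrentLinkedQueue<>();

    // Drain at most the number of events present when the call started.
    boolean drainOnce() {
        boolean ran = false;
        for (int i = 0, size = events.size(); i < size; i++) {
            Runnable e = events.poll();
            if (e == null) break; // queue emptied concurrently
            e.run();
            ran = true;
        }
        return ran;
    }
}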
org.apache.tomcat.util.net.NioEndpoint.PollerEvent#run
public void run() {
// OP_REGISTER: register the channel with the Poller's Selector, attaching the KeyAttachment
if ( interestOps == OP_REGISTER ) {
try {
socket.getIOChannel().register(socket.getPoller().getSelector(), SelectionKey.OP_READ, key);
} catch (Exception x) {
log.error("", x);
}
} else {
// Look up the channel's SelectionKey for this Poller's Selector
final SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
try {
if (key == null) {
// The key was cancelled (e.g. due to socket closure)
// and removed from the selector while it was being
// processed. Count down the connections at this point
// since it won't have been counted down when the socket
// closed.
socket.getPoller().getEndpoint().countDownConnection();
} else {
// Merge the new interest ops into the existing KeyAttachment and SelectionKey
final KeyAttachment att = (KeyAttachment) key.attachment();
if ( att!=null ) {
att.access();//to prevent timeout
//we are registering the key to start with, reset the fairness counter.
int ops = key.interestOps() | interestOps;
att.interestOps(ops);
key.interestOps(ops);
} else {
socket.getPoller().cancelledKey(key, SocketStatus.ERROR);
}
}
} catch (CancelledKeyException ckx) {
try {
socket.getPoller().cancelledKey(key, SocketStatus.DISCONNECT);
} catch (Exception ignore) {}
}
}//end if
}
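OP_REGISTER is a Tomcat-internal pseudo-op: the real SelectionKey.OP_READ registration is deferred to the poller thread via the event queue, because SelectableChannel.register() can block while the Selector is in the middle of a select(). A minimal standalone demo of registering a channel with an attachment and reading it back on a ready event (ConnectionState is an invented stand-in for KeyAttachment):

import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

public class RegisterDemo {
    // Per-connection state travels with the key, like Tomcat's KeyAttachment.
    static class ConnectionState {
        long lastAccess = System.currentTimeMillis();
    }

    static SelectionKey register(Selector selector, SocketChannel ch) throws IOException {
        ch.configureBlocking(false);
        // The attachment is stored on the key and retrieved on every ready event.
        return ch.register(selector, SelectionKey.OP_READ, new ConnectionState());
    }

    static void onReady(SelectionKey key) {
        ConnectionState state = (ConnectionState) key.attachment();
        state.lastAccess = System.currentTimeMillis(); // same idea as att.access()
    }
}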
org.apache.tomcat.util.net.NioEndpoint.Poller#processKey
protected boolean processKey(SelectionKey sk, KeyAttachment attachment) {
boolean result = true;
try {
if ( close ) {
cancelledKey(sk, SocketStatus.STOP);
} else if ( sk.isValid() && attachment != null ) {
attachment.access();//make sure we don't time out valid sockets
if (sk.isReadable() || sk.isWritable() ) {
if ( attachment.getSendfileData() != null ) {
// Handle sendfile
processSendfile(sk,attachment, false);
} else {
if ( isWorkerAvailable() ) {
unreg(sk, attachment, sk.readyOps());
boolean closeSocket = false;
// Read goes before write
if (sk.isReadable()) {
// Handle the read event
if (!processSocket(attachment, SocketStatus.OPEN_READ, true)) {
closeSocket = true;
}
}
if (!closeSocket && sk.isWritable()) {
// Handle the write event
if (!processSocket(attachment, SocketStatus.OPEN_WRITE, true)) {
closeSocket = true;
}
}
if (closeSocket) {
cancelledKey(sk,SocketStatus.DISCONNECT);
}
} else {
result = false;
}
}
}
} else {
//invalid key
cancelledKey(sk, SocketStatus.ERROR);
}
} catch ( CancelledKeyException ckx ) {
cancelledKey(sk, SocketStatus.ERROR);
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
log.error("",t);
}
return result;
}
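The unreg(sk, attachment, sk.readyOps()) call before dispatch is the crucial move in this level-triggered design: it removes the just-reported ready bits from the interest set, so the Selector does not keep reporting the same readiness (and dispatching the same socket to a second worker) while the first worker is still processing it. The two halves of the pattern in isolation:

import java.nio.channels.SelectionKey;

public class InterestOpsToggle {
    // Before dispatch: stop listening for the ops we are about to handle.
    static void unreg(SelectionKey key, int readyOps) {
        key.interestOps(key.interestOps() & ~readyOps);
    }

    // After processing: re-arm the interest ops (in Tomcat this goes back
    // through a PollerEvent so the change happens on the poller thread).
    static void rearm(SelectionKey key, int ops) {
        key.interestOps(key.interestOps() | ops);
    }
}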
org.apache.tomcat.util.net.NioEndpoint#processSocket wraps the work in a SocketProcessor and submits it to the worker thread pool:
protected boolean processSocket(KeyAttachment attachment, SocketStatus status, boolean dispatch) {
try {
if (attachment == null) {
return false;
}
SocketProcessor sc = processorCache.pop();
if ( sc == null )
sc = new SocketProcessor(attachment, status);
else
sc.reset(attachment, status);
Executor executor = getExecutor();
if (dispatch && executor != null) {
// Tomcat 8 creates a worker pool by default; run the SocketProcessor on it
executor.execute(sc);
} else {
// No executor: run inline on the current (poller) thread
sc.run();
}
} catch (RejectedExecutionException ree) {
log.warn(sm.getString("endpoint.executor.fail", attachment.getSocket()), ree);
return false;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
// This means we got an OOM or similar creating a thread, or that
// the pool and its queue are full
log.error(sm.getString("endpoint.process.fail"), t);
return false;
}
return true;
}
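Stripped of the caching, the dispatch decision is: use the worker pool when one is configured and dispatch is requested, otherwise run inline on the calling (poller) thread, and treat a rejected execution as a signal to close the socket. A distilled sketch:

import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;

public class Dispatcher {
    // Returns false when the pool rejects the task, so the caller can
    // count the connection down and close the socket.
    static boolean dispatch(Executor executor, Runnable task, boolean dispatch) {
        try {
            if (dispatch && executor != null) {
                executor.execute(task); // run on a worker thread
            } else {
                task.run();             // run inline, e.g. on the poller thread
            }
            return true;
        } catch (RejectedExecutionException ree) {
            return false;
        }
    }
}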
org.apache.tomcat.util.net.AbstractEndpoint#createExecutor
public void createExecutor() {
internalExecutor = true;
TaskQueue taskqueue = new TaskQueue();
TaskThreadFactory tf = new TaskThreadFactory(getName() + "-exec-", daemon, getThreadPriority());
// Creates a pool with corePoolSize = minSpareThreads (default 10), maximumPoolSize = maxThreads (default 200),
// keepAliveTime 60s, a TaskQueue, and thread names prefixed "http-nio-8080-exec-"
executor = new ThreadPoolExecutor(getMinSpareThreads(), getMaxThreads(), 60, TimeUnit.SECONDS,taskqueue, tf);
taskqueue.setParent( (ThreadPoolExecutor) executor);
}
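The TaskQueue passed here is what makes Tomcat's pool grow to maxThreads before queueing. With a plain LinkedBlockingQueue, ThreadPoolExecutor queues tasks as soon as corePoolSize threads exist and only creates more threads once the queue is full; TaskQueue inverts this by refusing offer() while the pool can still grow. A simplified sketch of the trick (the real TaskQueue also cooperates with Tomcat's own ThreadPoolExecutor subclass, which retries a rejected offer; that part is omitted):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;

// Refusing offer() while poolSize < maximumPoolSize makes the executor spawn
// a new worker instead of parking the task, so the pool reaches maxThreads
// before the queue starts filling up.
public class EagerTaskQueue extends LinkedBlockingQueue<Runnable> {
    private transient volatile ThreadPoolExecutor parent;

    public void setParent(ThreadPoolExecutor tp) {
        this.parent = tp;
    }

    @Override
    public boolean offer(Runnable r) {
        if (parent == null) return super.offer(r);
        if (parent.getPoolSize() < parent.getMaximumPoolSize()) {
            return false; // pretend we are full -> executor adds a worker
        }
        return super.offer(r); // at maxThreads: genuinely queue the task
    }
}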
org.apache.tomcat.util.net.NioEndpoint.SocketProcessor#run executes the task on a worker thread:
public void run() {
NioChannel socket = ka.getSocket();
// Upgraded connections need to allow multiple threads to access the
// connection at the same time to enable blocking IO to be used when
// NIO has been configured
if (ka.isUpgraded() && SocketStatus.OPEN_WRITE == status) {
synchronized (ka.getWriteThreadLock()) {
doRun();
}
} else {
synchronized (socket) {
doRun();
}
}
}
org.apache.tomcat.util.net.NioEndpoint.SocketProcessor#doRun
private void doRun() {
NioChannel socket = ka.getSocket();
SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
try {
int handshake = -1;
try {
if (key != null) {
// For STOP there is no point trying to handshake as the
// Poller has been stopped.
if (socket.isHandshakeComplete() || status == SocketStatus.STOP) {
handshake = 0;
} else {
handshake = socket.handshake(key.isReadable(), key.isWritable());
// The handshake process reads/writes from/to the
// socket. status may therefore be OPEN_WRITE once
// the handshake completes. However, the handshake
// happens when the socket is opened so the status
// must always be OPEN_READ after it completes. It
// is OK to always set this as it is only used if
// the handshake completes.
status = SocketStatus.OPEN_READ;
}
}
} catch (IOException x) {
handshake = -1;
if (log.isDebugEnabled()) log.debug("Error during SSL handshake",x);
} catch (CancelledKeyException ckx) {
handshake = -1;
}
if (handshake == 0) {
SocketState state = SocketState.OPEN;
// Process the request from this socket
if (status == null) {
// AbstractConnectionHandler.process
state = handler.process(ka, SocketStatus.OPEN_READ);
} else {
state = handler.process(ka, status);
}
if (state == SocketState.CLOSED) {
close(socket, key, SocketStatus.ERROR);
}
} else if (handshake == -1 ) {
close(socket, key, SocketStatus.DISCONNECT);
} else {
ka.getPoller().add(socket,handshake);
}
} catch (CancelledKeyException cx) {
socket.getPoller().cancelledKey(key, null);
} catch (OutOfMemoryError oom) {
try {
oomParachuteData = null;
log.error("", oom);
socket.getPoller().cancelledKey(key,SocketStatus.ERROR);
releaseCaches();
} catch (Throwable oomt) {
try {
System.err.println(oomParachuteMsg);
oomt.printStackTrace();
} catch (Throwable letsHopeWeDontGetHere){
ExceptionUtils.handleThrowable(letsHopeWeDontGetHere);
}
}
} catch (VirtualMachineError vme) {
ExceptionUtils.handleThrowable(vme);
} catch (Throwable t) {
log.error("", t);
socket.getPoller().cancelledKey(key,SocketStatus.ERROR);
} finally {
ka = null;
status = null;
//return to cache
if (running && !paused) {
processorCache.push(this);
}
}
}
org.apache.coyote.AbstractProtocol.AbstractConnectionHandler#process
public SocketState process(SocketWrapper<S> wrapper, SocketStatus status) {
if (wrapper == null) {
// Nothing to do. Socket has been closed.
return SocketState.CLOSED;
}
S socket = wrapper.getSocket();
if (socket == null) {
// Nothing to do. Socket has been closed.
return SocketState.CLOSED;
}
// Look up the Processor already associated with this socket, if any
Processor<S> processor = connections.get(socket);
if (status == SocketStatus.DISCONNECT && processor == null) {
// Nothing to do. Endpoint requested a close and there is no
// longer a processor associated with this socket.
return SocketState.CLOSED;
}
wrapper.setAsync(false);
ContainerThreadMarker.set();
try {
if (processor == null) {
processor = recycledProcessors.pop();
}
if (processor == null) {
// createProcessor() creates an Http11Processor, which parses the bytes from the socket and populates the coyote Request
processor = createProcessor();
}
initSsl(wrapper, processor);
SocketState state = SocketState.CLOSED;
Iterator<DispatchType> dispatches = null;
do {
if (dispatches != null) {
// Associate the processor with the connection as
// these calls may result in a nested call to process()
connections.put(socket, processor);
DispatchType nextDispatch = dispatches.next();
if (processor.isUpgrade()) {
state = processor.upgradeDispatch(nextDispatch.getSocketStatus());
} else {
state = processor.asyncDispatch(nextDispatch.getSocketStatus());
}
} else if (processor.isComet()) {
state = processor.event(status);
} else if (processor.isUpgrade()) {
state = processor.upgradeDispatch(status);
} else if (status == SocketStatus.DISCONNECT) {
// Comet and upgrade need to see DISCONNECT but the
// others don't. NO-OP and let socket close.
} else if (processor.isAsync() || state == SocketState.ASYNC_END) {
state = processor.asyncDispatch(status);
if (state == SocketState.OPEN) {
// release() won't get called so in case this request
// takes a long time to process, remove the socket from
// the waiting requests now else the async timeout will
// fire
getProtocol().endpoint.removeWaitingRequest(wrapper);
// There may be pipe-lined data to read. If the data
// isn't processed now, execution will exit this
// loop and call release() which will recycle the
// processor (and input buffer) deleting any
// pipe-lined data. To avoid this, process it now.
state = processor.process(wrapper);
}
} else if (status == SocketStatus.OPEN_WRITE) {
// Extra write event likely after async, ignore
state = SocketState.LONG;
} else {
// Process the request: AbstractHttp11Processor.process
state = processor.process(wrapper);
}
if (state != SocketState.CLOSED && processor.isAsync()) {
// Post-processing for async servlets
state = processor.asyncPostProcess();
}
if (state == SocketState.UPGRADING) {
// Get the HTTP upgrade handler
UpgradeToken upgradeToken = processor.getUpgradeToken();
HttpUpgradeHandler httpUpgradeHandler = upgradeToken.getHttpUpgradeHandler();
// Retrieve leftover input
ByteBuffer leftoverInput = processor.getLeftoverInput();
// Release the Http11 processor to be re-used
release(wrapper, processor, false, false);
// Create the upgrade processor
processor = createUpgradeProcessor(
wrapper, leftoverInput, upgradeToken);
// Mark the connection as upgraded
wrapper.setUpgraded(true);
// Associate with the processor with the connection
connections.put(socket, processor);
// Initialise the upgrade handler (which may trigger
// some IO using the new protocol which is why the lines
// above are necessary)
// This cast should be safe. If it fails the error
// handling for the surrounding try/catch will deal with
// it.
if (upgradeToken.getInstanceManager() == null) {
httpUpgradeHandler.init((WebConnection) processor);
} else {
ClassLoader oldCL = upgradeToken.getContextBind().bind(false, null);
try {
httpUpgradeHandler.init((WebConnection) processor);
} finally {
upgradeToken.getContextBind().unbind(false, oldCL);
}
}
}
if (getLog().isDebugEnabled()) {
getLog().debug("Socket: [" + wrapper +
"], Status in: [" + status +
"], State out: [" + state + "]");
}
if (dispatches == null || !dispatches.hasNext()) {
// Only returns non-null iterator if there are
// dispatches to process.
dispatches = wrapper.getIteratorAndClearDispatches();
}
} while (state == SocketState.ASYNC_END ||
state == SocketState.UPGRADING ||
dispatches != null && state != SocketState.CLOSED);
if (state == SocketState.LONG) {
// In the middle of processing a request/response. Keep the
// socket associated with the processor. Exact requirements
// depend on type of long poll
connections.put(socket, processor);
longPoll(wrapper, processor);
} else if (state == SocketState.OPEN) {
// In keep-alive but between requests. OK to recycle
// processor. Continue to poll for the next request.
connections.remove(socket);
release(wrapper, processor, false, true);
} else if (state == SocketState.SENDFILE) {
// Sendfile in progress. If it fails, the socket will be
// closed. If it works, the socket either be added to the
// poller (or equivalent) to await more data or processed
// if there are any pipe-lined requests remaining.
connections.put(socket, processor);
} else if (state == SocketState.UPGRADED) {
// Don't add sockets back to the poller if this was a
// non-blocking write otherwise the poller may trigger
// multiple read events which may lead to thread starvation
// in the connector. The write() method will add this socket
// to the poller if necessary.
if (status != SocketStatus.OPEN_WRITE) {
longPoll(wrapper, processor);
}
} else {
// Connection closed. OK to recycle the processor. Upgrade
// processors are not recycled.
connections.remove(socket);
if (processor.isUpgrade()) {
UpgradeToken upgradeToken = processor.getUpgradeToken();
HttpUpgradeHandler httpUpgradeHandler = upgradeToken.getHttpUpgradeHandler();
InstanceManager instanceManager = upgradeToken.getInstanceManager();
if (instanceManager == null) {
httpUpgradeHandler.destroy();
} else {
ClassLoader oldCL = upgradeToken.getContextBind().bind(false, null);
try {
httpUpgradeHandler.destroy();
} finally {
try {
instanceManager.destroyInstance(httpUpgradeHandler);
} catch (Throwable e) {
ExceptionUtils.handleThrowable(e);
getLog().error(sm.getString("abstractConnectionHandler.error"), e);
}
upgradeToken.getContextBind().unbind(false, oldCL);
}
}
} else {
release(wrapper, processor, true, false);
}
}
return state;
} catch(java.net.SocketException e) {
// SocketExceptions are normal
getLog().debug(sm.getString(
"abstractConnectionHandler.socketexception.debug"), e);
} catch (java.io.IOException e) {
// IOExceptions are normal
getLog().debug(sm.getString(
"abstractConnectionHandler.ioexception.debug"), e);
}
// Future developers: if you discover any other
// rare-but-nonfatal exceptions, catch them here, and log as
// above.
catch (Throwable e) {
ExceptionUtils.handleThrowable(e);
// any other exception or error is odd. Here we log it
// with "ERROR" level, so it will show up even on
// less-than-verbose logs.
getLog().error(
sm.getString("abstractConnectionHandler.error"), e);
} finally {
ContainerThreadMarker.clear();
}
// Make sure socket/processor is removed from the list of current
// connections
connections.remove(socket);
// Don't try to add upgrade processors back into the pool
if (processor !=null && !processor.isUpgrade()) {
release(wrapper, processor, true, false);
}
return SocketState.CLOSED;
}
From here the request is handed off to the servlet container for processing, which is not covered further here. To summarize the flow:
1. The Acceptor blocks in accept() waiting for connections; in NIO terms each accepted connection is a SocketChannel.
2. The SocketChannel is wrapped in a PollerEvent and added to a Poller's internal concurrent event queue.
3. The Poller drains the queued events, registering each SocketChannel with its Selector, then polls the Selector for ready events.
4. Each ready event is dispatched as a SocketProcessor task to the worker thread pool.
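The whole pipeline can be condensed into a runnable toy. The following is a deliberately minimal echo server (illustrative only; none of it is Tomcat code): one blocking acceptor, one poller running the same event-queue/wakeup-counter handshake and unreg trick, and a worker pool:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

public class MiniReactor {
    private final Queue<SocketChannel> pending = new ConcurrentLinkedQueue<>();
    private final AtomicLong wakeupCounter = new AtomicLong();
    private final ExecutorService workers = Executors.newFixedThreadPool(8);
    private Selector selector;

    public static void main(String[] args) throws IOException {
        new MiniReactor().start(8080);
    }

    void start(int port) throws IOException {
        selector = Selector.open();
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(port));
        new Thread(this::poll, "mini-poller").start();
        while (true) {                                      // step 1: blocking accept
            SocketChannel ch = server.accept();
            pending.offer(ch);                              // step 2: queue for the poller
            if (wakeupCounter.incrementAndGet() == 0) selector.wakeup();
        }
    }

    void poll() {
        while (true) {
            try {
                SocketChannel ch;
                while ((ch = pending.poll()) != null) {     // step 3: register on poller thread
                    ch.configureBlocking(false);
                    ch.register(selector, SelectionKey.OP_READ);
                }
                if (wakeupCounter.getAndSet(-1) > 0) selector.selectNow();
                else selector.select(1000);
                wakeupCounter.set(0);
                Iterator<SelectionKey> it = selector.selectedKeys().iterator();
                while (it.hasNext()) {
                    SelectionKey key = it.next();
                    it.remove();
                    key.interestOps(key.interestOps() & ~key.readyOps()); // unreg
                    workers.execute(() -> echo(key));       // step 4: hand off to a worker
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    void echo(SelectionKey key) {
        SocketChannel ch = (SocketChannel) key.channel();
        ByteBuffer buf = ByteBuffer.allocate(8192);
        try {
            int n = ch.read(buf);
            if (n < 0) { key.cancel(); ch.close(); return; }
            buf.flip();
            while (buf.hasRemaining()) ch.write(buf);       // echo the bytes back
            key.interestOps(key.interestOps() | SelectionKey.OP_READ); // re-arm read
            key.selector().wakeup();
        } catch (IOException e) {
            key.cancel();
        }
    }
}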