Last night I spent some time reading the Tomcat source, and today I am writing up my notes. The focus is Tomcat's startup and how Tomcat handles connections. It helps to first have a picture of Tomcat's overall architecture.
Tomcat startup
We start Tomcat with its default server.xml, unchanged. Skipping the details of how the configuration file is parsed, let's look at how the Connector is created. In the snippet below, focus on the single line Connector con = new Connector(protocolName);
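For context, the attributes the rule reads (executor, protocol, sslImplementationName) come from the <Connector> element in conf/server.xml. The stock element looks like this (attribute values vary by installation):

    <Connector port="8080" protocol="HTTP/1.1"
               connectionTimeout="20000"
               redirectPort="8443" />

The rule that handles this element is shown next.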
public void begin(String namespace, String name, Attributes attributes)
throws Exception {
Service svc = (Service) digester.peek();
Executor ex = null;
String executorName = attributes.getValue("executor");
if (executorName != null ) {
ex = svc.getExecutor(executorName);
}
String protocolName = attributes.getValue("protocol");
// focus on this line
Connector con = new Connector(protocolName);
if (ex != null) {
setExecutor(con, ex);
}
String sslImplementationName = attributes.getValue("sslImplementationName");
if (sslImplementationName != null) {
setSSLImplementationName(con, sslImplementationName);
}
digester.push(con);
}
Stepping into the constructor:
public Connector(String protocol) {
configuredProtocol = protocol;
ProtocolHandler p = null;
try {
// create the ProtocolHandler
p = ProtocolHandler.create(protocol);
} catch (Exception e) {
log.error(sm.getString(
"coyoteConnector.protocolHandlerInstantiationFailed"), e);
}
if (p != null) {
protocolHandler = p;
protocolHandlerClassName = protocolHandler.getClass().getName();
} else {
protocolHandler = null;
protocolHandlerClassName = protocol;
}
// Default for Connector depends on this system property
setThrowOnFailure(Boolean.getBoolean("org.apache.catalina.startup.EXIT_ON_INIT_FAILURE"));
}
Here p = ProtocolHandler.create(protocol); produces a ProtocolHandler instance:
public static ProtocolHandler create(String protocol)
throws ClassNotFoundException, InstantiationException, IllegalAccessException,
IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException {
if (protocol == null || "HTTP/1.1".equals(protocol)
|| org.apache.coyote.http11.Http11NioProtocol.class.getName().equals(protocol)) {
// Case 1: the common default (HTTP/1.1 over NIO)
return new org.apache.coyote.http11.Http11NioProtocol();
} else if ("AJP/1.3".equals(protocol)
|| org.apache.coyote.ajp.AjpNioProtocol.class.getName().equals(protocol)) {
// Case 2: AJP
return new org.apache.coyote.ajp.AjpNioProtocol();
} else {
// Case 3: instantiate the protocol handler by reflection from the class name
Class<?> clazz = Class.forName(protocol);
return (ProtocolHandler) clazz.getConstructor().newInstance();
}
}
Let's look at the first case, the constructor of org.apache.coyote.http11.Http11NioProtocol:
public class Http11NioProtocol extends AbstractHttp11JsseProtocol<NioChannel> {
private static final Log log = LogFactory.getLog(Http11NioProtocol.class);
public Http11NioProtocol() {
// the key element: the Endpoint
super(new NioEndpoint());
}
// abstract parent class; keep going up
public abstract class AbstractHttp11JsseProtocol<S>
extends AbstractHttp11Protocol<S> {
public AbstractHttp11JsseProtocol(AbstractJsseEndpoint<S,?> endpoint) {
super(endpoint);
}
// abstract parent class; keep going up
public abstract class AbstractHttp11Protocol<S> extends AbstractProtocol<S> {
public AbstractHttp11Protocol(AbstractEndpoint<S,?> endpoint) {
super(endpoint);
setConnectionTimeout(Constants.DEFAULT_CONNECTION_TIMEOUT);
}
// top-level abstract parent class; in short, the protocol is given an endpoint
public abstract class AbstractProtocol<S> implements ProtocolHandler,
MBeanRegistration {
public AbstractProtocol(AbstractEndpoint<S,?> endpoint) {
this.endpoint = endpoint;
// the connection handler
ConnectionHandler<S> cHandler = new ConnectionHandler<>(this);
getEndpoint().setHandler(cHandler);
setHandler(cHandler);
setConnectionLinger(Constants.DEFAULT_CONNECTION_LINGER);
setTcpNoDelay(Constants.DEFAULT_TCP_NO_DELAY);
}
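To recap what this constructor chain has wired up, here is a tiny standalone check (my own recap code, not Tomcat source) that builds a Connector the same way the Digester rule does; it only needs the Tomcat jars on the classpath:

    import org.apache.catalina.connector.Connector;

    public class ConnectorRecap {
        public static void main(String[] args) {
            // Same call the Digester rule makes for protocol="HTTP/1.1"
            Connector con = new Connector("HTTP/1.1");
            // The protocol handler created inside is Http11NioProtocol,
            // which in turn holds a NioEndpoint and a ConnectionHandler.
            System.out.println(con.getProtocolHandlerClassName());
            // expected: org.apache.coyote.http11.Http11NioProtocol
        }
    }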
The real startup
Startup proceeds from getServer().start() in the Catalina class; you can think of Catalina as standing in for the server.
org/apache/catalina/startup/Catalina.java
public void start() {
if (getServer() == null) {
load();
}
if (getServer() == null) {
log.fatal(sm.getString("catalina.noServer"));
return;
}
long t1 = System.nanoTime();
// Start the new server
try {
// focus on this line
getServer().start();
} catch (LifecycleException e) {
log.fatal(sm.getString("catalina.serverStartFail"), e);
try {
getServer().destroy();
} catch (LifecycleException e1) {
log.debug("destroy() failed for failed Server ", e1);
}
return;
}
This startup is closely tied to the Lifecycle machinery, so a brief aside. The Lifecycle interface controls a component's life cycle, and the abstract class LifecycleBase implements the common handling: for example, if you call start() on a component that has not been initialized yet, it first runs init(), then updates the component's state, and only then calls startInternal(), which subclasses implement to do the real starting. What each container does in each life-cycle phase is not discussed here; a minimal sketch of the pattern follows, and after it the Server startup.
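A minimal sketch of that template-method pattern, heavily simplified (the real org.apache.catalina.util.LifecycleBase also fires lifecycle events, validates state transitions, and handles failure states):

    import org.apache.catalina.LifecycleException;
    import org.apache.catalina.LifecycleState;

    // Simplified illustration only; not the real LifecycleBase.
    public abstract class SimpleLifecycleBase {

        protected volatile LifecycleState state = LifecycleState.NEW;

        public final synchronized void init() throws LifecycleException {
            state = LifecycleState.INITIALIZING;
            initInternal();                       // subclass-specific initialisation
            state = LifecycleState.INITIALIZED;
        }

        public final synchronized void start() throws LifecycleException {
            if (state == LifecycleState.NEW) {
                init();                           // not initialised yet: init first
            }
            state = LifecycleState.STARTING_PREP; // common state bookkeeping
            startInternal();                      // the subclass does the real starting
            state = LifecycleState.STARTED;
        }

        // Implemented by components such as StandardServer, StandardService, Connector.
        protected abstract void initInternal() throws LifecycleException;
        protected abstract void startInternal() throws LifecycleException;
    }

With that in mind, here is the Server startup: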
org/apache/catalina/core/StandardServer.java
protected void startInternal() throws LifecycleException {
fireLifecycleEvent(CONFIGURE_START_EVENT, null);
setState(LifecycleState.STARTING);
globalNamingResources.start();
// Start our defined Services
synchronized (servicesLock) {
for (Service service : services) {
service.start();
}
}
if (periodicEventDelay > 0) {
monitorFuture = getUtilityExecutor().scheduleWithFixedDelay(
() -> startPeriodicLifecycleEvent(), 0, 60, TimeUnit.SECONDS);
}
}
Next, the Service startup:
protected void startInternal() throws LifecycleException {
if(log.isInfoEnabled()) {
log.info(sm.getString("standardService.start.name", this.name));
}
setState(LifecycleState.STARTING);
// Start our defined Container first -- start the Engine
if (engine != null) {
synchronized (engine) {
engine.start();
}
}
// shared Executors; not examined here
synchronized (executors) {
for (Executor executor: executors) {
executor.start();
}
}
mapperListener.start();
// Start our defined Connectors second -- there can be more than one Connector
synchronized (connectorsLock) {
for (Connector connector: connectors) {
// If it has already failed, don't try and start it
if (connector.getState() != LifecycleState.FAILED) {
connector.start();
}
}
}
}
Let's focus on the Connector startup first.
//org/apache/catalina/connector/Connector.java
@Override
protected void startInternal() throws LifecycleException {
// Validate settings before starting
String id = (protocolHandler != null) ? protocolHandler.getId() : null;
if (id == null && getPortWithOffset() < 0) {
throw new LifecycleException(sm.getString(
"coyoteConnector.invalidPort", Integer.valueOf(getPortWithOffset())));
}
setState(LifecycleState.STARTING);
try {
// the protocolHandler obtained when the Connector was constructed (new Connector())
protocolHandler.start();
} catch (Exception e) {
throw new LifecycleException(
sm.getString("coyoteConnector.protocolHandlerStartFailed"), e);
}
}
Starting the protocolHandler:
//org/apache/coyote/AbstractProtocol.java
@Override
public void start() throws Exception {
if (getLog().isInfoEnabled()) {
getLog().info(sm.getString("abstractProtocolHandler.start", getName()));
logPortOffset();
}
// the endpoint passed in earlier via super(new NioEndpoint())
endpoint.start();
monitorFuture = getUtilityExecutor().scheduleWithFixedDelay(
() -> {
startAsyncTimeout();
}, 0, 60, TimeUnit.SECONDS);
}
Starting the endpoint:
//org/apache/tomcat/util/net/AbstractEndpoint.java
public final void start() throws Exception {
if (bindState == BindState.UNBOUND) {
bindWithCleanup();
bindState = BindState.BOUND_ON_START;
}
// the concrete startup, implemented by the subclass
startInternal();
}
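bindWithCleanup() ends up in the endpoint's bind(), which for the NIO endpoint opens and binds the listening ServerSocketChannel. In plain-NIO terms it amounts to roughly the following sketch (an approximation, not NioEndpoint's exact code; the real bind also honours the configured address, the acceptCount backlog, and Unix domain socket paths):

    import java.net.InetSocketAddress;
    import java.nio.channels.ServerSocketChannel;

    public class BindSketch {
        public static void main(String[] args) throws Exception {
            // Open the listening channel and bind it to the connector port
            // (8080 and a backlog of 100 are example values).
            ServerSocketChannel serverSock = ServerSocketChannel.open();
            serverSock.bind(new InetSocketAddress(8080), 100);
            // The acceptor uses a blocking accept(), even though accepted
            // sockets are switched to non-blocking mode later on.
            serverSock.configureBlocking(true);
            System.out.println("bound: " + serverSock.getLocalAddress());
        }
    }

startInternal() then performs the NIO-specific startup: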
// org/apache/tomcat/util/net/NioEndpoint.java
@Override
public void startInternal() throws Exception {
if (!running) {
running = true;
paused = false;
if (socketProperties.getProcessorCache() != 0) {
processorCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getProcessorCache());
}
if (socketProperties.getEventCache() != 0) {
eventCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getEventCache());
}
int actualBufferPool =
socketProperties.getActualBufferPool(isSSLEnabled() ? getSniParseLimit() * 2 : 0);
if (actualBufferPool != 0) {
nioChannels = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
actualBufferPool);
}
// From here on, focus on the worker thread pool
// Create worker collection
if (getExecutor() == null) {
createExecutor();
}
// limit the number of connections (maxConnections)
initializeConnectionLatch();
// Start poller thread -- the Poller wraps a Selector and keeps polling it
poller = new Poller();
Thread pollerThread = new Thread(poller, getName() + "-Poller");
pollerThread.setPriority(threadPriority);
pollerThread.setDaemon(true);
// start the poller thread
pollerThread.start();
// start the acceptor (listener) thread
startAcceptorThread();
}
}
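About createExecutor(): when no shared <Executor> is configured, the endpoint builds its own worker pool. Tomcat uses its own org.apache.tomcat.util.threads.ThreadPoolExecutor together with a TaskQueue that prefers creating threads up to maxThreads before queueing; a rough JDK-only approximation (10 and 200 are the usual minSpareThreads/maxThreads defaults) looks like this:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class WorkerPoolSketch {
        public static ThreadPoolExecutor createWorkerPool(String namePrefix) {
            AtomicInteger counter = new AtomicInteger(1);
            return new ThreadPoolExecutor(
                    10,                     // ~ minSpareThreads default
                    200,                    // ~ maxThreads default
                    60, TimeUnit.SECONDS,   // idle worker keep-alive
                    new LinkedBlockingQueue<>(),
                    r -> {
                        Thread t = new Thread(r, namePrefix + "-exec-" + counter.getAndIncrement());
                        t.setDaemon(true);  // connector workers are daemon threads
                        return t;
                    });
        }
    }

Note that with an unbounded queue a plain JDK pool never grows past the core size, which is exactly why Tomcat ships its own TaskQueue variant; treat the sketch as an illustration of the sizing, not of the queueing behaviour.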
Let's look at the acceptor thread first (Acceptor.run()):
@Override
public void run() {
int errorDelay = 0;
long pauseStart = 0;
try {
// Loop until we receive a shutdown command
while (!stopCalled) {
while (endpoint.isPaused() && !stopCalled) {
if (state != AcceptorState.PAUSED) {
pauseStart = System.nanoTime();
// Entered pause state
state = AcceptorState.PAUSED;
}
if ((System.nanoTime() - pauseStart) > 1_000_000) {
// Paused for more than 1ms
try {
if ((System.nanoTime() - pauseStart) > 10_000_000) {
Thread.sleep(10);
} else {
Thread.sleep(1);
}
} catch (InterruptedException e) {
// Ignore
}
}
}
if (stopCalled) {
break;
}
state = AcceptorState.RUNNING;
try {
//if we have reached max connections, wait -- throttles how many connections this port will take
endpoint.countUpOrAwaitConnection();
// Endpoint might have been paused while waiting for latch
// If that is the case, don't accept new connections
if (endpoint.isPaused()) {
continue;
}
U socket = null;
try {
// Accept the next incoming connection from the server socket.
// Blocks here waiting for a connection; stepping in, it is SocketChannel result = serverSock.accept();
socket = endpoint.serverSocketAccept();
} catch (Exception ioe) {
// We didn't get a socket
endpoint.countDownConnection();
if (endpoint.isRunning()) {
// Introduce delay if necessary
errorDelay = handleExceptionWithDelay(errorDelay);
// re-throw
throw ioe;
} else {
break;
}
}
// Successful accept, reset the error delay
errorDelay = 0;
// Configure the socket
if (!stopCalled && !endpoint.isPaused()) {
// setSocketOptions() will hand the socket off to
// an appropriate processor if successful -- the connection is handled from here
if (!endpoint.setSocketOptions(socket)) {
endpoint.closeSocket(socket);
}
} else {
endpoint.destroySocket(socket);
}
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
String msg = sm.getString("endpoint.accept.fail");
log.error(msg, t);
}
}
} finally {
stopLatch.countDown();
}
state = AcceptorState.ENDED;
}
Meanwhile, the Poller thread keeps polling. Next we follow how a newly accepted socket is handled, in setSocketOptions():
@Override
protected boolean setSocketOptions(SocketChannel socket) {
NioSocketWrapper socketWrapper = null;
try {
// Allocate channel and wrapper
NioChannel channel = null;
if (nioChannels != null) {
channel = nioChannels.pop();
}
if (channel == null) {
SocketBufferHandler bufhandler = new SocketBufferHandler(
socketProperties.getAppReadBufSize(),
socketProperties.getAppWriteBufSize(),
socketProperties.getDirectBuffer());
// wrap the channel (SSL or plain)
if (isSSLEnabled()) {
channel = new SecureNioChannel(bufhandler, this);
} else {
channel = new NioChannel(bufhandler);
}
}
NioSocketWrapper newWrapper = new NioSocketWrapper(channel, this);
channel.reset(socket, newWrapper);
connections.put(socket, newWrapper);
socketWrapper = newWrapper;
// Set socket properties
// Disable blocking, polling will be used
socket.configureBlocking(false);
if (getUnixDomainSocketPath() == null) {
socketProperties.setProperties(socket.socket());
}
socketWrapper.setReadTimeout(getConnectionTimeout());
socketWrapper.setWriteTimeout(getConnectionTimeout());
socketWrapper.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
// hand the connection over to the Poller
poller.register(socketWrapper);
return true;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
try {
log.error(sm.getString("endpoint.socketOptionsError"), t);
} catch (Throwable tt) {
ExceptionUtils.handleThrowable(tt);
}
if (socketWrapper == null) {
destroySocket(socket);
}
}
// Tell to close the socket if needed
return false;
}
Now let's look at how the Poller handles it:
// public class Poller implements Runnable
public void register(final NioSocketWrapper socketWrapper) {
socketWrapper.interestOps(SelectionKey.OP_READ);//this is what OP_REGISTER turns into.
// the event here is a socket registration (OP_REGISTER) -- note this, the analysis below comes back to it
PollerEvent pollerEvent = createPollerEvent(socketWrapper, OP_REGISTER);
// put it into the Poller's event queue
addEvent(pollerEvent);
}
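Why queue a PollerEvent instead of calling SocketChannel.register() directly? Because the Poller thread may be blocked inside selector.select(), and registering a channel on a Selector that another thread is currently selecting on can block until that select returns. So the producer enqueues the registration and wakes the selector, and the Poller thread performs the actual register() itself when it drains the queue in events(). A minimal sketch of that handoff pattern (my own names, not Tomcat's):

    import java.io.IOException;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.SocketChannel;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class RegistrationHandoffSketch {
        private final Selector selector;
        private final Queue<SocketChannel> pending = new ConcurrentLinkedQueue<>();

        public RegistrationHandoffSketch(Selector selector) {
            this.selector = selector;
        }

        // Called from the acceptor thread: queue the channel and wake the selector.
        public void register(SocketChannel ch) {
            pending.add(ch);
            selector.wakeup();   // make a blocking select() return so the queue gets drained
        }

        // Called from the poller thread at the top of its loop, like Poller.events().
        public boolean drainEvents() throws IOException {
            boolean hadEvents = false;
            SocketChannel ch;
            while ((ch = pending.poll()) != null) {
                hadEvents = true;
                ch.register(selector, SelectionKey.OP_READ);   // safe: we are the selecting thread
            }
            return hadEvents;
        }
    }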
The Poller keeps polling these events in its run() loop:
@Override
public void run() {
// Loop until destroy() is called
while (true) {
boolean hasEvents = false;
try {
if (!close) {
// check whether there are queued events
hasEvents = events();
if (wakeupCounter.getAndSet(-1) > 0) {
// If we are here, means we have other stuff to do
// Do a non blocking select
keyCount = selector.selectNow();
} else {
keyCount = selector.select(selectorTimeout);
}
wakeupCounter.set(0);
}
if (close) {
events();
timeout(0, false);
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString("endpoint.nio.selectorCloseFail"), ioe);
}
break;
}
// Either we timed out or we woke up, process events first
if (keyCount == 0) {
hasEvents = (hasEvents | events());
}
} catch (Throwable x) {
ExceptionUtils.handleThrowable(x);
log.error(sm.getString("endpoint.nio.selectorLoopError"), x);
continue;
}
Iterator<SelectionKey> iterator =
keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event.
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
iterator.remove();
NioSocketWrapper socketWrapper = (NioSocketWrapper) sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (socketWrapper != null) {
processKey(sk, socketWrapper);
}
}
// Process timeouts
timeout(keyCount,hasEvents);
}
getStopLatch().countDown();
}
Let's look at this part first. events() drains the event queue; wakeupCounter then decides whether to do a non-blocking selectNow() (events were just added and the selector was woken up) or a blocking select(selectorTimeout):
if (!close) {
// check whether there are queued events
hasEvents = events();
if (wakeupCounter.getAndSet(-1) > 0) {
// If we are here, means we have other stuff to do
// Do a non blocking select
keyCount = selector.selectNow();
} else {
keyCount = selector.select(selectorTimeout);
}
wakeupCounter.set(0);
events() itself looks like this:
public boolean events() {
boolean result = false;
PollerEvent pe = null;
// returns true as soon as the event queue is non-empty
for (int i = 0, size = events.size(); i < size && (pe = events.poll()) != null; i++ ) {
result = true;
// get the socket belonging to this event
NioSocketWrapper socketWrapper = pe.getSocketWrapper();
SocketChannel sc = socketWrapper.getSocket().getIOChannel();
int interestOps = pe.getInterestOps();
if (sc == null) {
log.warn(sm.getString("endpoint.nio.nullSocketChannel"));
socketWrapper.close();
} else if (interestOps == OP_REGISTER) {
try {
// if it is a registration, register the channel with the Selector, expressing interest in OP_READ
sc.register(getSelector(), SelectionKey.OP_READ, socketWrapper);
} catch (Exception x) {
log.error(sm.getString("endpoint.nio.registerFail"), x);
}
} else {
// other cases, analysed below
final SelectionKey key = sc.keyFor(getSelector());
if (key == null) {
// The key was cancelled (e.g. due to socket closure)
// and removed from the selector while it was being
// processed. Count down the connections at this point
// since it won't have been counted down when the socket
// closed.
socketWrapper.close();
} else {
// get the wrapped socket object attached to the key
final NioSocketWrapper attachment = (NioSocketWrapper) key.attachment();
if (attachment != null) {
// We are registering the key to start with, reset the fairness counter.
try {
int ops = key.interestOps() | interestOps;
attachment.interestOps(ops);
key.interestOps(ops);
} catch (CancelledKeyException ckx) {
socketWrapper.close();
}
} else {
socketWrapper.close();
}
}
}
if (running && eventCache != null) {
// after the event has been handled, reset it and cache it for reuse
pe.reset();
eventCache.push(pe);
}
}
return result;
}
sc.register(getSelector(), SelectionKey.OP_READ, socketWrapper);
As the registration above shows, this line gets executed. What does it mean? It registers the channel with the Poller's Selector for read events -- that is how the events queue is processed. Back in the run() loop, the code below executes next; this is the familiar NIO selected-keys pattern:
Iterator<SelectionKey> iterator =
keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event.
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
iterator.remove();
NioSocketWrapper socketWrapper = (NioSocketWrapper) sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (socketWrapper != null) {
// the processing step
processKey(sk, socketWrapper);
}
}
Each selected key is then processed by processKey(sk, socketWrapper):
protected void processKey(SelectionKey sk, NioSocketWrapper socketWrapper) {
try {
if (close) {
socketWrapper.close();
} else if (sk.isValid()) {
// read/write events
if (sk.isReadable() || sk.isWritable()) {
// sendfile in progress
if (socketWrapper.getSendfileData() != null) {
processSendfile(sk, socketWrapper, false);
} else {
// unreg() removes the ready ops from the interest set: sk.interestOps() & (~readyOps),
// so the selector does not keep reporting this key while the event is processed on another thread
unreg(sk, socketWrapper, sk.readyOps());
boolean closeSocket = false;
// Read goes before write
if (sk.isReadable()) {
if (socketWrapper.readOperation != null) {
if (!socketWrapper.readOperation.process()) {
closeSocket = true;
}
} else if (socketWrapper.readBlocking) {
synchronized (socketWrapper.readLock) {
socketWrapper.readBlocking = false;
socketWrapper.readLock.notify();
}
} else if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
closeSocket = true;
}
}
if (!closeSocket && sk.isWritable()) {
if (socketWrapper.writeOperation != null) {
if (!socketWrapper.writeOperation.process()) {
closeSocket = true;
}
} else if (socketWrapper.writeBlocking) {
synchronized (socketWrapper.writeLock) {
socketWrapper.writeBlocking = false;
socketWrapper.writeLock.notify();
}
// next, execution goes through here
} else if (!processSocket(socketWrapper, SocketEvent.OPEN_WRITE, true)) {
closeSocket = true;
}
}
if (closeSocket) {
socketWrapper.close();
}
}
}
} else {
// Invalid key
socketWrapper.close();
}
} catch (CancelledKeyException ckx) {
socketWrapper.close();
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
log.error(sm.getString("endpoint.nio.keyProcessingError"), t);
}
}
The execution of processSocket(socketWrapper, SocketEvent.OPEN_WRITE, true):
// org/apache/tomcat/util/net/AbstractEndpoint.java
try {
if (socketWrapper == null) {
return false;
}
SocketProcessorBase<S> sc = null;
if (processorCache != null) {
sc = processorCache.pop();
}
if (sc == null) {
// pick a suitable SocketProcessor; the concrete endpoint subclass decides which
sc = createSocketProcessor(socketWrapper, event);
} else {
sc.reset(socketWrapper, event);
}
Executor executor = getExecutor();
if (dispatch && executor != null) {
// hand it to the executor (the worker thread pool) to run
executor.execute(sc);
} else {
sc.run();
}
} catch (RejectedExecutionException ree) {
getLog().warn(sm.getString("endpoint.executor.fail", socketWrapper) , ree);
return false;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
// This means we got an OOM or similar creating a thread, or that
// the pool and its queue are full
getLog().error(sm.getString("endpoint.process.fail"), t);
return false;
}
return true;
The code that actually runs on the worker thread is NioEndpoint's SocketProcessor (its doRun() method); the relevant part is:
Poller poller = NioEndpoint.this.poller;
if (poller == null) {
socketWrapper.close();
return;
}
try {
int handshake = -1;
try {
if (socketWrapper.getSocket().isHandshakeComplete()) {
// No TLS handshaking required. Let the handler
// process this socket / event combination.
handshake = 0;
} else if (event == SocketEvent.STOP || event == SocketEvent.DISCONNECT ||
event == SocketEvent.ERROR) {
// Unable to complete the TLS handshake. Treat it as
// if the handshake failed.
handshake = -1;
} else {
handshake = socketWrapper.getSocket().handshake(event == SocketEvent.OPEN_READ, event == SocketEvent.OPEN_WRITE);
// The handshake process reads/writes from/to the
// socket. status may therefore be OPEN_WRITE once
// the handshake completes. However, the handshake
// happens when the socket is opened so the status
// must always be OPEN_READ after it completes. It
// is OK to always set this as it is only used if
// the handshake completes.
event = SocketEvent.OPEN_READ;
}
} catch (IOException x) {
handshake = -1;
if (logHandshake.isDebugEnabled()) {
logHandshake.debug(sm.getString("endpoint.err.handshake",
socketWrapper.getRemoteAddr(), Integer.toString(socketWrapper.getRemotePort())), x);
}
} catch (CancelledKeyException ckx) {
handshake = -1;
}
if (handshake == 0) {
SocketState state = SocketState.OPEN;
// Process the request from this socket
if (event == null) {
state = getHandler().process(socketWrapper, SocketEvent.OPEN_READ);
} else {
// next, execution reaches this call
state = getHandler().process(socketWrapper, event);
}
if (state == SocketState.CLOSED) {
socketWrapper.close();
}
} else if (handshake == -1 ) {
getHandler().process(socketWrapper, SocketEvent.CONNECT_FAIL);
socketWrapper.close();
} else if (handshake == SelectionKey.OP_READ){
socketWrapper.registerReadInterest();
} else if (handshake == SelectionKey.OP_WRITE){
socketWrapper.registerWriteInterest();
}
} catch (CancelledKeyException cx) {
socketWrapper.close();
} catch (VirtualMachineError vme) {
ExceptionUtils.handleThrowable(vme);
} catch (Throwable t) {
log.error(sm.getString("endpoint.processing.fail"), t);
socketWrapper.close();
} finally {
socketWrapper = null;
event = null;
//return to cache
if (running && processorCache != null) {
processorCache.push(this);
}
}
}
Analysis of state = getHandler().process(socketWrapper, event):
getHandler() returns the handler installed on the endpoint, which by default is the ConnectionHandler created in the AbstractProtocol constructor (new ConnectionHandler<>(this)). Its process() method is shown below.
// org/apache/coyote/AbstractProtocol.java.ConnectionHandler<S>
@Override
public SocketState process(SocketWrapperBase<S> wrapper, SocketEvent status) {
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.process",
wrapper.getSocket(), status));
}
if (wrapper == null) {
// Nothing to do. Socket has been closed.
return SocketState.CLOSED;
}
S socket = wrapper.getSocket();
// We take complete ownership of the Processor inside of this method to ensure
// no other thread can release it while we're using it. Whatever processor is
// held by this variable will be associated with the SocketWrapper before this
// method returns.
Processor processor = (Processor) wrapper.takeCurrentProcessor();
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.connectionsGet",
processor, socket));
}
// Timeouts are calculated on a dedicated thread and then
// dispatched. Because of delays in the dispatch process, the
// timeout may no longer be required. Check here and avoid
// unnecessary processing.
if (SocketEvent.TIMEOUT == status &&
(processor == null ||
!processor.isAsync() && !processor.isUpgrade() ||
processor.isAsync() && !processor.checkAsyncTimeoutGeneration())) {
// This is effectively a NO-OP
return SocketState.OPEN;
}
if (processor != null) {
// Make sure an async timeout doesn't fire
getProtocol().removeWaitingProcessor(processor);
} else if (status == SocketEvent.DISCONNECT || status == SocketEvent.ERROR) {
// Nothing to do. Endpoint requested a close and there is no
// longer a processor associated with this socket.
return SocketState.CLOSED;
}
try {
if (processor == null) {
String negotiatedProtocol = wrapper.getNegotiatedProtocol();
// OpenSSL typically returns null whereas JSSE typically
// returns "" when no protocol is negotiated
if (negotiatedProtocol != null && negotiatedProtocol.length() > 0) {
UpgradeProtocol upgradeProtocol = getProtocol().getNegotiatedProtocol(negotiatedProtocol);
if (upgradeProtocol != null) {
processor = upgradeProtocol.getProcessor(wrapper, getProtocol().getAdapter());
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.processorCreate", processor));
}
} else if (negotiatedProtocol.equals("http/1.1")) {
// Explicitly negotiated the default protocol.
// Obtain a processor below.
} else {
// TODO:
// OpenSSL 1.0.2's ALPN callback doesn't support
// failing the handshake with an error if no
// protocol can be negotiated. Therefore, we need to
// fail the connection here. Once this is fixed,
// replace the code below with the commented out
// block.
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.negotiatedProcessor.fail",
negotiatedProtocol));
}
return SocketState.CLOSED;
/*
* To replace the code above once OpenSSL 1.1.0 is
* used.
// Failed to create processor. This is a bug.
throw new IllegalStateException(sm.getString(
"abstractConnectionHandler.negotiatedProcessor.fail",
negotiatedProtocol));
*/
}
}
}
if (processor == null) {
processor = recycledProcessors.pop();
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.processorPop", processor));
}
}
if (processor == null) {
//====================================
// A brand-new request lands here: the branches above only reuse an
// existing or recycled processor; otherwise a new one is created.
//====================================
processor = getProtocol().createProcessor();
// register the processor so its request statistics are exposed via JMX
register(processor);
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.processorCreate", processor));
}
}
processor.setSslSupport(wrapper.getSslSupport());
SocketState state = SocketState.CLOSED;
do {
// ==============================
// The actual processing: processor.process() parses and handles the
// request; that path is left for a later post.
// ==============================
state = processor.process(wrapper, status);
if (state == SocketState.UPGRADING) {
// Get the HTTP upgrade handler
UpgradeToken upgradeToken = processor.getUpgradeToken();
// Restore leftover input to the wrapper so the upgrade
// processor can process it.
ByteBuffer leftOverInput = processor.getLeftoverInput();
wrapper.unRead(leftOverInput);
if (upgradeToken == null) {
// Assume direct HTTP/2 connection
UpgradeProtocol upgradeProtocol = getProtocol().getUpgradeProtocol("h2c");
if (upgradeProtocol != null) {
// Release the Http11 processor to be re-used
release(processor);
// Create the upgrade processor
processor = upgradeProtocol.getProcessor(wrapper, getProtocol().getAdapter());
} else {
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString(
"abstractConnectionHandler.negotiatedProcessor.fail",
"h2c"));
}
// Exit loop and trigger appropriate clean-up
state = SocketState.CLOSED;
}
} else {
HttpUpgradeHandler httpUpgradeHandler = upgradeToken.getHttpUpgradeHandler();
// Release the Http11 processor to be re-used
release(processor);
// Create the upgrade processor
processor = getProtocol().createUpgradeProcessor(wrapper, upgradeToken);
if (getLog().isDebugEnabled()) {
getLog().debug(sm.getString("abstractConnectionHandler.upgradeCreate",
processor, wrapper));
}
// Initialise the upgrade handler (which may trigger
// some IO using the new protocol which is why the lines
// above are necessary)
// This cast should be safe. If it fails the error
// handling for the surrounding try/catch will deal with
// it.
if (upgradeToken.getInstanceManager() == null) {
httpUpgradeHandler.init((WebConnection) processor);
} else {
ClassLoader oldCL = upgradeToken.getContextBind().bind(false, null);
try {
httpUpgradeHandler.init((WebConnection) processor);
} finally {
upgradeToken.getContextBind().unbind(false, oldCL);
}
}
if (httpUpgradeHandler instanceof InternalHttpUpgradeHandler) {
if (((InternalHttpUpgradeHandler) httpUpgradeHandler).hasAsyncIO()) {
// The handler will initiate all further I/O
state = SocketState.ASYNC_IO;
}
}
}
}
} while ( state == SocketState.UPGRADING);
if (state == SocketState.LONG) {
longPoll(wrapper, processor);
if (processor.isAsync()) {
getProtocol().addWaitingProcessor(processor);
}
} else if (state == SocketState.OPEN) {
// In keep-alive but between requests. OK to recycle
// processor. Continue to poll for the next request.
release(processor);
processor = null;
wrapper.registerReadInterest();
} else if (state == SocketState.SENDFILE) {
} else if (state == SocketState.UPGRADED) {
if (status != SocketEvent.OPEN_WRITE) {
longPoll(wrapper, processor);
getProtocol().addWaitingProcessor(processor);
}
} else if (state == SocketState.ASYNC_IO) {
} else if (state == SocketState.SUSPENDED) {
} else {
if (processor != null && processor.isUpgrade()) {
UpgradeToken upgradeToken = processor.getUpgradeToken();
HttpUpgradeHandler httpUpgradeHandler = upgradeToken.getHttpUpgradeHandler();
InstanceManager instanceManager = upgradeToken.getInstanceManager();
if (instanceManager == null) {
httpUpgradeHandler.destroy();
} else {
ClassLoader oldCL = upgradeToken.getContextBind().bind(false, null);
try {
httpUpgradeHandler.destroy();
} finally {
try {
instanceManager.destroyInstance(httpUpgradeHandler);
} catch (Throwable e) {
ExceptionUtils.handleThrowable(e);
getLog().error(sm.getString("abstractConnectionHandler.error"), e);
}
upgradeToken.getContextBind().unbind(false, oldCL);
}
}
}
release(processor);
processor = null;
}
if (processor != null) {
wrapper.setCurrentProcessor(processor);
}
return state;
} catch(java.net.SocketException e) {
// SocketExceptions are normal
getLog().debug(sm.getString(
"abstractConnectionHandler.socketexception.debug"), e);
} catch (java.io.IOException e) {
// IOExceptions are normal
getLog().debug(sm.getString(
"abstractConnectionHandler.ioexception.debug"), e);
} catch (ProtocolException e) {
// Protocol exceptions normally mean the client sent invalid or
// incomplete data.
getLog().debug(sm.getString(
"abstractConnectionHandler.protocolexception.debug"), e);
}
catch (OutOfMemoryError oome) {
getLog().error(sm.getString("abstractConnectionHandler.oome"), oome);
} catch (Throwable e) {
ExceptionUtils.handleThrowable(e);
getLog().error(sm.getString("abstractConnectionHandler.error"), e);
}
release(processor);
return SocketState.CLOSED;
}
processor = getProtocol().createProcessor();
Here getProtocol() is the current protocol object (Http11NioProtocol in our case). createProcessor() builds an Http11Processor; the adapter it receives is the CoyoteAdapter that later hands the parsed request over to the servlet container.
// org/apache/coyote/http11/AbstractHttp11Protocol.java
@Override
protected Processor createProcessor() {
Http11Processor processor = new Http11Processor(this, adapter);
return processor;
}
Now the picture is clear: protocol = endpoint + processor. How the processor then handles the wrapper (request parsing and the servlet pipeline) will be covered in a separate article.
Summary
Based on the configured Connector, the protocolHandler inside the Connector is initialized.
protocolHandler = endpoint + processor.
The endpoint has an Acceptor responsible for listening and a Poller responsible for polling. When a socket registration event is picked up, a corresponding SocketProcessor handles it. The SocketProcessor is a Runnable instance that is handed straight to the thread pool; it delegates to the ConnectionHandler<S>, which obtains a Processor based on the current protocolHandler and processes the request.
A toy version of this architecture is sketched below.
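To make the summary concrete, here is a tiny self-contained toy that mirrors the same division of labour -- one blocking acceptor, one Poller (Selector) thread, and a worker pool. It is only a sketch of the pattern (no connection limit, no keep-alive, no processor reuse, one echo per connection), not Tomcat code:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.nio.ByteBuffer;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.ServerSocketChannel;
    import java.nio.channels.SocketChannel;
    import java.util.Iterator;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Toy acceptor/poller/worker server: echoes one read per connection, then closes it.
    public class MiniEndpoint {

        private final Selector selector;
        private final Queue<SocketChannel> events = new ConcurrentLinkedQueue<>(); // ~ the Poller's event queue
        private final ExecutorService workers = Executors.newFixedThreadPool(8);   // ~ the worker thread pool

        public MiniEndpoint() throws IOException {
            this.selector = Selector.open();
        }

        public static void main(String[] args) throws IOException {
            new MiniEndpoint().start(9090);
        }

        public void start(int port) throws IOException {
            ServerSocketChannel serverSock = ServerSocketChannel.open();
            serverSock.bind(new InetSocketAddress(port), 100);
            serverSock.configureBlocking(true);                  // blocking accept, like NioEndpoint

            new Thread(this::pollerLoop, "mini-Poller").start();

            // Acceptor loop (runs on the main thread here, ~ Acceptor.run()).
            while (true) {
                SocketChannel socket = serverSock.accept();      // ~ endpoint.serverSocketAccept()
                socket.configureBlocking(false);                 // ~ setSocketOptions()
                events.add(socket);                              // ~ poller.register(socketWrapper)
                selector.wakeup();
            }
        }

        private void pollerLoop() {
            while (true) {
                try {
                    // Drain queued registrations, ~ Poller.events().
                    SocketChannel ch;
                    while ((ch = events.poll()) != null) {
                        ch.register(selector, SelectionKey.OP_READ);
                    }
                    if (selector.select(1000) == 0) {
                        continue;
                    }
                    Iterator<SelectionKey> it = selector.selectedKeys().iterator();
                    while (it.hasNext()) {
                        SelectionKey key = it.next();
                        it.remove();
                        if (key.isValid() && key.isReadable()) {
                            key.interestOps(0);                  // ~ unreg(): don't select this key again while processing
                            SocketChannel socket = (SocketChannel) key.channel();
                            workers.execute(() -> echoAndClose(socket)); // ~ processSocket() dispatching a SocketProcessor
                        }
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }

        // ~ what a SocketProcessor/Processor pair does, reduced to a one-shot echo.
        private void echoAndClose(SocketChannel socket) {
            ByteBuffer buf = ByteBuffer.allocate(1024);
            try {
                if (socket.read(buf) > 0) {
                    buf.flip();
                    socket.write(buf);
                }
            } catch (IOException e) {
                // connection reset etc. -- just drop the socket
            } finally {
                try { socket.close(); } catch (IOException e2) { /* ignore */ }
            }
        }
    }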
If the explanation is unclear, I encourage you to step through the code yourself in a debugger.
Other
This is my first article and it is admittedly messy -- mostly code. If you have writing experience or suggestions, please don't hesitate to share.
======================
I'm a rookie hoping to grow quickly. The road ahead is long and winding; I will keep searching high and low.
Friends on the programming journey are welcome to learn and discuss together -- feel free to reach out and connect.