It is illegal to call this method if the current request is not in asynchronous mode (i.e. isAsyncStarted() returns false)

This article describes an error that can occur when logging API requests with a Spring AOP aspect, and shows how to fix the serialization failure caused by ServletRequest and ServletResponse objects appearing among the handler arguments.

The aspect implementation that triggers the error:

/**
 * @author aa
 * @create_date 2019-12-24 15:15
 * @Desc Aspect for logging API requests
 */

import com.alibaba.fastjson.JSON;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.AfterReturning;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

import javax.servlet.http.HttpServletRequest;

@Aspect
@Component
public class WebLogAcpect {
    protected static final Logger logger = LoggerFactory.getLogger(WebLogAcpect.class);

    /**
     * Define the pointcut
     */
    @Pointcut("execution(* com.smcv.xyx.partOrder.manage.controller.view..*.*(..))")
    public void webLog(){}

    /**
     * Before advice
     */
    @Before("webLog()")
    public void doBefore(JoinPoint joinPoint) throws Exception {
        // Request received: look up the current HttpServletRequest
        ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        HttpServletRequest request = attributes.getRequest();
        // Log the request details
        logger.info("-------------log print start--------");
        logger.info("Request URI: {}, method: {}", request.getRequestURI(), request.getMethod());
        // The exception is thrown by the following line
        logger.info("Request args: {}", JSON.toJSONString(joinPoint.getArgs()));

    }

    @AfterReturning(returning="ret",pointcut = "webLog()")
    public void doAfterReturning(Object ret) throws Exception {
        ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        HttpServletRequest request = attributes.getRequest();
        // Response produced: log it together with the request URI
        logger.info("Response of {}: {}", request.getRequestURI(), JSON.toJSONString(ret));
        logger.info("-------------log print end--------");

    }

}

Cause: when the intercepted method's arguments include a ServletRequest or ServletResponse, fastjson cannot serialize those objects, so JSON.toJSONString(joinPoint.getArgs()) fails. fastjson serializes each argument as a JavaBean by invoking its public getters; on an HttpServletRequest that includes getAsyncContext(), which the Servlet spec only permits while the request is in asynchronous mode, hence the IllegalStateException quoted above.
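For illustration only (this controller is hypothetical, not part of the original article, and is assumed to live in a package matched by the pointcut above): any handler that declares HttpServletRequest or HttpServletResponse as a parameter puts that object into joinPoint.getArgs(), which the aspect then hands straight to fastjson.

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import javax.servlet.http.HttpServletRequest;

@RestController
public class DemoController {

    // The aspect intercepts this method. joinPoint.getArgs() is {name, request};
    // asking fastjson to serialize the HttpServletRequest element is what throws.
    @GetMapping("/demo")
    public String demo(@RequestParam String name, HttpServletRequest request) {
        return "hello " + name;
    }
}

Handlers that take only simple parameters (strings, numbers, DTOs) never trigger the error, which is why it typically shows up on just a few endpoints.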

[Solution]

/**
 * @author aa
 * @create_date 2019-12-24 15:15
 * @Desc Aspect for logging API requests
 */

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.AfterReturning;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

import java.util.Arrays;

@Aspect
@Component
public class WebLogAcpect {
    protected static final Logger logger = LoggerFactory.getLogger(WebLogAcpect.class);

    /**
     * Define the pointcut
     */
    @Pointcut("execution(* com.smcv.xyx.partOrder.manage.controller.view..*.*(..))")
    public void webLog(){}

    /**
     * Before advice
     */
    @Before("webLog()")
    public void doBefore(JoinPoint joinPoint) {
        Object[] args = joinPoint.getArgs();
        Object[] arguments = new Object[args.length];
        for (int i = 0; i < args.length; i++) {
            if (args[i] instanceof ServletRequest || args[i] instanceof ServletResponse || args[i] instanceof MultipartFile) {
                // ServletRequest cannot be serialized; exclude it from the logged arguments, otherwise:
                // java.lang.IllegalStateException: It is illegal to call this method if the current request is not in asynchronous mode (i.e. isAsyncStarted() returns false)
                // ServletResponse cannot be serialized either; exclude it, otherwise:
                // java.lang.IllegalStateException: getOutputStream() has already been called for this response
                continue;
            }
            arguments[i] = args[i];
        }
        String parameter;
        try {
            parameter = JSONObject.toJSONString(arguments);
        } catch (Exception e) {
            // Fall back to a plain textual dump if fastjson still cannot serialize an argument
            parameter = Arrays.toString(arguments);
        }
        logger.info("Handler method: {}", joinPoint.getSignature().getName());
        logger.info("Request args: {}", parameter);
    }

    @AfterReturning(returning="ret",pointcut = "webLog()")
    public void doAfterReturning(Object ret) throws Exception {
        ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        HttpServletRequest request = attributes.getRequest();
        // Response produced: log it together with the request URI
        logger.info("Response of {}: {}", request.getRequestURI(), JSON.toJSONString(ret));
        logger.info("-------------log print end--------");

    }

}
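If you would rather drop the excluded arguments entirely instead of leaving null placeholders at their positions, the same filtering can be expressed with the Stream API. The helper below is only a sketch of that variant (LogArgsHelper and toLoggableJson are names invented for this example); it assumes the same fastjson dependency as the aspect above.

import com.alibaba.fastjson.JSON;
import org.aspectj.lang.JoinPoint;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.util.Arrays;

public final class LogArgsHelper {

    private LogArgsHelper() {
    }

    // Keep only the arguments that fastjson can serialize safely and render them as JSON.
    public static String toLoggableJson(JoinPoint joinPoint) {
        Object[] safeArgs = Arrays.stream(joinPoint.getArgs())
                .filter(arg -> !(arg instanceof ServletRequest)
                        && !(arg instanceof ServletResponse)
                        && !(arg instanceof MultipartFile))
                .toArray();
        return JSON.toJSONString(safeArgs);
    }
}

Inside doBefore the logging call then shrinks to logger.info("Request args: {}", LogArgsHelper.toLoggableJson(joinPoint)); keep the array-based version from the article if you need the logged values to line up index-for-index with the method signature.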

 

r"""HTTP/1.1 client library <intro stuff goes here> <other stuff, too> HTTPConnection goes through a number of "states", which define when a client may legally make another request or fetch the response for a particular request. This diagram details these state transitions: (null) | | HTTPConnection() v Idle | | putrequest() v Request-started | | ( putheader() )* endheaders() v Request-sent |\_____________________________ | | getresponse() raises | response = getresponse() | ConnectionError v v Unread-response Idle [Response-headers-read] |\____________________ | | | response.read() | putrequest() v v Idle Req-started-unread-response ______/| / | response.read() | | ( putheader() )* endheaders() v v Request-started Req-sent-unread-response | | response.read() v Request-sent This diagram presents the following rules: -- a second request may not be started until {response-headers-read} -- a response [object] cannot be retrieved until {request-sent} -- there is no differentiation between an unread response body and a partially read response body Note: this enforcement is applied by the HTTPConnection class. The HTTPResponse class does not enforce this state machine, which implies sophisticated clients may accelerate the request/response pipeline. Caution should be taken, though: accelerating the states beyond the above pattern may imply knowledge of the server's connection-close behavior for certain requests. For example, it is impossible to tell whether the server will close the connection UNTIL the response headers have been read; this means that further requests cannot be placed into the pipeline until it is known that the server will NOT be closing the connection. Logical State __state __response ------------- ------- ---------- Idle _CS_IDLE None Request-started _CS_REQ_STARTED None Request-sent _CS_REQ_SENT None Unread-response _CS_IDLE <response_class> Req-started-unread-response _CS_REQ_STARTED <response_class> Req-sent-unread-response _CS_REQ_SENT <response_class> """ import email.parser import email.message import errno import http import io import re import socket import sys import collections.abc from urllib.parse import urlsplit # HTTPMessage, parse_headers(), and the HTTP status code constants are # intentionally omitted for simplicity __all__ = ["HTTPResponse", "HTTPConnection", "HTTPException", "NotConnected", "UnknownProtocol", "UnknownTransferEncoding", "UnimplementedFileMode", "IncompleteRead", "InvalidURL", "ImproperConnectionState", "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", "BadStatusLine", "LineTooLong", "RemoteDisconnected", "error", "responses"] HTTP_PORT = 80 HTTPS_PORT = 443 _UNKNOWN = 'UNKNOWN' # connection states _CS_IDLE = 'Idle' _CS_REQ_STARTED = 'Request-started' _CS_REQ_SENT = 'Request-sent' # hack to maintain backwards compatibility globals().update(http.HTTPStatus.__members__) # another hack to maintain backwards compatibility # Mapping status codes to official W3C names responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()} # maximal line length when calling readline(). 
_MAXLINE = 65536 _MAXHEADERS = 100 # Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2) # # VCHAR = %x21-7E # obs-text = %x80-FF # header-field = field-name ":" OWS field-value OWS # field-name = token # field-value = *( field-content / obs-fold ) # field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] # field-vchar = VCHAR / obs-text # # obs-fold = CRLF 1*( SP / HTAB ) # ; obsolete line folding # ; see Section 3.2.4 # token = 1*tchar # # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" # / DIGIT / ALPHA # ; any VCHAR, except delimiters # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 # the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch _is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search # These characters are not allowed within HTTP URL paths. # See https://tools.ietf.org/html/rfc3986#section-3.3 and the # https://tools.ietf.org/html/rfc3986#appendix-A pchar definition. # Prevents CVE-2019-9740. Includes control characters such as \r\n. # We don't restrict chars above \x7f as putrequest() limits us to ASCII. _contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]') # Arguably only these _should_ allowed: # _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$") # We are more lenient for assumed real world compatibility purposes. # These characters are not allowed within HTTP method names # to prevent http header injection. _contains_disallowed_method_pchar_re = re.compile('[\x00-\x1f]') # We always set the Content-Length header for these methods because some # servers will otherwise respond with a 411 _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'} def _encode(data, name='data'): """Call data.encode("latin-1") but show a better error message.""" try: return data.encode("latin-1") except UnicodeEncodeError as err: raise UnicodeEncodeError( err.encoding, err.object, err.start, err.end, "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') " "if you want to send it encoded in UTF-8." % (name.title(), data[err.start:err.end], name)) from None class HTTPMessage(email.message.Message): # XXX The only usage of this method is in # http.server.CGIHTTPRequestHandler. Maybe move the code there so # that it doesn't need to be part of the public API. The API has # never been defined so this could cause backwards compatibility # issues. def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.keys(): if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst def _read_headers(fp): """Reads potential header lines into a list from a file pointer. Length of line is limited by _MAXLINE, and number of headers is limited by _MAXHEADERS. 
""" headers = [] while True: line = fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") headers.append(line) if len(headers) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if line in (b'\r\n', b'\n', b''): break return headers def parse_headers(fp, _class=HTTPMessage): """Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. """ headers = _read_headers(fp) hstring = b''.join(headers).decode('iso-8859-1') return email.parser.Parser(_class=_class).parsestr(hstring) class HTTPResponse(io.BufferedIOBase): # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. # The bytes from the socket object are iso-8859-1 strings. # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded # text following RFC 2047. The basic status line parsing only # accepts iso-8859-1. def __init__(self, sock, debuglevel=0, method=None, url=None): # If the response includes a content-length header, we need to # make sure that the client doesn't read more than the # specified number of bytes. If it does, it will block until # the server times out and closes the connection. This will # happen if a self.fp.read() is done (without a size) whether # self.fp is buffered or not. So, no self.fp.read() by # clients unless they know what they are doing. self.fp = sock.makefile("rb") self.debuglevel = debuglevel self._method = method # The HTTPResponse object is returned via urllib. The clients # of http and urllib expect different attributes for the # headers. headers is used here and supports urllib. msg is # provided as a backwards compatibility layer for http # clients. self.headers = self.msg = None # from the Status-Line of the response self.version = _UNKNOWN # HTTP-Version self.status = _UNKNOWN # Status-Code self.reason = _UNKNOWN # Reason-Phrase self.chunked = _UNKNOWN # is "chunked" being used? self.chunk_left = _UNKNOWN # bytes left to read in current chunk self.length = _UNKNOWN # number of bytes left in response self.will_close = _UNKNOWN # conn will close at end of response def _read_status(self): line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1") if len(line) > _MAXLINE: raise LineTooLong("status line") if self.debuglevel > 0: print("reply:", repr(line)) if not line: # Presumably, the server closed the connection before # sending a valid response. raise RemoteDisconnected("Remote end closed connection without" " response") try: version, status, reason = line.split(None, 2) except ValueError: try: version, status = line.split(None, 1) reason = "" except ValueError: # empty version will cause next test to fail. 
version = "" if not version.startswith("HTTP/"): self._close_conn() raise BadStatusLine(line) # The status code is a three-digit number try: status = int(status) if status < 100 or status > 999: raise BadStatusLine(line) except ValueError: raise BadStatusLine(line) return version, status, reason def begin(self): if self.headers is not None: # we've already started reading the response return # read until we get a non-100 response while True: version, status, reason = self._read_status() if status != CONTINUE: break # skip the header from the 100 response skipped_headers = _read_headers(self.fp) if self.debuglevel > 0: print("headers:", skipped_headers) del skipped_headers self.code = self.status = status self.reason = reason.strip() if version in ("HTTP/1.0", "HTTP/0.9"): # Some servers might still return "0.9", treat it as 1.0 anyway self.version = 10 elif version.startswith("HTTP/1."): self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 else: raise UnknownProtocol(version) self.headers = self.msg = parse_headers(self.fp) if self.debuglevel > 0: for hdr, val in self.headers.items(): print("header:", hdr + ":", val) # are we using the chunked-style of transfer encoding? tr_enc = self.headers.get("transfer-encoding") if tr_enc and tr_enc.lower() == "chunked": self.chunked = True self.chunk_left = None else: self.chunked = False # will the connection close at the end of the response? self.will_close = self._check_close() # do we have a Content-Length? # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" self.length = None length = self.headers.get("content-length") if length and not self.chunked: try: self.length = int(length) except ValueError: self.length = None else: if self.length < 0: # ignore nonsensical negative lengths self.length = None else: self.length = None # does the body have a fixed length? (of zero) if (status == NO_CONTENT or status == NOT_MODIFIED or 100 <= status < 200 or # 1xx codes self._method == "HEAD"): self.length = 0 # if the connection remains open, and we aren't using chunked, and # a content-length was not provided, then assume that the connection # WILL close. if (not self.will_close and not self.chunked and self.length is None): self.will_close = True def _check_close(self): conn = self.headers.get("connection") if self.version == 11: # An HTTP/1.1 proxy is assumed to stay open unless # explicitly closed. if conn and "close" in conn.lower(): return True return False # Some HTTP/1.0 implementations have support for persistent # connections, using rules different than HTTP/1.1. # For older HTTP, Keep-Alive indicates persistent connection. if self.headers.get("keep-alive"): return False # At least Akamai returns a "Connection: Keep-Alive" header, # which was supposed to be sent by the client. if conn and "keep-alive" in conn.lower(): return False # Proxy-Connection is a netscape hack. pconn = self.headers.get("proxy-connection") if pconn and "keep-alive" in pconn.lower(): return False # otherwise, assume it will close return True def _close_conn(self): fp = self.fp self.fp = None fp.close() def close(self): try: super().close() # set "closed" flag finally: if self.fp: self._close_conn() # These implementations are for the benefit of io.BufferedReader. # XXX This class should probably be revised to act more like # the "raw stream" that BufferedReader expects. 
def flush(self): super().flush() if self.fp: self.fp.flush() def readable(self): """Always returns True""" return True # End of "raw stream" methods def isclosed(self): """True if the connection is closed.""" # NOTE: it is possible that we will not ever call self.close(). This # case occurs when will_close is TRUE, length is None, and we # read up to the last byte, but NOT past it. # # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be # called, meaning self.isclosed() is meaningful. return self.fp is None def read(self, amt=None): if self.fp is None: return b"" if self._method == "HEAD": self._close_conn() return b"" if self.chunked: return self._read_chunked(amt) if amt is not None: if self.length is not None and amt > self.length: # clip the read to the "end of response" amt = self.length s = self.fp.read(amt) if not s and amt: # Ideally, we would raise IncompleteRead if the content-length # wasn't satisfied, but it might break compatibility. self._close_conn() elif self.length is not None: self.length -= len(s) if not self.length: self._close_conn() return s else: # Amount is not given (unbounded read) so we must check self.length if self.length is None: s = self.fp.read() else: try: s = self._safe_read(self.length) except IncompleteRead: self._close_conn() raise self.length = 0 self._close_conn() # we read everything return s def readinto(self, b): """Read up to len(b) bytes into bytearray b and return the number of bytes read. """ if self.fp is None: return 0 if self._method == "HEAD": self._close_conn() return 0 if self.chunked: return self._readinto_chunked(b) if self.length is not None: if len(b) > self.length: # clip the read to the "end of response" b = memoryview(b)[0:self.length] # we do not use _safe_read() here because this may be a .will_close # connection, and the user is reading more bytes than will be provided # (for example, reading in 1k chunks) n = self.fp.readinto(b) if not n and b: # Ideally, we would raise IncompleteRead if the content-length # wasn't satisfied, but it might break compatibility. self._close_conn() elif self.length is not None: self.length -= n if not self.length: self._close_conn() return n def _read_next_chunk_size(self): # Read the next chunk size from the file line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("chunk size") i = line.find(b";") if i >= 0: line = line[:i] # strip chunk-extensions try: return int(line, 16) except ValueError: # close the connection as protocol synchronisation is # probably lost self._close_conn() raise def _read_and_discard_trailer(self): # read and discard trailer up to the CRLF terminator ### note: we shouldn't have any trailers! while True: line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("trailer line") if not line: # a vanishingly small number of sites EOF without # sending the trailer break if line in (b'\r\n', b'\n', b''): break def _get_chunk_left(self): # return self.chunk_left, reading a new chunk if necessary. # chunk_left == 0: at the end of the current chunk, need to close it # chunk_left == None: No current chunk, should read next. # This function returns non-zero or None if the last chunk has # been read. 
chunk_left = self.chunk_left if not chunk_left: # Can be 0 or None if chunk_left is not None: # We are at the end of chunk, discard chunk end self._safe_read(2) # toss the CRLF at the end of the chunk try: chunk_left = self._read_next_chunk_size() except ValueError: raise IncompleteRead(b'') if chunk_left == 0: # last chunk: 1*("0") [ chunk-extension ] CRLF self._read_and_discard_trailer() # we read everything; close the "file" self._close_conn() chunk_left = None self.chunk_left = chunk_left return chunk_left def _read_chunked(self, amt=None): assert self.chunked != _UNKNOWN value = [] try: while True: chunk_left = self._get_chunk_left() if chunk_left is None: break if amt is not None and amt <= chunk_left: value.append(self._safe_read(amt)) self.chunk_left = chunk_left - amt break value.append(self._safe_read(chunk_left)) if amt is not None: amt -= chunk_left self.chunk_left = 0 return b''.join(value) except IncompleteRead as exc: raise IncompleteRead(b''.join(value)) from exc def _readinto_chunked(self, b): assert self.chunked != _UNKNOWN total_bytes = 0 mvb = memoryview(b) try: while True: chunk_left = self._get_chunk_left() if chunk_left is None: return total_bytes if len(mvb) <= chunk_left: n = self._safe_readinto(mvb) self.chunk_left = chunk_left - n return total_bytes + n temp_mvb = mvb[:chunk_left] n = self._safe_readinto(temp_mvb) mvb = mvb[n:] total_bytes += n self.chunk_left = 0 except IncompleteRead: raise IncompleteRead(bytes(b[0:total_bytes])) def _safe_read(self, amt): """Read the number of bytes requested. This function should be used when <amt> bytes "should" be present for reading. If the bytes are truly not available (due to EOF), then the IncompleteRead exception can be used to detect the problem. """ data = self.fp.read(amt) if len(data) < amt: raise IncompleteRead(data, amt-len(data)) return data def _safe_readinto(self, b): """Same as _safe_read, but for reading into a buffer.""" amt = len(b) n = self.fp.readinto(b) if n < amt: raise IncompleteRead(bytes(b[:n]), amt-n) return n def read1(self, n=-1): """Read with at most one underlying system call. If at least one byte is buffered, return that instead. """ if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return self._read1_chunked(n) if self.length is not None and (n < 0 or n > self.length): n = self.length result = self.fp.read1(n) if not result and n: self._close_conn() elif self.length is not None: self.length -= len(result) return result def peek(self, n=-1): # Having this enables IOBase.readline() to read more than one # byte at a time if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return self._peek_chunked(n) return self.fp.peek(n) def readline(self, limit=-1): if self.fp is None or self._method == "HEAD": return b"" if self.chunked: # Fallback to IOBase readline which uses peek() and read() return super().readline(limit) if self.length is not None and (limit < 0 or limit > self.length): limit = self.length result = self.fp.readline(limit) if not result and limit: self._close_conn() elif self.length is not None: self.length -= len(result) return result def _read1_chunked(self, n): # Strictly speaking, _get_chunk_left() may cause more than one read, # but that is ok, since that is to satisfy the chunked protocol. 
chunk_left = self._get_chunk_left() if chunk_left is None or n == 0: return b'' if not (0 <= n <= chunk_left): n = chunk_left # if n is negative or larger than chunk_left read = self.fp.read1(n) self.chunk_left -= len(read) if not read: raise IncompleteRead(b"") return read def _peek_chunked(self, n): # Strictly speaking, _get_chunk_left() may cause more than one read, # but that is ok, since that is to satisfy the chunked protocol. try: chunk_left = self._get_chunk_left() except IncompleteRead: return b'' # peek doesn't worry about protocol if chunk_left is None: return b'' # eof # peek is allowed to return more than requested. Just request the # entire chunk, and truncate what we get. return self.fp.peek(chunk_left)[:chunk_left] def fileno(self): return self.fp.fileno() def getheader(self, name, default=None): '''Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady. ''' if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers) def getheaders(self): """Return list of (header, value) tuples.""" if self.headers is None: raise ResponseNotReady() return list(self.headers.items()) # We override IOBase.__iter__ so that it doesn't check for closed-ness def __iter__(self): return self # For compatibility with old-style urllib responses. def info(self): '''Returns an instance of the class mimetools.Message containing meta-information associated with the URL. When the method is HTTP, these headers are those returned by the server at the head of the retrieved HTML page (including Content-Length and Content-Type). When the method is FTP, a Content-Length header will be present if (as is now usual) the server passed back a file length in response to the FTP retrieval request. A Content-Type header will be present if the MIME type can be guessed. When the method is local-file, returned headers will include a Date representing the file's last-modified time, a Content-Length giving file size, and a Content-Type containing a guess at the file's type. See also the description of the mimetools module. ''' return self.headers def geturl(self): '''Return the real URL of the page. In some cases, the HTTP server redirects a client to another URL. The urlopen() function handles this transparently, but in some cases the caller needs to know which URL the client was redirected to. The geturl() method can be used to get at this redirected URL. ''' return self.url def getcode(self): '''Return the HTTP status code that was sent with the response, or None if the URL is not an HTTP URL. ''' return self.status class HTTPConnection: _http_vsn = 11 _http_vsn_str = 'HTTP/1.1' response_class = HTTPResponse default_port = HTTP_PORT auto_open = 1 debuglevel = 0 @staticmethod def _is_textIO(stream): """Test whether a file-like object is a text or a binary stream. """ return isinstance(stream, io.TextIOBase) @staticmethod def _get_content_length(body, method): """Get the content-length based on the body. If the body is None, we set Content-Length: 0 for methods that expect a body (RFC 7230, Section 3.3.2). We also set the Content-Length for any method if the body is a str or bytes-like object and not a file. 
""" if body is None: # do an explicit check for not None here to distinguish # between unset and set but empty if method.upper() in _METHODS_EXPECTING_BODY: return 0 else: return None if hasattr(body, 'read'): # file-like object. return None try: # does it implement the buffer protocol (bytes, bytearray, array)? mv = memoryview(body) return mv.nbytes except TypeError: pass if isinstance(body, str): return len(body) return None def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, blocksize=8192): self.timeout = timeout self.source_address = source_address self.blocksize = blocksize self.sock = None self._buffer = [] self.__response = None self.__state = _CS_IDLE self._method = None self._tunnel_host = None self._tunnel_port = None self._tunnel_headers = {} (self.host, self.port) = self._get_hostport(host, port) self._validate_host(self.host) # This is stored as an instance variable to allow unit # tests to replace it with a suitable mockup self._create_connection = socket.create_connection def set_tunnel(self, host, port=None, headers=None): """Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. """ if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() def _get_hostport(self, host, port): if port is None: i = host.rfind(':') j = host.rfind(']') # ipv6 addresses have [...] if i > j: try: port = int(host[i+1:]) except ValueError: if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ port = self.default_port else: raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) host = host[:i] else: port = self.default_port if host and host[0] == '[' and host[-1] == ']': host = host[1:-1] return (host, port) def set_debuglevel(self, level): self.debuglevel = level def _tunnel(self): connect = b"CONNECT %s:%d HTTP/1.0\r\n" % ( self._tunnel_host.encode("ascii"), self._tunnel_port) headers = [connect] for header, value in self._tunnel_headers.items(): headers.append(f"{header}: {value}\r\n".encode("latin-1")) headers.append(b"\r\n") # Making a single send() call instead of one per line encourages # the host OS to use a more optimal packet size instead of # potentially emitting a series of small packets. 
self.send(b"".join(headers)) del headers response = self.response_class(self.sock, method=self._method) (version, code, message) = response._read_status() if code != http.HTTPStatus.OK: self.close() raise OSError(f"Tunnel connection failed: {code} {message.strip()}") while True: line = response.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") if not line: # for sites which EOF without sending a trailer break if line in (b'\r\n', b'\n', b''): break if self.debuglevel > 0: print('header:', line.decode()) def connect(self): """Connect to the host and port specified in __init__.""" sys.audit("http.client.connect", self, self.host, self.port) self.sock = self._create_connection( (self.host,self.port), self.timeout, self.source_address) # Might fail in OSs that don't implement TCP_NODELAY try: self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) except OSError as e: if e.errno != errno.ENOPROTOOPT: raise if self._tunnel_host: self._tunnel() def close(self): """Close the connection to the HTTP server.""" self.__state = _CS_IDLE try: sock = self.sock if sock: self.sock = None sock.close() # close it manually... there may be other refs finally: response = self.__response if response: self.__response = None response.close() def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. """ if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) if hasattr(data, "read") : if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(data) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(self.blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") sys.audit("http.client.send", self, datablock) self.sock.sendall(datablock) return sys.audit("http.client.send", self, data) try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.abc.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data)) def _output(self, s): """Add a line of output to the current request buffer. Assumes that the line does *not* end with \\r\\n. """ self._buffer.append(s) def _read_readable(self, readable): if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(readable) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while True: datablock = readable.read(self.blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") yield datablock def _send_output(self, message_body=None, encode_chunked=False): """Send the currently buffered request and clear the buffer. Appends an extra \\r\\n to the buffer. A message_body may be specified, to be appended to the request. """ self._buffer.extend((b"", b"")) msg = b"\r\n".join(self._buffer) del self._buffer[:] self.send(msg) if message_body is not None: # create a consistent interface to message_body if hasattr(message_body, 'read'): # Let file-like take precedence over byte-like. This # is needed to allow the current position of mmap'ed # files to be taken into account. chunks = self._read_readable(message_body) else: try: # this is solely to check to see if message_body # implements the buffer API. 
it /would/ be easier # to capture if PyObject_CheckBuffer was exposed # to Python. memoryview(message_body) except TypeError: try: chunks = iter(message_body) except TypeError: raise TypeError("message_body should be a bytes-like " "object or an iterable, got %r" % type(message_body)) else: # the object implements the buffer interface and # can be passed directly into socket methods chunks = (message_body,) for chunk in chunks: if not chunk: if self.debuglevel > 0: print('Zero length chunk ignored') continue if encode_chunked and self._http_vsn == 11: # chunked encoding chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \ + b'\r\n' self.send(chunk) if encode_chunked and self._http_vsn == 11: # end chunked transfer self.send(b'0\r\n\r\n') def putrequest(self, method, url, skip_host=False, skip_accept_encoding=False): """Send a request to the server. `method' specifies an HTTP request method, e.g. 'GET'. `url' specifies the object being requested, e.g. '/index.html'. `skip_host' if True does not add automatically a 'Host:' header `skip_accept_encoding' if True does not add automatically an 'Accept-Encoding:' header """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # in certain cases, we cannot issue another request on this connection. # this occurs when: # 1) we are in the process of sending a request. (_CS_REQ_STARTED) # 2) a response to a previous request has signalled that it is going # to close the connection upon completion. # 3) the headers for the previous response have not been read, thus # we cannot determine whether point (2) is true. (_CS_REQ_SENT) # # if there is no prior response, then we can request at will. # # if point (2) is true, then we will have passed the socket to the # response (effectively meaning, "there is no prior response"), and # will open a new one when a new request is made. # # Note: if a prior response exists, then we *can* start a new request. # We are not allowed to begin fetching the response to this new # request, however, until that prior response is complete. # if self.__state == _CS_IDLE: self.__state = _CS_REQ_STARTED else: raise CannotSendRequest(self.__state) self._validate_method(method) # Save the method for use later in the response phase self._method = method url = url or '/' self._validate_path(url) request = '%s %s %s' % (method, url, self._http_vsn_str) self._output(self._encode_request(request)) if self._http_vsn == 11: # Issue some standard headers for better HTTP/1.1 compliance if not skip_host: # this header is issued *only* for HTTP/1.1 # connections. more specifically, this means it is # only issued when the client uses the new # HTTPConnection() class. backwards-compat clients # will be using HTTP/1.0 and those clients may be # issuing this header themselves. we should NOT issue # it twice; some web servers (such as Apache) barf # when they see two Host: headers # If we need a non-standard port,include it in the # header. If the request is going through a proxy, # but the host of the actual URL, not the host of the # proxy. 
netloc = '' if url.startswith('http'): nil, netloc, nil, nil, nil = urlsplit(url) if netloc: try: netloc_enc = netloc.encode("ascii") except UnicodeEncodeError: netloc_enc = netloc.encode("idna") self.putheader('Host', netloc_enc) else: if self._tunnel_host: host = self._tunnel_host port = self._tunnel_port else: host = self.host port = self.port try: host_enc = host.encode("ascii") except UnicodeEncodeError: host_enc = host.encode("idna") # As per RFC 273, IPv6 address should be wrapped with [] # when used as Host header if host.find(':') >= 0: host_enc = b'[' + host_enc + b']' if port == self.default_port: self.putheader('Host', host_enc) else: host_enc = host_enc.decode("ascii") self.putheader('Host', "%s:%s" % (host_enc, port)) # note: we are assuming that clients will not attempt to set these # headers since *this* library must deal with the # consequences. this also means that when the supporting # libraries are updated to recognize other forms, then this # code should be changed (removed or updated). # we only want a Content-Encoding of "identity" since we don't # support encodings such as x-gzip or x-deflate. if not skip_accept_encoding: self.putheader('Accept-Encoding', 'identity') # we can accept "chunked" Transfer-Encodings, but no others # NOTE: no TE header implies *only* "chunked" #self.putheader('TE', 'chunked') # if TE is supplied in the header, then it must appear in a # Connection header. #self.putheader('Connection', 'TE') else: # For HTTP/1.0, the server will assume "not chunked" pass def _encode_request(self, request): # ASCII also helps prevent CVE-2019-9740. return request.encode('ascii') def _validate_method(self, method): """Validate a method name for putrequest.""" # prevent http header injection match = _contains_disallowed_method_pchar_re.search(method) if match: raise ValueError( f"method can't contain control characters. {method!r} " f"(found at least {match.group()!r})") def _validate_path(self, url): """Validate a url for putrequest.""" # Prevent CVE-2019-9740. match = _contains_disallowed_url_pchar_re.search(url) if match: raise InvalidURL(f"URL can't contain control characters. {url!r} " f"(found at least {match.group()!r})") def _validate_host(self, host): """Validate a host so it doesn't contain control characters.""" # Prevent CVE-2019-18348. match = _contains_disallowed_url_pchar_re.search(host) if match: raise InvalidURL(f"URL can't contain control characters. {host!r} " f"(found at least {match.group()!r})") def putheader(self, header, *values): """Send a request header line to the server. For example: h.putheader('Accept', 'text/html') """ if self.__state != _CS_REQ_STARTED: raise CannotSendHeader() if hasattr(header, 'encode'): header = header.encode('ascii') if not _is_legal_header_name(header): raise ValueError('Invalid header name %r' % (header,)) values = list(values) for i, one_value in enumerate(values): if hasattr(one_value, 'encode'): values[i] = one_value.encode('latin-1') elif isinstance(one_value, int): values[i] = str(one_value).encode('ascii') if _is_illegal_header_value(values[i]): raise ValueError('Invalid header value %r' % (values[i],)) value = b'\r\n\t'.join(values) header = header + b': ' + value self._output(header) def endheaders(self, message_body=None, *, encode_chunked=False): """Indicate that the last header line has been sent to the server. This method sends the request to the server. The optional message_body argument can be used to pass a message body associated with the request. 
""" if self.__state == _CS_REQ_STARTED: self.__state = _CS_REQ_SENT else: raise CannotSendHeader() self._send_output(message_body, encode_chunked=encode_chunked) def request(self, method, url, body=None, headers={}, *, encode_chunked=False): """Send a complete request to the server.""" self._send_request(method, url, body, headers, encode_chunked) def _send_request(self, method, url, body, headers, encode_chunked): # Honor explicitly requested Host: and Accept-Encoding: headers. header_names = frozenset(k.lower() for k in headers) skips = {} if 'host' in header_names: skips['skip_host'] = 1 if 'accept-encoding' in header_names: skips['skip_accept_encoding'] = 1 self.putrequest(method, url, **skips) # chunked encoding will happen if HTTP/1.1 is used and either # the caller passes encode_chunked=True or the following # conditions hold: # 1. content-length has not been explicitly set # 2. the body is a file or iterable, but not a str or bytes-like # 3. Transfer-Encoding has NOT been explicitly set by the caller if 'content-length' not in header_names: # only chunk body if not explicitly set for backwards # compatibility, assuming the client code is already handling the # chunking if 'transfer-encoding' not in header_names: # if content-length cannot be automatically determined, fall # back to chunked encoding encode_chunked = False content_length = self._get_content_length(body, method) if content_length is None: if body is not None: if self.debuglevel > 0: print('Unable to determine size of %r' % body) encode_chunked = True self.putheader('Transfer-Encoding', 'chunked') else: self.putheader('Content-Length', str(content_length)) else: encode_chunked = False for hdr, value in headers.items(): self.putheader(hdr, value) if isinstance(body, str): # RFC 2616 Section 3.7.1 says that text default has a # default charset of iso-8859-1. body = _encode(body, 'body') self.endheaders(body, encode_chunked=encode_chunked) def getresponse(self): """Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # if a prior response exists, then it must be completed (otherwise, we # cannot read this response's header to determine the connection-close # behavior) # # note: if a prior response existed, but was connection-close, then the # socket and response were made independent of this HTTPConnection # object since a new request requires that we open a whole new # connection # # this means the prior response had one of two states: # 1) will_close: this connection was reset and the prior socket and # response operate independently # 2) persistent: the response was retained and we await its # isclosed() status to become true. 
# if self.__state != _CS_REQ_SENT or self.__response: raise ResponseNotReady(self.__state) if self.debuglevel > 0: response = self.response_class(self.sock, self.debuglevel, method=self._method) else: response = self.response_class(self.sock, method=self._method) try: try: response.begin() except ConnectionError: self.close() raise assert response.will_close != _UNKNOWN self.__state = _CS_IDLE if response.will_close: # this effectively passes the connection to the response self.close() else: # remember this, so we can tell when it is complete self.__response = response return response except: response.close() raise try: import ssl except ImportError: pass else: class HTTPSConnection(HTTPConnection): "This class allows communication via SSL." default_port = HTTPS_PORT # XXX Should key_file and cert_file be deprecated in favour of context? def __init__(self, host, port=None, key_file=None, cert_file=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, context=None, check_hostname=None, blocksize=8192): super(HTTPSConnection, self).__init__(host, port, timeout, source_address, blocksize=blocksize) if (key_file is not None or cert_file is not None or check_hostname is not None): import warnings warnings.warn("key_file, cert_file and check_hostname are " "deprecated, use a custom context instead.", DeprecationWarning, 2) self.key_file = key_file self.cert_file = cert_file if context is None: context = ssl._create_default_https_context() # send ALPN extension to indicate HTTP/1.1 protocol if self._http_vsn == 11: context.set_alpn_protocols(['http/1.1']) # enable PHA for TLS 1.3 connections if available if context.post_handshake_auth is not None: context.post_handshake_auth = True will_verify = context.verify_mode != ssl.CERT_NONE if check_hostname is None: check_hostname = context.check_hostname if check_hostname and not will_verify: raise ValueError("check_hostname needs a SSL context with " "either CERT_OPTIONAL or CERT_REQUIRED") if key_file or cert_file: context.load_cert_chain(cert_file, key_file) # cert and key file means the user wants to authenticate. # enable TLS 1.3 PHA implicitly even for custom contexts. if context.post_handshake_auth is not None: context.post_handshake_auth = True self._context = context if check_hostname is not None: self._context.check_hostname = check_hostname def connect(self): "Connect to a host on a given (SSL) port." super().connect() if self._tunnel_host: server_hostname = self._tunnel_host else: server_hostname = self.host self.sock = self._context.wrap_socket(self.sock, server_hostname=server_hostname) __all__.append("HTTPSConnection") class HTTPException(Exception): # Subclasses that define an __init__ must call Exception.__init__ # or define self.args. Otherwise, str() will fail. 
pass class NotConnected(HTTPException): pass class InvalidURL(HTTPException): pass class UnknownProtocol(HTTPException): def __init__(self, version): self.args = version, self.version = version class UnknownTransferEncoding(HTTPException): pass class UnimplementedFileMode(HTTPException): pass class IncompleteRead(HTTPException): def __init__(self, partial, expected=None): self.args = partial, self.partial = partial self.expected = expected def __repr__(self): if self.expected is not None: e = ', %i more expected' % self.expected else: e = '' return '%s(%i bytes read%s)' % (self.__class__.__name__, len(self.partial), e) __str__ = object.__str__ class ImproperConnectionState(HTTPException): pass class CannotSendRequest(ImproperConnectionState): pass class CannotSendHeader(ImproperConnectionState): pass class ResponseNotReady(ImproperConnectionState): pass class BadStatusLine(HTTPException): def __init__(self, line): if not line: line = repr(line) self.args = line, self.line = line class LineTooLong(HTTPException): def __init__(self, line_type): HTTPException.__init__(self, "got more than %d bytes when reading %s" % (_MAXLINE, line_type)) class RemoteDisconnected(ConnectionResetError, BadStatusLine): def __init__(self, *pos, **kw): BadStatusLine.__init__(self, "") ConnectionResetError.__init__(self, *pos, **kw) # for backwards compatibility error = HTTPException 解析这些代码 怎么联接这个服务器
06-19
@Slf4j @Component public class BidirectionRequestHeaderProcessor extends AuthRequiredHeaderProcessor<BidirectionRequestChannel, BidirectionRequestChannelInformation> { private ChannelManager channelManager; private ContainerManager containerManager; private CloudAccessChannelHelper cloudAccessChannelHelper; @Autowired public BidirectionRequestHeaderProcessor(AuthClient authClient, BidirectionRequestHeaderProcessorProp prop, ChannelManager channelManager, ContainerManager containerManager, CloudAccessChannelHelper cloudAccessChannelHelper) { super(authClient, prop); this.channelManager = channelManager; this.containerManager = containerManager; this.cloudAccessChannelHelper = cloudAccessChannelHelper; } @Override protected BidirectionRequestChannel doBeforeAddExecutor(BidirectionRequestChannel bidirectionChannel) { if (log.isDebugEnabled()) { log.debug("[sid:{}] Disable auto read of Bidirection channel:", bidirectionChannel.getSid()); } bidirectionChannel.disableAutoRead(); return bidirectionChannel; } @Override public void doHeaderProcess(BidirectionRequestChannel bidirectionChannel) throws Exception { // Add into channel manager String channelId = bidirectionChannel.getChannelId(); channelManager.addRequestChannel(channelId, bidirectionChannel); // Add life time event if necessary. As the bidirection has sink channel characteristic, the life time event // should be created before channel adding into container. if (bidirectionChannel instanceof ILifeTimeControl) { ((ILifeTimeControl) bidirectionChannel).createLifeTimeEvent(ILifeTimeControl.LifeTimeType.CON_IDLE); } // Obtain the container. BidirectionContainer container = containerManager.obtainBidirectionContainer(bidirectionChannel); if (container == null) { log.info("[sid:{}] Bidirection channel is kicked off by others when obtaining container.", bidirectionChannel.getSid()); bidirectionChannel.sendHttpResponseAndClose(503, "Kicked off", RelayConsts.CloseReason.BIDIRECTION_KICKED_OFF); return; } if (bidirectionChannel instanceof CloudAccessBidirectionRequestChannel) { doCloudAccessHeaderProcess(bidirectionChannel, container); } // Initialize the container. 
if (log.isDebugEnabled()) { log.debug("[sid:{}] Bidirection channel obtain container successfully: container={}", bidirectionChannel.getSid(), container.toString()); } boolean hasPeerChannel = bidirectionChannel.initContainer(container); if (hasPeerChannel) { if (log.isDebugEnabled()) { log.debug("[sid:{}] Initialize Bidirection channel after container initialization", bidirectionChannel.getSid()); } bidirectionChannel.startDataAcceptanceAfterSinkChannelAccess(); } } public void doCloudAccessHeaderProcess(final BidirectionRequestChannel bidirectionChannel, BidirectionContainer container) throws Exception { RequestChannel cloudAccessChannel = cloudAccessChannelHelper .createCloudAccessChannel(bidirectionChannel.getBindingId(), bidirectionChannel.getInformation().getOrignalRequest(), bidirectionChannel.getNettyChannel()); if (cloudAccessChannel instanceof CloudAccessBidirectionRequestChannel) { CloudAccessBidirectionRequestChannel cloudRelayChannel = (CloudAccessBidirectionRequestChannel) cloudAccessChannel; channelManager.addRequestChannel(cloudRelayChannel.getChannelId(), cloudRelayChannel); cloudRelayChannel.createLifeTimeEvent(ILifeTimeControl.LifeTimeType.CON_IDLE); containerManager.obtainBidirectionContainer(cloudRelayChannel); cloudRelayChannel.initContainer(container); } else { log.error("failed to create cloud access channel for {}", bidirectionChannel.getBindingId()); } } } @Slf4j @Component public class PassthroughGetRequestDataProcessor extends AbstractProcessor { private ChannelManager channelManager; @Autowired public PassthroughGetRequestDataProcessor(ChannelManager channelManager) { this.channelManager = channelManager; } @Override protected PassthroughGetRequestChannel doProcess(PassthroughGetRequestChannel passthroughGetChannel) throws Exception { String bindingId = passthroughGetChannel.getBindingId(); PostRequestChannel postChannel = (PostRequestChannel) channelManager.getRequestChannel(bindingId); if (postChannel == null) { log.info("[sid:{}] POST channel is missing when passthrough GET data arrives.", passthroughGetChannel.getSid()); return passthroughGetChannel; } // Start Get channel transmission: only for 1.3 multiple mapping request passthroughGetChannel.startTransmission(postChannel); List<IHttpData> dataList = passthroughGetChannel.getDataList(); IHttpResponseGenerator generator = postChannel.getSuccessResponseGenerator(); while (!dataList.isEmpty()) { IHttpData data = dataList.remove(0); if (generator != null && !data.isLastData()) { postChannel.sendData(generator.getHttpData(data)); } else { // Maybe POST channel is Version 1.2, this should not happen. 
data.release(); } } return passthroughGetChannel; } @Override public void exceptionCaught(PassthroughGetRequestChannel passthroughGetChannel, Throwable cause) { if (cause instanceof IllegalArgumentException || cause instanceof IllegalRequestException) { log.error("[sid:{}] Illegal request data:", passthroughGetChannel.getSid(), cause); passthroughGetChannel.close(RelayConsts.CloseReason.ILLEGAL_REQUEST_DATA_FORMAT); } else { log.error("[sid:{}] PassthroughGetRequestDataProcessor failed:", passthroughGetChannel.getSid(), cause); passthroughGetChannel.close(RelayConsts.CloseReason.SERVER_INTERNAL_ERROR); } } } @Slf4j @Component public class BufferedRequestDataProcessor extends RelayPostRequestDataProcessor { @Override protected DataProcessState processData(BufferedPostRequestChannel bufferedPostChannel, DataProcessState dataProcessState, IHttpData data) { if (dataProcessState == DataProcessState.TRANSMIT_DIRECT) { return super.transmitDataDirectly(bufferedPostChannel, DataProcessState.TRANSMIT_DIRECT, data); } else if (dataProcessState == DataProcessState.WANT_BUFFERED_DATA) { if (log.isDebugEnabled()) { log.debug("[sid:{}] Process buffered data: index={}", bufferedPostChannel.getSid(), data.getIndex()); } // store buffered data int left = bufferedPostChannel.addBufferedHttpData(data); if (left > 0) { // transmit buffered data to bound channels bufferedPostChannel.transmitData(data); // keep state return DataProcessState.WANT_BUFFERED_DATA; } else if (left == 0) { // transmit buffered data to bound channels bufferedPostChannel.transmitData(data); // change data process stat to DataProcessStat. return bufferedPostChannel.changeToNextState(); } else { // This will not happen super.transmitDataDirectly(bufferedPostChannel, DataProcessState.TRANSMIT_DIRECT, data); return bufferedPostChannel.changeToNextState(); } } else if (dataProcessState == DataProcessState.WANT_RESPONSE_MOULD) { return super.processHttpResponseMould(bufferedPostChannel, data); } else { // Use direct transmission as default action. This case should not happen. return super.transmitDataDirectly(bufferedPostChannel, DataProcessState.TRANSMIT_DIRECT, data); } } } @Slf4j @Component(“relayPostRequestDataProcessor”) public class RelayPostRequestDataProcessor extends PostRequestDataProcessor { @Override protected DataProcessState processData(T postChannel, DataProcessState dataProcessState, IHttpData data) { if (dataProcessState == DataProcessState.TRANSMIT_DIRECT) { return super.transmitDataDirectly(postChannel, DataProcessState.TRANSMIT_DIRECT, data); } else if (dataProcessState == DataProcessState.WANT_RESPONSE_MOULD) { return processHttpResponseMould(postChannel, data); } else { // Use direct transmission as default action. This case should not happen. return super.transmitDataDirectly(postChannel, DataProcessState.TRANSMIT_DIRECT, data); } } /** * Parse the response mould into response header, set it into POST request channel and transmit it to all GET * request channels related with the POST request channel. * * @param postChannel The POST request channel which received the response mould data. 
* @param data The response mould data */ protected DataProcessState processHttpResponseMould(T postChannel, IHttpData data) throws IllegalRequestException { log.debug("[sid:{}] Process first data: index={}", postChannel.getSid(), data.getIndex()); try { String content = null; if (data instanceof MultipartHttpData) { content = ((MultipartHttpData) data).addedContents().toString(data.getContentCharset()); } else { content = data.toString(); } HttpResponse httpResponse = parseHttpResponse(content); // If the Transfer-Encoding: chunked is set, do not modified as Chunked is used to measure the message // length of the HTTP response even if the Content-Length is set at the same time. if (!HttpUtil.isTransferEncodingChunked(httpResponse)) { String type = postChannel.getInformation().getParams().get(RelayConsts.ParamKey.TYPE); if (RelayConsts.Type.FILE.equals(type)) { // If the type is file, the Content-Length field in response should be the file size and multipart // should not be used. if (postChannel.getInformation().getContentType().isMultiPart() && httpResponse.headers().contains(HttpHeaderNames.CONTENT_LENGTH)) { throw new IllegalRequestException( "file type with multipart package should use Transfer-Encoding: chunked."); } } else { // If the type is other value, only when the Content-Length field in response does not exist or is // negtive, new Content-Length will be set. if (HttpUtil.getContentLength(httpResponse, -1L) < 0) { long contentLength = HttpUtil.getContentLength(postChannel.getInformation().getOrignalRequest(), -1L); httpResponse.headers().set(RelayConsts.HttpNames.CONTENT_LENGTH, contentLength > 0 ? contentLength : Long.MAX_VALUE); } } } // if status is 429, it shows device has too many relay connections at the same time // if (postChannel.hasSegmenter() && // httpResponse.status().code() == HttpResponseStatus.TOO_MANY_REQUESTS.code()) { // postChannel.stopSegmenterWhenCanNotGetVideoStream(RelayConsts.SegmenterStopReason.RELAY_CONNECTION_EXCEEDED); // } postChannel.setResponseHeader(httpResponse); postChannel.transmitHttpResponse(httpResponse); return postChannel.changeToNextState(); } finally { data.release(); } } /** * Parse response header from {@link String} format to {@link HttpResponse} format. * * @param stringValue {@link String} format of response header. * * @return {@link HttpResponse} format of response header. 
*/ public HttpResponse parseHttpResponse(String stringValue) throws IllegalRequestException { String[] parts = stringValue.split("\r\n", 2); if (parts.length != 2 || parts[1].isEmpty()) { throw new IllegalRequestException("Invalid first data as a response header: " + stringValue); } String[] statusLine = parts[0].split(" ", 3); if (statusLine.length != 3 || statusLine[1].isEmpty() || statusLine[2].isEmpty()) { throw new IllegalRequestException("Invalid first data with an invalid response line: " + parts[0]); } HttpResponse httpResponse = null; try { HttpResponseStatus status = new SpecificCodeHttpResponseStatus(statusLine[1], statusLine[2]); httpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status); } catch (NumberFormatException e) { throw new IllegalRequestException("Invalid first data with an unresovled status code: " + parts[0]); } String[] entries = parts[1].split("\r\n"); for (String entry : entries) { String[] content = entry.split(":", 2); if (content.length != 2 || content[1].isEmpty()) { throw new IllegalRequestException("Invalid first data with an unresovled header: " + entry); } String name = content[0].trim(); String value = content[1].trim(); if (name.isEmpty()) { throw new IllegalRequestException("Invalid first data with an empty key of header:" + entry); } httpResponse.headers().add(name, value); } // Netty channel of GET could not be reused. httpResponse.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); httpResponse.headers().set(HttpHeaderNames.PRAGMA, HttpHeaderValues.NO_CACHE); httpResponse.headers().set(HttpHeaderNames.CACHE_CONTROL, HttpHeaderValues.NO_CACHE); return httpResponse; } } @Slf4j @Component(“postRequestDataProcessor”) public class PostRequestDataProcessor extends AbstractProcessor { @Override protected T doProcess(T sourceChannel) throws IllegalRequestException { DataProcessState dataProcessStat = sourceChannel.getDataProcessState(); List<IHttpData> dataList = sourceChannel.getDataList(); while (!dataList.isEmpty()) { IHttpData data = dataList.remove(0); dataProcessStat = processData(sourceChannel, dataProcessStat, data); } return sourceChannel; } protected DataProcessState processData(T sourceChannel, DataProcessState dataProcessState, IHttpData data) { return transmitDataDirectly(sourceChannel, dataProcessState, data); } protected DataProcessState transmitDataDirectly(T sourceChannel, DataProcessState dataProcessStat, IHttpData data) { assert dataProcessStat == DataProcessState.TRANSMIT_DIRECT; // Transmit the data sourceChannel.transmitData(data); // Release data original reference data.release(); return DataProcessState.TRANSMIT_DIRECT; } @Override public void exceptionCaught(T sourceChannel, Throwable cause) { if (cause instanceof IllegalArgumentException || cause instanceof IllegalRequestException) { log.error("[sid:{}] Illegal request data format:", sourceChannel.getSid(), cause); sourceChannel.close(RelayConsts.CloseReason.ILLEGAL_REQUEST_DATA_FORMAT); } else { log.error("[sid:{}] PostRequestDataProcessor failed:", sourceChannel.getSid(), cause); sourceChannel.close(RelayConsts.CloseReason.SERVER_INTERNAL_ERROR); } } } @Slf4j public class DefaultGetRequestChannel extends GetRequestChannel implements ILifeTimeControl, IHeartBeatControl { private static final RuntimeException HEART_BEAT_TIMEOUT_EXCEPTION = new RuntimeException("Heart beat timeout"); protected ILifeTimeControl lifeTimeController; protected AtomicLong closedTimeStamp; public DefaultGetRequestChannel(String channelId, Channel nettyChannel, String bindingId, 
int basicLifeTime, GetRequestChannelInformation information) { super(channelId, nettyChannel, bindingId, information); this.lifeTimeController = buildLifeTimeController(nettyChannel, basicLifeTime); this.closedTimeStamp = new AtomicLong(System.currentTimeMillis() + IHeartBeatControl.PROLONGED_TIME_MS); } protected ILifeTimeControl buildLifeTimeController(Channel nettyChannel, int basicLifeTime) { IEventCallBack callback = new IEventCallBack() { @Override public void handleEvent() { if (log.isDebugEnabled()) { log.debug("[sid:{}] Channel is closing as timeout: channelType={}", getSid(), DefaultGetRequestChannel.this.getClass().getSimpleName()); } sendHttpResponseAndClose(HttpResponseStatus.NOT_FOUND, RelayConsts.CloseReason.GET_LIFETIME_EXPIRE); } }; return new LifeTimeController(getSid(), ILifeTimeControl.GET_IDLE_TIME_S, basicLifeTime, EventTimer.getLifeTimeEventKey(nettyChannel), getClass().getSimpleName(), callback); } @Override protected ISinkHandler buildBasicSinkHandler(GetRequestChannelInformation information) { assert !information.isFFmpegChannel(); if (isAudio(information)) { return new AudioSinkHandler(this); } else { return new DefaultSinkHandler<>(this); } } private boolean isAudio(GetRequestChannelInformation information) { String type = information.getParams().get(RelayConsts.ParamKey.TYPE); switch (type) { case RelayConsts.Type.AUDIO: return true; case RelayConsts.Type.NVR: case RelayConsts.Type.SMART_NVR: String resolution = information.getParams().get(RelayConsts.ParamKey.RESOLUTION); if (resolution == null) { return false; } switch (resolution) { case RelayConsts.AudioResolution.AAC: case RelayConsts.AudioResolution.MP2: case RelayConsts.AudioResolution.PCM: return true; case RelayConsts.VideoResolution.HD: case RelayConsts.VideoResolution.QVGA: case RelayConsts.VideoResolution.VGA: default: return false; } default: return false; } } @Override public long getClosedTimeStamp() { return closedTimeStamp.get(); } @Override public void setClosedTimeStamp(long timeStamp) { closedTimeStamp.lazySet(timeStamp); information.addHeartBeatNum(); } @Override public String getLifeTimeEventKey() { return lifeTimeController.getLifeTimeEventKey(); } @Override public int getBasicLifeTime() { return lifeTimeController.getBasicLifeTime(); } @Override public int createLifeTimeEvent(LifeTimeType type) throws TimerEventException { return lifeTimeController.createLifeTimeEvent(type); } @Override public void prolongLeftTimeTo(int newLifeTime) throws TimerEventException { lifeTimeController.prolongLeftTimeTo(newLifeTime); } @Override public void shortenLeftTimeTo(int newLifeTime) { lifeTimeController.shortenLeftTimeTo(newLifeTime); } @Override public void removeLifeTimeEvent() { lifeTimeController.removeLifeTimeEvent(); } /** * {@inheritDoc} */ @Override public boolean updateAfterContainerInitialization(ISourceRequestChannel postRequestChannel) { try { boolean isSuccess = super.updateAfterContainerInitialization(postRequestChannel); if (isSuccess) { lifeTimeController.prolongLeftTimeTo(lifeTimeController.getBasicLifeTime()); } return isSuccess; } catch (Exception e) { log.error("[sid:{}] Exception caught when updating Get channel after container initialization", getSid(), e); return false; } } /** * {@inheritDoc} */ @Override public boolean updateWhenBinding(PostRequestChannel postRequestChannel) { try { boolean isSuccess = super.updateWhenBinding(postRequestChannel); if (isSuccess) { createLifeTimeEvent(LifeTimeType.BASIC); } return isSuccess; } catch (Exception e) { log.error("[sid:{}] Exception 
caught when binding Get channel", getSid(), e); return false; } } /** * If the heart beat is expired, the Get channel will be closed instead of sending template data. At this time, the * template data reference will not be increased. * * @param templateData The template data */ @Override public ChannelFuture sendTemplateData(IHttpData templateData) { if (System.currentTimeMillis() > getClosedTimeStamp()) { if (log.isDebugEnabled()) { log.debug("[sid:{}] Close GET channel as losing heartbeat:", getSid()); } close(RelayConsts.CloseReason.GET_LACK_HEART_BEAT); DefaultChannelPromise promise = new DefaultChannelPromise(getNettyChannel()); promise.setFailure(HEART_BEAT_TIMEOUT_EXCEPTION); return promise; } else { return super.sendTemplateData(templateData); } } @Override protected void releaseResourceAfterNettyChannelClosure() { super.releaseResourceAfterNettyChannelClosure(); removeLifeTimeEvent(); } } public class PassthroughGetRequestChannel extends DefaultGetRequestChannel implements IDataFollowedRelayRequestChannel { private RelayServiceDataController<PassthroughGetRequestChannel> dataController; private volatile boolean isStartDataProcess; public PassthroughGetRequestChannel(String channelId, Channel nettyChannel, String bindingId, // ClientInfoController clientInfoController, int basicLifeTime, GetRequestChannelInformation information) { super(channelId, nettyChannel, bindingId, // clientInfoController, basicLifeTime, information); this.dataController = new RelayServiceDataController<>(this, false); } @Override public boolean isStartDataProcess() { return isStartDataProcess; } @Override public void setStartDataProcess() { this.isStartDataProcess = true; } @Override public boolean updateAfterContainerInitialization(ISourceRequestChannel postRequestChannel) { if (super.updateAfterContainerInitialization(postRequestChannel)) { setStartDataProcess(); enableAutoRead(); addPushDataTask(); return true; } else { return false; } } /** * {@inheritDoc} */ @Override public boolean updateWhenBinding(PostRequestChannel postRequestChannel) { if (super.updateWhenBinding(postRequestChannel)) { setStartDataProcess(); enableAutoRead(); addPushDataTask(); return true; } else { return false; } } @Override public void disableAutoRead() { dataController.disableAutoRead(); } @Override public void enableAutoRead() { dataController.enableAutoRead(); } @Override public List<IHttpData> getDataList() { return dataController.getDataList(); } @Override public void addPushDataTask() { dataController.addPushDataTask(); } @Override protected void releaseResourceAfterNettyChannelClosure() { super.releaseResourceAfterNettyChannelClosure(); dataController.releaseDataList(); } } @Slf4j public class BidirectionRequestChannel extends SourceRequestChannel<BidirectionContainer, BidirectionRequestChannelInformation> implements ISinkRequestChannel { private DefaultSinkHandler<BidirectionRequestChannel> sinkHandler; public BidirectionRequestChannel(String channelId, Channel nettyChannel, String bindingId, BidirectionRequestChannelInformation information) { super(channelId, nettyChannel, bindingId, information); this.sinkHandler = new DefaultSinkHandler<>(this); } @Override public String getRequestType() { if (isDeviceChannel()) { return "relayservice-deviceStream"; } else { return "relayservice-appStream"; } } @Override public boolean initContainer(BidirectionContainer container) { assert this.container == null : "initContainer() should only be called once."; this.container = container; BidirectionRequestChannel peerChannel = 
getPeerDirectionChannel(container); if (peerChannel == null) { return false; } if (peerChannel.updateAfterContainerInitialization(this)) { return true; } else { // As peer channel is closing, this bidirection channel will be closing soon. close(RelayConsts.CloseReason.BIDIRECTION_CLOSED_BY_PEER); return false; } } public BidirectionRequestChannel getPeerDirectionChannel() { BidirectionContainer container = this.container; if (container != null) { return getPeerDirectionChannel(container); } else { return null; } } private BidirectionRequestChannel getPeerDirectionChannel(BidirectionContainer container) { if (isDeviceChannel()) { return container.getAppChannel(); } else { return container.getDeviceChannel(); } } /** * Only contains peer channel if it exists. */ @Override protected List<? extends ISinkRequestChannel> getTransmittedSinkRequestChannels() { BidirectionContainer container = this.container; if (container == null) { return new ArrayList<>(); } List<BidirectionRequestChannel> list = new ArrayList<>(); BidirectionRequestChannel peerChannel = getPeerDirectionChannel(container); if (peerChannel != null) { list.add(peerChannel); } return list; } /** * Do nothing, waiting terminal to close the request channel. */ @Override protected void doProcessBeforeTransmitData(IHttpData templateData) { // do nothing } @Override protected void transmitDataToSinkRequestChannel(ISinkRequestChannel sinkChannel, IHttpData templateData) { // 当channel来自回放请求时,channel为长连接,此时为了适配web无法发起正常http长连接,手动将lastData的空包丢掉 String type = this.getInformation().getParamValue(RelayConsts.ParamKey.TYPE); if ((Objects.equals(type, RelayConsts.Type.SDVOD) || Objects.equals(type, RelayConsts.Type.DOWNLOAD)) && templateData.isLastData()) { return; } sinkChannel.sendTemplateData(templateData); } @Override public void startDataAcceptanceAfterSinkChannelAccess() { assert container != null : "initContainer should be called first"; BidirectionRequestChannel peerChannel = getPeerDirectionChannel(container); if (peerChannel == null) { log.warn("[sid:{}] Peer channel is missing, may be closed?", getSid()); return; } if (information.getStatisticInformation().getIsWs()) { // Start data process setStartDataProcess(); // Enable auto read enableAutoRead(); return; } doBindInitialization(peerChannel); } @Override public DataProcessState getDataProcessState() { return DataProcessState.TRANSMIT_DIRECT; } @Override public DataProcessState changeToNextState() { return DataProcessState.TRANSMIT_DIRECT; } @Override public boolean updateAfterContainerInitialization(ISourceRequestChannel sourceRequestChannel) { if (log.isDebugEnabled()) { log.debug("[sid:{}] Do bind initialization after container initialization", getSid()); } doBindInitialization(sourceRequestChannel); return true; } protected void doBindInitialization(ISourceRequestChannel sourceRequestChannel) { // Update sid String sourceSid = sourceRequestChannel.getSid(); if (sourceSid.indexOf('-') == -1) { String oldSid = getSid(); if (oldSid.indexOf('-') == -1) { updateSid(sourceSid + '-' + oldSid); } } // Try to send response header HttpResponse responseHeader = sourceRequestChannel.getResponseHeader(); if (responseHeader != null) { sendTemplateHttpResponse(responseHeader); } // Start data process setStartDataProcess(); // Enable auto read enableAutoRead(); // Add push data task addPushDataTask(); } @Override public ChannelFuture sendTemplateHttpResponse(HttpResponse templateHttpResponse) { return sinkHandler.sendTemplateHttpResponse(templateHttpResponse); } @Override public ChannelFuture 
sendTemplateHttpResponseAndClose(HttpResponse templateHttpResponse, RelayConsts.CloseReason closeReason) { return sinkHandler.sendTemplateHttpResponseAndClose(templateHttpResponse, closeReason); } @Override public ChannelFuture sendTemplateData(IHttpData templateData) { return sinkHandler.sendTemplateData(templateData); } @Override public ChannelFuture sendTemplateDataAndClose(IHttpData templateData, RelayConsts.CloseReason closeReason) { return sinkHandler.sendTemplateDataAndClose(templateData, closeReason); } @Override protected void releaseResourceBeforeNettyChannelClosure() { if (information.getCloseReason() != RelayConsts.CloseReason.BIDIRECTION_CLOSED_BY_PEER) { ContainerManager.getInstance().removeBidirectionContainer(this); } super.releaseResourceBeforeNettyChannelClosure(); } } @Slf4j public abstract class SourceRequestChannel<T extends IRequestChannelContainer<? extends ISinkRequestChannel>, I extends AbstractTransactionRequestChannelInformation> extends TransactionRequestChannel implements ISourceRequestChannel { private String bindingId; private ChannelTrafficShapingHandler trafficHandler; private RelayServiceDataController<SourceRequestChannel<T, I>> dataController; private volatile boolean isStartDataProcess; private HttpResponse responseHeader; protected T container; public SourceRequestChannel(String channelId, Channel nettyChannel, String bindingId, I information) { super(channelId, nettyChannel, information); this.bindingId = bindingId; this.dataController = new RelayServiceDataController<>(this, false); } @Override public String getBindingId() { return bindingId; } public ChannelTrafficShapingHandler getTrafficHandler() { return trafficHandler; } public void setTrafficHandler(ChannelTrafficShapingHandler trafficHandler) { this.trafficHandler = trafficHandler; } @Override public void disableAutoRead() { dataController.disableAutoRead(); } @Override public void enableAutoRead() { dataController.enableAutoRead(); } @Override public List<IHttpData> getDataList() { return dataController.getDataList(); } @Override public void addPushDataTask() { dataController.addPushDataTask(); } @Override public HttpResponse getResponseHeader() { return responseHeader; } @Override public void setResponseHeader(HttpResponse responseHeader) { this.responseHeader = responseHeader; } @Override public boolean isStartDataProcess() { return isStartDataProcess; } @Override public void setStartDataProcess() { this.isStartDataProcess = true; } /** * Initialize the source channel with a container in which the sink channel is put into. * * @param container The container * * @return True if the container has at least one sink channel. */ public abstract boolean initContainer(T container); public boolean isInitContainer() { return container != null; } public T getContainer() { return container; } /** * Only used for Junit test. */ void setContainer(T container) { this.container = container; } /** * Start source channel data acceptance after sink channel access. */ public abstract void startDataAcceptanceAfterSinkChannelAccess(); /** * {@inheritDoc} */ @Override public void transmitHttpResponse(HttpResponse templateHttpResponse) { List<? 
extends ISinkRequestChannel> currentList = getTransmittedSinkRequestChannels(); for (ISinkRequestChannel sinkChannel : currentList) { try { sinkChannel.sendTemplateHttpResponse(templateHttpResponse); } catch (Exception e) { log.error("[sid:{}] Failed to transmit response to GET channel: channelId={}:", getSid(), sinkChannel.getChannelId(), e); sinkChannel.close(RelayConsts.CloseReason.SERVER_INTERNAL_ERROR); } } } /** * {@inheritDoc} */ @Override public void transmitData(IHttpData templateData) { List<? extends ISinkRequestChannel> currentList = templateData.isLastData() ? getTransmittedSinkRequestChannels() : null; doProcessBeforeTransmitData(templateData); currentList = currentList == null ? getTransmittedSinkRequestChannels() : currentList; for (ISinkRequestChannel sinkChannel : currentList) { try { transmitDataToSinkRequestChannel(sinkChannel, templateData); } catch (Exception e) { log.error("[sid:{}] Failed to transmit data to GET channel: channelId={}, index={}", getSid(), sinkChannel.getChannelId(), templateData.getIndex(), e); sinkChannel.close(RelayConsts.CloseReason.SERVER_INTERNAL_ERROR); } } } /** * Obtain all sink channel which can be transmitted response and data to. * * @return The sink channel which can be transmitted response and data to. */ protected abstract List<? extends ISinkRequestChannel> getTransmittedSinkRequestChannels(); /** * Process before transmit template data. * * @param templateData The template data */ protected abstract void doProcessBeforeTransmitData(IHttpData templateData); /** * Transmit template data to sink channel. * * @param sinkChannel The sink channel * @param templateData The template data. */ protected abstract void transmitDataToSinkRequestChannel(ISinkRequestChannel sinkChannel, IHttpData templateData); @Override protected void releaseResourceAfterNettyChannelClosure() { super.releaseResourceAfterNettyChannelClosure(); dataController.releaseDataList(); } } @Slf4j public abstract class TransactionRequestChannel extends RequestChannel { protected String channelId; @Getter @Setter private String content; public TransactionRequestChannel(String channelId, Channel nettyChannel, I information) { super(nettyChannel, information); this.channelId = channelId; } public String getChannelId() { return channelId; } public ChannelVersion getChannelVersion() { return information.getChannelVersion(); } public boolean isDeviceChannel() { return information.isFromDevice(); } public AbstractAuthContext getAuthContext() { return information.getAuthContext(); } public RequestLevel getRequestLevel() { return getAuthContext().getLevel(); } @Override protected void releaseResourceBeforeNettyChannelClosure() { // Call super method super.releaseResourceBeforeNettyChannelClosure(); // Reomve itself from ChannelManager immediately if (isRequestChannelRemovedImmediately()) { removeFromChannelManager(); } } @Override protected void releaseResourceAfterNettyChannelClosure() { // Call super method super.releaseResourceAfterNettyChannelClosure(); // Reomve itself from ChannelManager after 1s. 
if (!isRequestChannelRemovedImmediately()) { getNettyChannel().eventLoop().schedule(new Runnable() { @Override public void run() { removeFromChannelManager(); } }, 1000, TimeUnit.MILLISECONDS); } } protected boolean isRequestChannelRemovedImmediately() { return true; } ; protected void removeFromChannelManager() { if (log.isDebugEnabled()) { log.debug("[sid:{}] Remove request channel {} channelId={} from channel manager", getSid(), this.getClass().getSimpleName(), getChannelId()); } ChannelManager.getInstance().removeRequestChannel(channelId, this); } } @Slf4j public class GetRequestChannel extends TransactionRequestChannel implements ISinkRequestChannel { protected String bindingId; protected AtomicBoolean isAllowTransmission; protected ISinkHandler sinkHandler; public GetRequestChannel(String channelId, Channel nettyChannel, String bindingId, GetRequestChannelInformation information) { super(channelId, nettyChannel, information); this.bindingId = bindingId; this.isAllowTransmission = new AtomicBoolean(false); this.sinkHandler = buildSinkHandler(information); } /** * Build the sink hanlder according to the sink channel. When any template response and data are sent to this Get * channel, the sending action will be processed by using sink hanlder. * * @param information The Get channel information * * @return The sink hanlder. */ protected ISinkHandler buildSinkHandler(GetRequestChannelInformation information) { if (information.isNeedMultipleMappingSource()) { // Flow control is only used in Preview type(such as video or nvr) return new FlowControlHandler(buildBasicSinkHandler(information)); } else { return buildBasicSinkHandler(information); } } /** * The basic sink handler which is used to do actual sending action. * * @param information The Get channel information * * @return The basic sink hanlder. */ protected ISinkHandler buildBasicSinkHandler(GetRequestChannelInformation information) { return new DefaultSinkHandler<>(this); } @Override public String getRequestType() { if (getChannelVersion() == ChannelVersion.VERSION_1_2) { return "relayservice-getStream"; } else if (isDeviceChannel()) { return "relayservice-deviceStream"; } else { return "relayservice-appStream"; } } @Override public String getBindingId() { return bindingId; } public void updateBindingId(String newBindingId) { this.bindingId = newBindingId; } public boolean isAllowTransmission() { return isAllowTransmission.get(); } public boolean setAllowTransmission() { return this.isAllowTransmission.compareAndSet(false, true); } /** * Whether the type is a multiple mapping type, such as video, audio, mixed and nvr. * * @return True if the type is a multiple mapping type. */ public boolean isNeedMultipleMappingSource() { return information.isNeedMultipleMappingSource(); } /** * If the Get channel arrives before the corresponding Post channel, the Get channel will be put into a container. * When the Post channel arrives, the Post channel will find and initialize the container to check whether any Get * channel is bound with it. At this time, using this method the notify the Get channel of binding action after * container initialization. * <p> * <p> * This method will be called in following cases: * <ul> * <li>After the Post channel initializes the container, any Get channel in the container will be notified of the * initialization by calling this.</li> * <li>If the container has been initialized by Post channel when the Get channel is added into container directly. 
* The Get channel should be notified of the initialization by calling this.</li> * </ul> * </p> * * @param postRequestChannel The source channel which this sink channel is bound with. * * @return Whether the sink channel is successfully updated. If false, the sink channel may be closed and the source * channel should remove it. */ @Override public boolean updateAfterContainerInitialization(final ISourceRequestChannel postRequestChannel) { doBindInitialization(postRequestChannel); return true; } /** * Update Get channel when the Get channel is bound with Post channel. * * @param postRequestChannel The corresponding Post channel. * * @return True if the updating operation is successful. */ public boolean updateWhenBinding(final PostRequestChannel postRequestChannel) { doBindInitialization(postRequestChannel); return true; } private void doBindInitialization(final ISourceRequestChannel requestChannel) { EventLoop eventLoop = getNettyChannel().eventLoop(); if (eventLoop.inEventLoop()) { doBindInitializationInEventLoop(requestChannel); } else { eventLoop.execute(() -> doBindInitializationInEventLoop(requestChannel)); } } /** * The initialization of Get channel when bound with Post channel * * @param postRequestChannel The corresponding Post channel. */ protected void doBindInitializationInEventLoop(ISourceRequestChannel postRequestChannel) { // Update sid updateSidByPrefix(postRequestChannel.getSid()); // Try to send response header HttpResponse responseHeader = postRequestChannel.getResponseHeader(); if (responseHeader != null) { sendTemplateHttpResponse(responseHeader); } } /** * To initialize the Get request channel transmission. * * @param postRequestChannel The corresponding Post request channel. */ public void startTransmission(final PostRequestChannel postRequestChannel) { EventLoop eventLoop = getNettyChannel().eventLoop(); if (eventLoop.inEventLoop()) { doStartTransmission(postRequestChannel); } else { eventLoop.execute(() -> doStartTransmission(postRequestChannel)); } } private void doStartTransmission(PostRequestChannel postRequestChannel) { assert getNettyChannel().eventLoop().inEventLoop(); // This is helpful when the 2nd App for smart codec stream. initSmartCodec(postRequestChannel); if (setAllowTransmission()) { int num = postRequestChannel.updateTransmittedGetRequestChannels(); if (log.isDebugEnabled()) { log.debug("[sid:{}] Start transmission of GET channel: transmittedGetChannelNum={}", getSid(), num); } // This is helpful when the 1st App and smart codec notification may be comming soon. initSmartCodec(postRequestChannel); /* List<IHttpData> templateDataList = getDataListToStartTransmission(postRequestChannel); for (IHttpData templateData : templateDataList) { sendTemplateData(templateData); }*/ } } private void initSmartCodec(PostRequestChannel postRequestChannel) { if (postRequestChannel.isSmartCodec()) { startSmartCodec(); } else { stopSmartCodec(); } } /** * If the corresponding channel is {@link BufferedPostRequestChannel}, the buffered data will be returned. * Otherwise, empty array will be returned. * * @param postRequestChannel The corresponding Post request channel. 
* * @return The data list for initialization of transmission */ protected List<IHttpData> getDataListToStartTransmission(PostRequestChannel postRequestChannel) { List<IHttpData> dataList; if (postRequestChannel instanceof BufferedPostRequestChannel) { dataList = ((BufferedPostRequestChannel) postRequestChannel).getAllBufferedData(); if (log.isDebugEnabled()) { log.debug("[sid:{}] Add buffered HTTP data: size={}", getSid(), dataList.size()); } } else { dataList = new ArrayList<>(); } IHttpData iFrame = postRequestChannel.getIFrame(); if (iFrame != null) { dataList.add(iFrame); } return dataList; } @Override public ChannelFuture sendTemplateHttpResponse(HttpResponse templateHttpResponse) { return sinkHandler.sendTemplateHttpResponse(templateHttpResponse); } @Override public ChannelFuture sendTemplateHttpResponseAndClose(HttpResponse templateHttpResponse, RelayConsts.CloseReason closeReason) { return sinkHandler.sendTemplateHttpResponseAndClose(templateHttpResponse, closeReason); } @Override public ChannelFuture sendTemplateData(IHttpData templateData) { return sinkHandler.sendTemplateData(templateData); } @Override public ChannelFuture sendTemplateDataAndClose(IHttpData templateData, RelayConsts.CloseReason closeReason) { return sinkHandler.sendTemplateDataAndClose(templateData, closeReason); } public void startSmartCodec() { ISinkHandler handler = sinkHandler; FlowControlHandler flowControlHandler = findFlowControlHandler(handler); if (flowControlHandler != null) { flowControlHandler.setSmartCodec(true); } if (!(handler instanceof SmartCodecSinkHandler)) { sinkHandler = new SmartCodecSinkHandler(handler); } } public void stopSmartCodec() { ISinkHandler handler = sinkHandler; FlowControlHandler flowControlHandler = findFlowControlHandler(handler); if (flowControlHandler != null) { flowControlHandler.setSmartCodec(false); } if (handler instanceof SmartCodecSinkHandler) { sinkHandler = ((SmartCodecSinkHandler) handler).unwrap(); } } public void updateKeyFrameUTCTime(long utcTime) { FlowControlHandler h = findFlowControlHandler(sinkHandler); if (h != null) { h.updateUTCTime(utcTime); } } public void updateWritabilityEvent(boolean isWritable) { FlowControlHandler h = findFlowControlHandler(sinkHandler); if (h != null) { h.setSinkChannelWriterable(isWritable); } } private FlowControlHandler findFlowControlHandler(ISinkHandler h) { while (h instanceof FilterSinkHandler) { if (h instanceof FlowControlHandler) { return (FlowControlHandler) h; } else { h = ((FilterSinkHandler) h).unwrap(); } } return null; } @Override protected void doClose() { if (RelayConsts.Type.FILE.equals(information.getParams().get(RelayConsts.ParamKey.TYPE))) { // When the last data is sent, netty channel may be closed immediately and the elb will not discard the last // data. The delay added here is like SO_LINGER configuration. if (log.isDebugEnabled()) { log.debug("[sid={}] Delay close netty channel when file type", getSid()); } getNettyChannel().eventLoop().schedule(GetRequestChannel.super::doClose, 100, TimeUnit.MILLISECONDS); } else { super.doClose(); } } @Override protected void releaseResourceBeforeNettyChannelClosure() { // Call super method super.releaseResourceBeforeNettyChannelClosure(); // As this channel has unbounded, unbindGetRequestChannel() should be avoided. 
RelayConsts.CloseReason closeReason = information.getCloseReason(); if (closeReason == RelayConsts.CloseReason.GET_CLOSED_BY_POST || closeReason == RelayConsts.CloseReason.GET_CLOSED_AFTER_LAST_DATA_SENT || closeReason == RelayConsts.CloseReason.GET_CLOSED_BY_KILL_SHARER_CMD) { return; } ChannelManager manager = ChannelManager.getInstance(); PostRequestChannel postRequestChannel = (PostRequestChannel) manager.getRequestChannel(bindingId); if (postRequestChannel != null) { postRequestChannel.unbindGetRequestChannel(this); } else { ContainerManager.getInstance().removeFromSingleDirectionContainer(this); } } } @Slf4j public class BufferedPostRequestChannel extends DefaultPostRequestChannel { private volatile IHttpData[] buffedDataList; public BufferedPostRequestChannel(String channelId, Channel nettyChannel, String shortBindingId, DataProcessState initDataProcessState, int basicLifeTime, int bufferedNum, PostRequestChannelInformation information) { super(channelId, nettyChannel, shortBindingId, initDataProcessState, basicLifeTime, information); this.buffedDataList = new IHttpData[bufferedNum]; } /** * Try to add data into buffer. The return value represents the left number in the buffer after adding this data. If * the data is added successfully, the return value is non-negtive, especially 0 indicates the buffer is full after * adding this data. If the data is failed to be added into buffer when the buffer is already full, -1 will be * return. No matter the data is added successfully or not, the data reference will not be modified. * * @return left number in the buffer after adding this data. * <ul> * <li>If the value is positive, the buffer still have capcacity after adding this data</li> * <li>If the value is 0, the buffer is full after adding this data</li> * <li>If the value is -1, the data is not added into this data as the buffer is already full</li> * </ul> */ public int addBufferedHttpData(IHttpData bufferedHttpData) { IHttpData[] oldList = this.buffedDataList; int bufferedNum = oldList.length; int index = 0; for (; index < bufferedNum; index++) { if (oldList[index] == null) { break; } } if (index < bufferedNum) { IHttpData[] newList = Arrays.copyOf(oldList, bufferedNum); newList[index] = bufferedHttpData; this.buffedDataList = newList; return bufferedNum - (index + 1); } else { return -1; } } /** * Obtain all buffered data at this time. The return list will be instantiated each time this method is called. * * @return The buffered data list. 
*/ public List<IHttpData> getAllBufferedData() { IHttpData[] current = this.buffedDataList; ArrayList<IHttpData> ret = new ArrayList<>(current.length); for (int index = 0; index < current.length; index++) { if (current[index] != null) { ret.add(current[index]); } else { break; } } return ret; } @Override protected DataProcessState getNextState(DataProcessState oldState) { if (oldState == DataProcessState.WANT_RESPONSE_MOULD) { return DataProcessState.WANT_BUFFERED_DATA; } else if (oldState == DataProcessState.WANT_BUFFERED_DATA) { return DataProcessState.TRANSMIT_DIRECT; } else if (oldState == DataProcessState.TRANSMIT_DIRECT) { return DataProcessState.TRANSMIT_DIRECT; } else { throw new RuntimeException("Bugs in data process state change:" + oldState); } } @Override protected void releaseResourceAfterNettyChannelClosure() { // release wave header if (log.isDebugEnabled()) { log.debug("[sid:{}] Release buffered HTTP data", getSid()); } IHttpData[] current = this.buffedDataList; this.buffedDataList = new IHttpData[0]; for (IHttpData data : current) { ReferenceCountUtil.safeRelease(data); } if (information.isFromDevice()) { log.debug("cloud access channel for {} from device is closing", getBindingId()); CloudStreamInfoContainer.getInstance().removeInfo(getBindingId()); } super.releaseResourceAfterNettyChannelClosure(); } } @Slf4j public class PostRequestChannel extends SourceRequestChannel<SingleDirectionContainer, PostRequestChannelInformation> { /* It is used for multiple mapping type when social share starts and the preview of single stream output IPC */ private String shortBindingId; private DataProcessState dataProcessState; private boolean isNeedResponseAfterGetAccess; private IHttpResponseGenerator successResponseGenerator; private IFrameHolder iFrameHolder; public PostRequestChannel(String channelId, Channel nettyChannel, String shortBindingId, DataProcessState initDataProcessState, PostRequestChannelInformation information) { super(channelId, nettyChannel, channelId, information); this.shortBindingId = shortBindingId; this.dataProcessState = initDataProcessState; if (this.getChannelVersion() == ChannelVersion.VERSION_1_2) { Boolean isNeedSuccessResponse = information.isNeedSuccessResponse(); if (isNeedSuccessResponse == null) { this.isNeedResponseAfterGetAccess = false; this.successResponseGenerator = buildSuccessResponseGenerator(nettyChannel.alloc()); } else { this.isNeedResponseAfterGetAccess = isNeedSuccessResponse; this.successResponseGenerator = isNeedSuccessResponse ? 
buildSuccessResponseGenerator(nettyChannel.alloc()) : null; } } else { this.isNeedResponseAfterGetAccess = true; this.successResponseGenerator = buildSuccessResponseGenerator(nettyChannel.alloc()); } } private IHttpResponseGenerator buildSuccessResponseGenerator(ByteBufAllocator alloc) { String boundary = ContentType.generateBoundary(); ContentType contentType = new ContentType("multipart/mixed", boundary, ContentType.DEFAULT_DATA_CHARSET); return new MultiPartResponseGenerator(alloc, contentType, HttpUtil.isTransferEncodingChunked(information .getOrignalRequest())); } @Override public String getRequestType() { if (getChannelVersion() == ChannelVersion.VERSION_1_2) { return "relayservice-postStream"; } else if (isDeviceChannel()) { return "relayservice-deviceStream"; } else { return "relayservice-appStream"; } } public boolean isMultipleMapping() { return information.isMultipleMapping(); } public boolean isNeedResponseAfterGetAccess() { return isNeedResponseAfterGetAccess; } public boolean isSingleStream() { return information.isSingleStream(); } public IHttpResponseGenerator getSuccessResponseGenerator() { return successResponseGenerator; } public String getShortBindingId() { return shortBindingId; } @Override public DataProcessState getDataProcessState() { return dataProcessState; } /** * Change to next state and return the new state. As this method is called in data processor only, the state field * only uses <code>volitale</code> instead of <code>AtomicReference</code> * * @return The next data process state. */ @Override public DataProcessState changeToNextState() { assert getNettyChannel().eventLoop().inEventLoop(); DataProcessState state = getNextState(this.dataProcessState); this.dataProcessState = state; return state; } protected DataProcessState getNextState(DataProcessState oldState) { if (oldState == DataProcessState.WANT_RESPONSE_MOULD) { return DataProcessState.TRANSMIT_DIRECT; } else if (oldState == DataProcessState.TRANSMIT_DIRECT) { return DataProcessState.TRANSMIT_DIRECT; } else { throw new RuntimeException("Bugs in data process state change:" + oldState); } } /** * Initialize the Post channel with a container. The Post channel only transmit data to the Get channel which has * the same version(except ffmpeg Get channel). If the Get channel version does not match with the Post version, the * Get channel will be response 404/400 and closed. Especially, when the Get channel is 1.2 version and Post channel * is 1.3 version, Get channel will be shown hint video whe next access. * * @param container The allocated container * * @return Whether the container has contained any Get channels, if <code>true</code>, the container contains GET * channels. 
*/ @Override public boolean initContainer(SingleDirectionContainer container) { assert this.container == null : "initContainer() should only be called once."; synchronized (PostRequestChannel.class) { this.container = container; return doInitContainer(container); } } protected boolean doInitContainer(SingleDirectionContainer container) { boolean hasGetChannels = false; Iterator<GetRequestChannel> iter = container.boundIterator(); while (iter.hasNext()) { GetRequestChannel getChannel = iter.next(); if (getChannel.getChannelVersion() != getChannelVersion() // && !getChannel.isFFmpegChannel() ) { iter.remove(); information.addVersionUnmatchNum(); getChannel.getInformation().setVersionUnmatch(true); if (getChannel.getChannelVersion() == ChannelVersion.VERSION_1_2) { getChannel.sendHttpResponseAndClose(HttpResponseStatus.NOT_FOUND, CloseReason.GET_CLOSED_WITH_LOW_VERSION); } else { getChannel.sendHttpResponseAndClose(HttpResponseStatus.BAD_REQUEST, CloseReason.GET_CLOSED_WITH_HIGHT_VERSION); } continue; } if (getChannel.getChannelVersion() == ChannelVersion.VERSION_1_2) { getChannel.setAllowTransmission(); } boolean isSuccess = getChannel.updateAfterContainerInitialization(this); if (isSuccess) { isSuccess = doUpdateBindingInformation(getChannel); } if (isSuccess) { hasGetChannels = true; } else { iter.remove(); getChannel.sendHttpResponseAndClose(HttpResponseStatus.NOT_FOUND, CloseReason.GET_CLOSED_WHEN_BIND_FAILURE); } } if (hasGetChannels) { // As Post channel data doesnot arrive or is buffered in decoder, startTransmission of Get channel does // not need to be called. container.synchronizeTransmittedSinkRequestChannelList(); } return hasGetChannels; } /** * Update Get and Post channel after container initialization. This is called when Get channel is added into * container which has been initialized by this Post channel. * <ul> * <li>For Get channel, using {@link GetRequestChannel#updateAfterContainerInitialization(ISourceRequestChannel)} to * update Get channel.</li> * <li>For Post channel, updating binding information</li> * </ul> * * @param getRequestChannel The Get channel which is added into an initialized container. * * @return True if the updating operation is successfully executed. */ public boolean updateAfterContainerInitialization(GetRequestChannel getRequestChannel) { // As this method is called after isInitContainer() checking, the container has been initialized. assert container != null : "initContainer() or isInitContainer() should be called first"; SingleDirectionContainer container = this.container; synchronized (container) { if (getRequestChannel.getChannelVersion() == ChannelVersion.VERSION_1_2) { getRequestChannel.startTransmission(this); } boolean isSuccess = getRequestChannel.updateAfterContainerInitialization(this); if (isSuccess) { isSuccess = doUpdateBindingInformation(getRequestChannel); } if (!isSuccess) { container.removeBoundSinkRequestChannel(getRequestChannel); container.synchronizeTransmittedSinkRequestChannelList(); } return isSuccess; } } /** * Call it after Post channel is added into channel manager. This method is used to obtain and process data which is * stored in decoder, which is useful to sticky package for version 1.3. 
*/ public void callAfterAddIntoChannelManager() { for (GetRequestChannel getChannel : getBoundGetRequestChannels()) { if (getChannel instanceof IDataFollowedRelayRequestChannel) { ((IDataFollowedRelayRequestChannel) getChannel).setStartDataProcess(); ((IDataFollowedRelayRequestChannel) getChannel).enableAutoRead(); ((IDataFollowedRelayRequestChannel) getChannel).addPushDataTask(); } } } /** * Bind the given Get channel with this Post channel and return the binding result. If the Post channel is closed, * false will be returned.The Post channel only transmit data to the Get channel which has the same version(except * ffmpeg Get channel). If the Get channel version does not match with the Post version, the binding result will be * false. * * @param getRequestChannel The Get channel needs to be bound with this Post channel * * @return The binding action is sucessful or not. True if the Get channel is successfully bound with this Post * channel. False if the Post channel is closed and fails to bind the Get channel. */ public BindingResult bindGetRequestChannel(GetRequestChannel getRequestChannel) { // As this method is called after PostRequestChannel being put into channel manager. the container has been // initialized. assert container != null : "initContainer() or isInitContainer() should be called first"; if (isClosed()) { return BindingResult.SOURCE_CHANNEL_CLOSURE; } SingleDirectionContainer container = this.container; synchronized (container) { return doBindGetRequestChannel(container, getRequestChannel); } } protected BindingResult doBindGetRequestChannel(SingleDirectionContainer container, GetRequestChannel getRequestChannel) { BindingResult result = container.addBoundSinkRequestChannel(getRequestChannel); if (result != BindingResult.SUCCESS) { return result; } if (getRequestChannel.getChannelVersion() == ChannelVersion.VERSION_1_2) { getRequestChannel.startTransmission(this); } if (BooleanUtils.isTrue(getRequestChannel.getWs())) { if (getRequestChannel instanceof IDataFollowedRelayRequestChannel) { ((IDataFollowedRelayRequestChannel) getRequestChannel).setStartDataProcess(); ((IDataFollowedRelayRequestChannel) getRequestChannel).enableAutoRead(); } return BindingResult.SUCCESS; } boolean isSuccess = getRequestChannel.updateWhenBinding(this); if (isSuccess) { isSuccess = doUpdateBindingInformation(getRequestChannel); } if (!isSuccess) { container.removeBoundSinkRequestChannel(getRequestChannel); container.synchronizeTransmittedSinkRequestChannelList(); } return isSuccess ? BindingResult.SUCCESS : BindingResult.UPDATE_INFORMATION_FAILURE; } private boolean doUpdateBindingInformation(GetRequestChannel getRequestChannel) { switch (getRequestChannel.getRequestLevel()) { case OWNER: information.addOwnerNum(); break; case SLAVE: information.addSlaveNum(); break; case SHARED: information.addSharedNum(); break; case SOCIAL: information.addSocialNum(); break; default: break; } return true; } /** * Send pull-stream command when ffmpeg channel arrives and the Post channel version is 1.3 */ public void sendPullStreamCommandIfNecessary() { if (getChannelVersion() == ChannelVersion.VERSION_1_3) { assert successResponseGenerator != null; HttpResponse response = successResponseGenerator.getHttpResponse(HttpResponseStatus.OK); if (response != null) { sendHttpResponse(response); } JSONObject command = buildPullStreamCommand(getInformation().getParams()); sendData(successResponseGenerator.getHttpData(command)); } } /** * <p> * Build pull-stream command. 
* </p> * <p> * In 1.3 version, channel of video type is 0, and channel of nvr type starts from 0 and has following mapping. * * <pre> * 'channel' in relay request url ----> 'channel' in JSON command * 0 ----> 0 * 1 ----> 1 * ... ----> ... * </pre> * * </p> * * @param params request url parameters. * * @return The pull-stream command */ private JSONObject buildPullStreamCommand(Map<String, String> params) { // 实际上设备使用此信息控制推流RESOLUTION int channel = 0; if (Type.NVR.equals(params.get(ParamKey.TYPE)) || Type.SMART_NVR.equals(params.get(ParamKey.TYPE))) { channel = Integer.parseInt(params.get(ParamKey.CHANNEL)); } JSONObject preview = new JSONObject(); preview.put("channels", new JSONArray().put(channel)); String resolution = params.get(ParamKey.RESOLUTION); if (resolution != null) { preview.put("resolutions", new JSONArray().put(resolution)); } JSONObject command = new JSONObject(); command.put("type", "request"); command.put("seq", "0"); command.put("params", new JSONObject().put("method", "get").put("preview", preview)); return command; } /** * {@inheritDoc} * * <ul> * <li>For 1.3 version Post channel, as the terminal will not send data before receiving pull-data command, the * response and auto read COULD be sent and enabled after Get access.</li> * <li>For 1.2 version Post channel, the response and auto read SHOULD be sent and enabled after Get access.</li> * </ul> */ @Override public void startDataAcceptanceAfterSinkChannelAccess() { // Send success response header if necessary if (isNeedResponseAfterGetAccess()) { HttpResponse httpResponse = successResponseGenerator.getHttpResponse(HttpResponseStatus.OK); if (httpResponse != null) { sendHttpResponse(httpResponse); } } // Start data process setStartDataProcess(); // Try to enable auto read of POST channel enableAutoRead(); // Add pull data task addPushDataTask(); } /** * Unbind the given GET channel. If the GET channel exists in the binding list, true will be returned. * * @param getRequestChannel The GET channel needs to be unbound. * * @return True if the GET channel exists in the binding list. */ public boolean unbindGetRequestChannel(GetRequestChannel getRequestChannel) { // As this method is called after PostRequestChannel being put into channel manager. the container has been // initialized. assert container != null : "Should call initContainer() first."; SingleDirectionContainer container = this.container; synchronized (container) { boolean isExisted = container.removeBoundSinkRequestChannel(getRequestChannel); if (isExisted) { // If the ffmpeg channel is closed explicitly, the channel will not exist in list. container.synchronizeTransmittedSinkRequestChannelList(); // 取消FFmpeg重试 // if (getRequestChannel.isFFmpegChannel()) { // doRestartSegmenter(container); // } else { doTryToClose(container, CloseReason.POST_CLOSED_AFTER_GET_EXIT); // } } return isExisted; } } /** * Try to close the Post channel. The closure strategy is shown in {@link #doTryToClose(SingleDirectionContainer, * CloseReason)} * * @param closeReason The specific close reason. */ public void tryToClose(CloseReason closeReason) { SingleDirectionContainer container = this.container; if (container == null) { return; } synchronized (container) { doTryToClose(container, closeReason); } } /** * Try to close current POST channel after GET channel is unbound. For single mapping channel, the POST channel will * be closed immediately if no GET channel is bound any more. 
For multiple mapping channel, the POST channel will be * closed in following cases: * <ul> * <li>When the bound channel number is 0, the segmenter doesn't exist or is in Segmenter.MAX_RESTART_RETRIES state, * or doesn't have consumer.</li> * <li>When the bound channel number is 1, the segmenter is in Segmenter.SEGMENTING state and doesn't have consumer</li> * </ul> * * @param closeReason The reason of closing action */ protected void doTryToClose(SingleDirectionContainer container, CloseReason closeReason) { // Before doTryToClose, container has been checked and could not be null. if (isMultipleMapping()) { // int bindedChannelNum = container.getBoundSinkRequestChannelNum(); // if (bindedChannelNum == 0) { // Segmenter s = segmenter; // if (s == null || s.getState() == Segmenter.MAX_RESTART_RETRIES || !s.hasConsumer()) { // close(closeReason); // } // } else if (bindedChannelNum == 1) { // Segmenter s = segmenter; // if (s != null && s.isSegmenting() && !s.hasConsumer()) { // close(closeReason); // } // } //log.error("Is multiple mapping"); close(closeReason); } else { if (container.hasNoBoundSinkRequestChannel()) { close(closeReason); } } } /** * Close all GET channels bound with this POST channel. The method is only used in {@link * #releaseResourceAfterNettyChannelClosure()} */ private void closeAllGetRequestChannels() { SingleDirectionContainer container = this.container; if (container == null) { return; } synchronized (container) { Iterator<GetRequestChannel> iter = container.boundIterator(); while (iter.hasNext()) { GetRequestChannel getChannel = iter.next(); if (log.isDebugEnabled()) { log.debug("[sid:{}] Close GET Channel: channelId={}", getSid(), getChannel.getChannelId()); } getChannel.close(CloseReason.GET_CLOSED_BY_POST); } container.removeAllBoundSinkeRequestChannels(); container.synchronizeTransmittedSinkRequestChannelList(); } } public List<GetRequestChannel> getBoundGetRequestChannels() { SingleDirectionContainer container = this.container; if (container == null) { return new ArrayList<>(); } synchronized (container) { return container.getBoundSinkRequestChannels(); } } public int getBoundGetRequestChannelNum() { SingleDirectionContainer container = this.container; if (container == null) { return 0; } synchronized (container) { return container.getBoundSinkRequestChannelNum(); } } @SuppressWarnings("unchecked") public List<GetRequestChannel> getTransmittedGetRequestChannels() { return (List<GetRequestChannel>) getTransmittedSinkRequestChannels(); } public int getTransmittedGetRequestChannelNum() { SingleDirectionContainer container = this.container; if (container == null) { return 0; } return container.getTransmittedSinkRequestChannelNum(); } public int updateTransmittedGetRequestChannels() { SingleDirectionContainer container = this.container; if (container == null) { return 0; } synchronized (container) { return container.synchronizeTransmittedSinkRequestChannelList(); } } public class ControlRequestChannel extends BaseHttpRequestChannel { public ControlRequestChannel(String channelId, Channel nettyChannel, ControlRequestChannelInformation information) { super(channelId, nettyChannel, ILifeTimeControl.IDLE_LIFE_TIME_S, information); } @Override public String getRequestType() { String cmdMethod = information.getCmdMethod(); return cmdMethod != null ? RelayConsts.Service.CONTROL + '-' + cmdMethod : RelayConsts.Service.CONTROL; } }