This is mostly borrowed from RabbitMQ's networking layer; the server-side code is below.
The listen call:
gen_tcp:listen(Port, SocketOpts)
SocketOpts = [
    binary,
    {packet, 0},            %% {packet, 0}: the Erlang runtime passes TCP data to the application untouched, with no packet framing
    {reuseaddr, true},      %% allow the local port number to be reused
    {nodelay, true},        %% disable Nagle's algorithm, so even small amounts of data are sent out immediately
    {delay_send, true},     %% with delay_send on, each port keeps a send queue; data is not sent immediately but queued and flushed when the socket is writable, in effect ERTS's own batching mechanism
    {active, false},        %% note: with {active, false} the port is not closed when the client disconnects, whereas {active, true} and {active, once} do close it
    {backlog, 1024},        %% length of the pending-connection (accept) queue
    {exit_on_close, false}, %% if set to false, data still in the buffer can be sent out after the socket is closed
    {send_timeout, 15000}   %% how long to wait for the OS to send the data; if the lower layer still has not sent it within this period, {error, timeout} is returned
]
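For the {inet_async, ...} message below to ever arrive, someone has to arm the asynchronous accept first. That step is not shown here; a minimal sketch, assuming the acceptor is a gen_server whose #state{} record holds listen_socket and ref as used below:

%% Sketch only: arm the first asynchronous accept so the listen socket
%% delivers {inet_async, LSock, Ref, {ok, Sock}} to this process.
init([LSock]) ->
    case prim_inet:async_accept(LSock, -1) of
        {ok, Ref}       -> {ok, #state{listen_socket = LSock, ref = Ref}};
        {error, Reason} -> {stop, {cannot_accept, Reason}}
    end.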
This handle_info clause runs in the acceptor process:
handle_info({inet_async, LSock, Ref, {ok, Sock}}, State = #state{listen_socket=LSock, ref=Ref}) ->
    %% patch up the socket so it looks like one we got from
    %% gen_tcp:accept/1
    {ok, Mod} = inet_db:lookup_socket(LSock),
    inet_db:register_socket(Sock, Mod),
    try
        %% report
        {ok, {Address, Port}} = inet:sockname(LSock),
        {ok, {PeerAddress, PeerPort}} = inet:peername(Sock),
        ?DEBUG("accepted TCP connection on ~s:~p from ~s:~p~n",
               [inet_parse:ntoa(Address), Port,
                inet_parse:ntoa(PeerAddress), PeerPort]),
        spawn_socket_controller(Sock)
    catch Error:Reason ->
        gen_tcp:close(Sock),
        ?ERROR_MSG("ERRunable to accept TCP connection: ~p ~p~n", [Error, Reason])
    end,
    accept(State);
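accept/1 is not shown above; modeled on RabbitMQ's tcp_acceptor, it plausibly just re-arms the asynchronous accept and remembers the new request reference:

%% Sketch: request the next asynchronous accept and store its reference,
%% so the next {inet_async, ...} message matches the clause above.
accept(State = #state{listen_socket = LSock}) ->
    case prim_inet:async_accept(LSock, -1) of
        {ok, Ref}       -> {noreply, State#state{ref = Ref}};
        {error, Reason} -> {stop, {cannot_accept, Reason}, State}
    end.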
spawn_socket_controller(ClientSock) ->
    case gen_tcp:recv(ClientSock, 23, 30000) of
        {ok, Bin} -> %% still needs testing: validate here and filter out useless data
            case supervisor:start_child(sdeeg_tcp_client_sup, [ClientSock, Bin]) of
                {ok, CPid} ->
                    inet:setopts(ClientSock, [{packet, 4}, binary, {active, false}, {nodelay, true}, {delay_send, true}]),
                    gen_tcp:controlling_process(ClientSock, CPid), %% hand the socket over to the gateway process
                    CPid ! go; %% signal the gateway process; from now on data from the socket goes to the sdeeg_tcp_client process
                {error, Error} ->
                    ?CRITICAL_MSG("cannot accept client:~w", [Error]),
                    catch erlang:port_close(ClientSock)
            end;
        Other ->
            ?ERROR_MSG("ERRrecv packet error:~w", [Other]),
            catch erlang:port_close(ClientSock)
    end.
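The supervisor:start_child(sdeeg_tcp_client_sup, [ClientSock, Bin]) call implies a simple_one_for_one supervisor, which is not part of the original; a hedged sketch of what it might look like (only the sdeeg_tcp_client name comes from the text, everything else is an assumption):

-module(sdeeg_tcp_client_sup).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    %% simple_one_for_one: children share one spec, and the arguments given
    %% to supervisor:start_child/2 are appended to the start_link call.
    {ok, {{simple_one_for_one, 10, 10},
          [{sdeeg_tcp_client, {sdeeg_tcp_client, start_link, []},
            temporary, brutal_kill, worker, [sdeeg_tcp_client]}]}}.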
The gen_tcp:controlling_process/2 call above makes the gateway process the owner of the socket, so everything the client sends from now on is handled there.
Now for part of the gateway process code:
handle_info(go, #state{socket=Socket} = State) ->
    prim_inet:async_recv(Socket, 0, -1),
    {noreply, State};
handle_info({inet_async, Socket, _Ref, {ok, Data}}, State) ->
    ?ERROR_MSG("ERR~p", [Data]),
    do_handle_data(Socket, Data, State),
    {noreply, State};
handle_info({inet_async, _Socket, _Ref, {error, closed}}, State) ->
    ?ERROR_MSG("ERR~p", [closed]),
    {stop, normal, State};
handle_info({inet_async, _Socket, _Ref, {error, Reason}}, State) ->
    ?ERROR_MSG("ERR~ts:~w", ["socket error", Reason]),
    {stop, normal, State}.

do_handle_data(Socket, Data, _State) ->
    ?ERROR_MSG("ERR~p", [Data]),
    prim_inet:async_recv(Socket, 0, -1).
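For completeness, a sketch of how the gateway process might start. The sdeeg_tcp_client name comes from the prose and the start_link/2 arity follows from the supervisor:start_child call above; the rest is an assumption:

%% Sketch: store the socket but do not read from it yet. The acceptor still
%% owns the socket until gen_tcp:controlling_process/2 completes, which is
%% why reading only starts on the 'go' message.
start_link(Socket, FirstPacket) ->
    gen_server:start_link(?MODULE, [Socket, FirstPacket], []).

init([Socket, _FirstPacket]) ->
    {ok, #state{socket = Socket}}.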
Here is my test code:
-module(test_gateway).
-export([connect/0]).

connect() ->
    {ok, Socket1} = gen_tcp:connect("210.38.235.164", 443, [binary, {packet, 0}]),
    ok = gen_tcp:send(Socket1, <<1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1>>),
    ok = gen_tcp:send(Socket1, <<23:32,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1>>),
    gen_tcp:close(Socket1).
With the test code above, the server only ever receives the first message; the second never arrives. At first I kept suspecting that handing ClientSock over to the gateway process was the problem, but after gen_tcp:close(Socket1) runs, the gateway process does receive the {error, closed} message, so clearly that was not it. The actual cause turned out to be this: the packet option was set to 0 in gen_tcp:listen at the start, but when the socket was handed over to the gateway process, packet was switched to 4. The client kept sending with {packet, 0}, so its second message never formed a frame that the server's {packet, 4} decoder would deliver.
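To see why the mismatch is fatal, recall what {packet, 4} does: on send it prepends a 4-byte big-endian length header, and on receive it waits for exactly that many payload bytes. A quick shell sketch (loopback address and port number are arbitrary):

{ok, L} = gen_tcp:listen(5678, [binary, {packet, 0}, {active, false}]),
{ok, C} = gen_tcp:connect("127.0.0.1", 5678, [binary, {packet, 4}]),
{ok, S} = gen_tcp:accept(L),
ok = gen_tcp:send(C, <<"abc">>),
%% the raw {packet, 0} side sees the 4-byte length header in front of the payload
{ok, <<0,0,0,3,"abc">>} = gen_tcp:recv(S, 0).

So the client has to switch to {packet, 4} after its first raw message, matching what the server does when it hands the socket to the gateway. The corrected test code: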
-module(test_gateway).
-export([connect/0]).

connect() ->
    {ok, Socket1} = gen_tcp:connect("210.38.235.164", 443, [binary, {packet, 0}]),
    ok = gen_tcp:send(Socket1, <<1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1>>),
    inet:setopts(Socket1, [{packet, 4}]),
    ok = gen_tcp:send(Socket1, <<23:32,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1>>),
    gen_tcp:close(Socket1).
OK, done.