import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.util.ArrayList;

import com.gftech.cmpp.bean.CmppBody;
import com.gftech.cmpp.bean.CmppDeliver;
import com.gftech.cmpp.bean.CmppHead;
import com.gftech.cmpp.bean.CmppPack;
import com.gftech.cmpp.bean.ICmppCmdID;
import com.gftech.smp.SmpDeliverPack;

/** */
/**
* 测试读取速度:使用Buffer和不使用Buffer的情况
*
* @author sinboy
* @since 2007.3.30
*
*/

public
class
ServerDemo
...
{ 

public static void main(String[] args) ...{
int listenPort = 2000;
try ...{
Socket client = null;
ServerSocket ss = new ServerSocket(listenPort);
System.out.println("侦听" + listenPort + "端口,等待客户端的连接...");
while (true) ...{
client = ss.accept();
System.out.println("接收到通信平台或客服系统的连接" + client.toString());
read(client);
}
} catch (IOException e) ...{
e.printStackTrace();
}
}

public static void read(Socket sock) ...{
if (sock != null)
try ...{ 
while (true) ...{
CmppPack cp = CmppCommu.receive(sock);
if (cp != null) ...{
CmppHead head = cp.getHead();
CmppBody body = cp.getBody();
if (head != null) ...{
switch (head.getCmdID()) ...{
case ICmppCmdID.CMPP_DELIVER:
CmppDeliver cd = new CmppDeliver(body.getBody());
if (cd != null) ...{
SmpDeliverPack pack = new SmpDeliverPack(cd);
String content = "cmpp deliver:" + pack.getSrcAddr() + " " + pack.getDestAddr() + " " + pack.getContent() + " "
+ pack.getLinkID();
System.out.println(content);
}
}
}
}
}
} catch (IOException e) ...{
e.printStackTrace();
}
}
}


public
void
send(
byte
[] b)
throws
IOException
...
{
if (sock == null)
throw new IOException();
if (b != null) ...{
try ...{
BufferedOutputStream os = new BufferedOutputStream(sock.getOutputStream());
if (b != null) ...{
os.write(b);
os.flush();
}
} catch (IOException e) ...{
throw e;
}
}
}
客户端代码如下:
public
class
ClientDemo
...
{

public static void main(String[] args) ...{
try ...{
Socket client = new Socket("192.168.10.8", 2000);
write(client);
} catch (UnknownHostException e) ...{
e.printStackTrace();
} catch (IOException e) ...{
e.printStackTrace();
}
}

public static void write(Socket client) ...{
try ...{
ArrayList<CmppPack> cpList = new ArrayList<CmppPack>();
for (int i = 0; i < 10000; i++) ...{
SmpDeliverPack pack = new SmpDeliverPack("13612345678", "01234", "" + (i + 1));
CmppDeliver cd = pack.toCmppDeliver();
if (cd != null) ...{
CmppPack cp = new CmppPack(new CmppHead(12 + cd.getBody().length, ICmppCmdID.CMPP_DELIVER, 100), cd);
cpList.add(cp);
}
}

for (int i = 0; i < cpList.size(); i++) ...{
CmppCommu.send(client,cpList.get(i));
System.out.println(i);
}
Thread.sleep(10000);
} catch (IOException e) ...{
e.printStackTrace();
} 
catch (InterruptedException e) ...{
}
}
}
用到的发送和接入方法如下:
public
class
CmppCommu
...
{

public static void send(Socket sock, CmppPack pack) throws IOException ...{
if (sock != null && pack != null) ...{
try ...{
byte[] b = pack.getBytes();
BufferedOutputStream os = new BufferedOutputStream(sock.getOutputStream());
if (b != null) ...{
os.write(b);
os.flush();
}
} catch (IOException e) ...{
throw e;
}
}
}
public static CmppPack receive(Socket sock) throws IOException ...{
final int MAX_LEN = 10000;
CmppPack pack = null;
if (sock == null)
throw new IOException();
try ...{
BufferedInputStream bis = new BufferedInputStream(sock.getInputStream());
DataInputStream in = new DataInputStream(bis);
int len = in.readInt();// 读取消息头
if (len >= 12 && len < MAX_LEN) ...{
int cmd = in.readInt();
int seq = in.readInt();
int bodyLen = len - 12;
byte[] msg = new byte[bodyLen];
in.read(msg);// 读取消息体
CmppHead head = new CmppHead(len, cmd, seq);
CmppBody body = new CmppBody(msg);
pack = new CmppPack(head, body);
}
} catch (SocketTimeoutException e) ...{
// logger.warn("time out");
} catch (IOException e) ...{
throw e;
}
return pack;
}
}
使用Eclipse TPTP对程序的执行进行监控,结果如下:
如上图所示,服务器端程序在接收数据时总共花了 21 秒多。因为从理论上讲把输入流用 Buffer 包装一下接收的速度会更快一些,下面我们对它进行验证,把接收程序略做改动,增加一句 Buffer 包装:
BufferedInputStream bis = new BufferedInputStream(sock.getInputStream());
DataInputStream in = new DataInputStream(bis);
再次运行,发现一个很奇怪的问题,接收时有数据丢失的情况,正常情况下应该是从1到10000:
cmpp deliver:
13612345678
01234
1
16240905710000010001
cmpp deliver:
13612345678
01234
2
16241005710000010001
cmpp deliver:
13612345678
01234
261
16241005710000010001
cmpp deliver:
13612345678
01234
262
16241005710000010001
cmpp deliver:
13612345678
01234
263
16241005710000010001
cmpp deliver:
13612345678
01234
264
16241005710000010001
. . . . . .
丢数据的原因在于:每次调用接收方法都会新建一个 BufferedInputStream,它会从套接字一次性预读多于一个包的数据,这些已被缓冲但尚未取走的数据随旧流一起被丢弃了。明白了这一点,那我们可以把创建输入流放到每次读取的循环之外,修改读取方法如下:
public
static
CmppPack receive(DataInputStream in)
throws
IOException
...
{
final int MAX_LEN = 10000;
CmppPack pack = null;
if (in == null)
throw new IOException();
try ...{
int len = in.readInt();// 读取消息头
if (len >= 12 && len < MAX_LEN) ...{
int cmd = in.readInt();
int seq = in.readInt();
int bodyLen = len - 12;
byte[] msg = new byte[bodyLen];
in.read(msg);// 读取消息体
CmppHead head = new CmppHead(len, cmd, seq);
CmppBody body = new CmppBody(msg);
pack = new CmppPack(head, body);
}
} catch (SocketTimeoutException e) ...{
// logger.warn("time out");
} catch (IOException e) ...{
throw e;
}
return pack;
}
服务器端的代码也作相应调整,把创建输入流的过程放在循环之外,保证每次读取数据包都是使用同一个输入流:
BufferedInputStream bis = new BufferedInputStream(sock.getInputStream());
DataInputStream in = new DataInputStream(bis);
while (true) {
    CmppPack cp = CmppCommu.receive(in);
    ...
再次进行测试,结果如下:
从上图可以看出来,效率提高了很多,这里提高的原因有两点:一是使用了Buffer进行输入流的包装,二是每次读取数据包时创建新的输入流的过程放到了循环之外,减少了资源的开销。但纯粹地使用Buffer进行包装,效果究竟能提高多少呢?我们再次使用非Buffer的读取方式,但把创建输入流的过程放在循环之外,接收方法不变,服务端代码如下:
DataInputStream in = new DataInputStream(sock.getInputStream());
while (true) {
    CmppPack cp = CmppCommu.receive(in);
    ...
从上图可以看出,非Buffer包装的输入流,时间增加了约3秒钟,效率下降了50%左右。
再对客户端的程序做一个改进,把创建输出流这一步也提到循环之外:
BufferedOutputStream out = new BufferedOutputStream(client.getOutputStream());
for (int i = 0; i < cpList.size(); i++) {
    CmppCommu.send(out, cpList.get(i));
    System.out.println(i);
}
提高虽然不明显,但也有提高,结果如下:
本文通过客户端/服务器模式模拟,对比使用与未使用Buffered流的网络IO操作效率,并针对数据丢失问题给出解决方案。
24

被折叠的 条评论
为什么被折叠?



