I. Increase the maximum number of open files
1. Increase the system-wide maximum number of open files
Append the following line to the end of /etc/sysctl.conf:
fs.file-max = 1020000

To make the per-user limit permanent as well, edit /etc/security/limits.conf and add the lines below (* means all users and can be replaced with a specific username):

# /etc/security/limits.conf
*    hard    nofile    1020000
*    soft    nofile    1020000
2. Increase the maximum number of files a single process of the current user can open
sudo sh -c "ulimit -n 1020000 && exec su $LOGNAME"
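To apply the sysctl change and verify the new limits, the usual commands are:

sudo sysctl -p                 # reload /etc/sysctl.conf
cat /proc/sys/fs/file-max      # system-wide limit
ulimit -n                      # per-process limit in the current shell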
II. Test code
The server program depends on the libev framework, which must be built in advance and placed in the expected location. The full server source is given below.
Compile:
gcc server.c -o server ../include/libev.a -lm
Run:
./server -p 8000
The source defaults to port 8000, and -p selects a different base port. The server listens on PORT_NUM (30) consecutive ports starting from the base port; its HTTP handling resembles htmlfile chunked transfer encoding.
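Concretely, every request on a connection is answered with a response of roughly this shape (assembled from the macros in the source; <hex-len> stands for the chunk length in hexadecimal):

HTTP/1.1 200 OK
Connection: keep-alive
Content-Type: text/html; charset=utf-8
Transfer-Encoding: chunked

<hex-len>\r\n
<html><head>...</head><body><div id="div"></div>\r\n

server.c: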
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <err.h>
#include <unistd.h>
#include "libev-4.20/include/ev.h"

#define HTMLFILE_RESPONSE_HEADER \
    "HTTP/1.1 200 OK\r\n" \
    "Connection: keep-alive\r\n" \
    "Content-Type: text/html; charset=utf-8\r\n" \
    "Transfer-Encoding: chunked\r\n" \
    "\r\n"

#define HTMLFILE_RESPONSE_FIRST \
    "<html><head><title>htmlfile chunked example</title><script>var _ = function (msg) { document.getElementById('div').innerHTML = msg; };</script></head><body><div id=\"div\"></div> "

#define PORT_NUM 30

static int server_port = 8000;
struct ev_loop *loop;

typedef struct {
    int fd;
    ev_io ev_read;
} client_t;

ev_io ev_accept[PORT_NUM];   /* one accept watcher per listening port */

static int usr_num;          /* number of currently connected clients */

static void incr_usr_num() {
    usr_num++;
    printf("online user %d\n", usr_num);
}

static void dec_usr_num() {
    usr_num--;
    printf("~online user %d\n", usr_num);
}

static void free_res(struct ev_loop *loop, ev_io *ws);

int setnonblock(int fd) {
    int flags = fcntl(fd, F_GETFL);
    if (flags < 0)
        return flags;
    flags |= O_NONBLOCK;
    if (fcntl(fd, F_SETFL, flags) < 0)
        return -1;
    return 0;
}

/* Wrap msg in one HTTP chunk: "<hex length>\r\n<script>_('msg');</script>\r\n".
 * 23 == strlen("<script>_('');</script>"), the wrapper added around msg. */
static int format_message(const char *ori_message, char *target_message) {
    return sprintf(target_message, "%X\r\n<script>_('%s');</script>\r\n",
                   ((int)strlen(ori_message) + 23), ori_message);
}

static void write_ori(client_t *client, char *msg) {
    if (client == NULL) {
        fprintf(stderr, "line:%d -- the client is NULL:%s !\n", __LINE__, strerror(errno));
        return;
    }
    write(client->fd, msg, strlen(msg));
}

static void write_body(client_t *client, char *msg) {
    char body_msg[strlen(msg) + 100];
    format_message(msg, body_msg);
    write_ori(client, body_msg);
}

static void read_cb(struct ev_loop *loop, ev_io *w, int revents) {
    client_t *client = w->data;
    int r = 0;
    char rbuff[1024];
    if (EV_ERROR & revents) {   /* check for an error event before touching the fd */
        fprintf(stderr, "error event in read\n");
        free_res(loop, w);
        return;
    }
    if (revents & EV_READ) {
        r = read(client->fd, rbuff, sizeof(rbuff));
    }
    if (r < 0) {
        fprintf(stderr, "read error\n");
        free_res(loop, w);
        return;
    }
    if (r == 0) {
        fprintf(stderr, "client disconnected.\n");
        free_res(loop, w);
        return;
    }
    /* answer each request with the chunked header plus the first chunk */
    write_ori(client, HTMLFILE_RESPONSE_HEADER);
    char target_message[strlen(HTMLFILE_RESPONSE_FIRST) + 20];
    sprintf(target_message, "%X\r\n%s\r\n",
            (int)strlen(HTMLFILE_RESPONSE_FIRST), HTMLFILE_RESPONSE_FIRST);
    write_ori(client, target_message);
}

static void accept_cb(struct ev_loop *loop, ev_io *w, int revents) {
    struct sockaddr_in client_addr;
    socklen_t client_len = sizeof(client_addr);
    int client_fd = accept(w->fd, (struct sockaddr *) &client_addr, &client_len);
    if (client_fd == -1) {
        fprintf(stderr, "line:%d -- the accept return -1:%s !\n", __LINE__, strerror(errno));
        return;
    }
    client_t *client = malloc(sizeof(client_t));
    client->fd = client_fd;
    if (setnonblock(client->fd) < 0)
        err(1, "failed to set client socket to non-blocking");
    client->ev_read.data = client;
    ev_io_init(&client->ev_read, read_cb, client->fd, EV_READ);
    ev_io_start(loop, &client->ev_read);
    incr_usr_num();
}

int main(int argc, char *argv[]) {   /* getopt() requires non-const argv */
    int ch;
    while ((ch = getopt(argc, argv, "p:")) != -1) {
        switch (ch) {
        case 'p':
            server_port = atoi(optarg);
            break;
        }
    }
    printf("free -m at startup:\n");
    system("free -m");
    loop = ev_default_loop(0);
    int i = 0;
    /* open PORT_NUM listening sockets on consecutive ports */
    for (i = 0; i < PORT_NUM; i++) {
        struct sockaddr_in listen_addr;
        int reuseaddr_on = 1;
        int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
        if (listen_fd < 0)
            err(1, "socket failed");
        if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr_on, sizeof(reuseaddr_on)) == -1)
            err(1, "setsockopt failed");
        memset(&listen_addr, 0, sizeof(listen_addr));
        listen_addr.sin_family = AF_INET;
        listen_addr.sin_addr.s_addr = INADDR_ANY;
        listen_addr.sin_port = htons(server_port + i);
        if (bind(listen_fd, (struct sockaddr *) &listen_addr, sizeof(listen_addr)) < 0)
            err(1, "bind failed");
        if (listen(listen_fd, 5) < 0)
            err(1, "listen failed");
        if (setnonblock(listen_fd) < 0)
            err(1, "failed to set server socket to non-blocking");
        ev_io_init(&ev_accept[i], accept_cb, listen_fd, EV_READ);
        ev_io_start(loop, &ev_accept[i]);
    }
    ev_loop(loop, 0);
    return 0;
}

static void free_res(struct ev_loop *loop, ev_io *w) {
    client_t *client = w->data;
    if (client == NULL) {
        fprintf(stderr, "line:%d -- the client is NULL:%s !\n", __LINE__, strerror(errno));
        return;
    }
    dec_usr_num();   /* only count down when a client is actually freed */
    ev_io_stop(loop, &client->ev_read);
    close(client->fd);
    free(client);
}
The test client uses the libevent framework, chosen because it is simple to use and provides a rich, convenient API; it needs to be downloaded and installed manually first:
wget https://github.com/downloads/libevent/libevent/libevent-2.0.21-stable.tar.gz
tar xvf libevent-2.0.21-stable.tar.gz
cd libevent-2.0.21-stable
./configure --prefix=/usr
make
make install
Note: make and make install need to be run as root.
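With the library installed under /usr as configured above, the client below can be compiled with a command along these lines (the exact include path may need adjusting to match where you keep the headers, per the include/... paths in the source):

gcc client1.c -o client1 -levent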
client1.c source:
#include <sys/types.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <err.h>
#include "include/event.h"
#include "include/evhttp.h"
#include <unistd.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <time.h>
#include <pthread.h>

#define BUFSIZE 4096
#define NUMCONNS 30000
#define SLEEP_MS 10

char buf[BUFSIZE];

int bytes_recvd = 0;
int chunks_recvd = 0;
int closed = 0;
int connected = 0;

/* called each time a chunk of a server response arrives */
void chunkcb(struct evhttp_request *req, void *arg) {
    int s = evbuffer_remove(req->input_buffer, buf, BUFSIZE);
    bytes_recvd += s;
    chunks_recvd++;
    //printf("buf:%s\n", buf);
    if (connected >= NUMCONNS && chunks_recvd % 10000 == 0) {
        printf(">Chunks: %d\tBytes: %d\tClosed: %d\n", chunks_recvd, bytes_recvd, closed);
    }
}

/* called when a request finishes or its connection is closed */
void reqcb(struct evhttp_request *req, void *arg) {
    closed++;
}

int main(int argc, char **argv) {
    if (argc <= 2) {
        printf("Usage: %s ip port\n", argv[0]);
        exit(0);
    }
    const char *SERVERADDR = argv[1];
    int SERVERPORT = atoi(argv[2]);
    event_init();
    struct evhttp_connection *evhttp_connection;
    struct evhttp_request *evhttp_request;
    char path[32]; // eg: "/test/123"
    int i;
    for (i = 1; ; i++) {
        int tmp = i % 30;   /* spread connections across the server's 30 listening ports */
        evhttp_connection = evhttp_connection_new(SERVERADDR, SERVERPORT + tmp);
        evhttp_set_timeout(evhttp_connection, 864000); // 10 day timeout
        evhttp_request = evhttp_request_new(reqcb, NULL);
        evhttp_request->chunk_cb = chunkcb;
        sprintf(path, "/test/%d", ++connected);
        if (i % 100 == 0) printf("Req: %s\t->\t%s\n", SERVERADDR, path);
        evhttp_make_request(evhttp_connection, evhttp_request, EVHTTP_REQ_GET, path);
        evhttp_connection_set_timeout(evhttp_request->evcon, 864000);
        event_loop(EVLOOP_NONBLOCK);   /* pump pending events without blocking */
        if (connected % 200 == 0)
            printf("\nChunks: %d\tBytes: %d\tClosed: %d\n", chunks_recvd, bytes_recvd, closed);
        usleep(SLEEP_MS * 1000);
    }
    event_dispatch();   /* never reached: the loop above runs forever */
    return 0;
}
The client has a known problem: its heap keeps growing, and valgrind reports a probable memory leak. A likely cause is that every loop iteration creates a new evhttp_connection that is never released with evhttp_connection_free (keeping connections open is the point of the test, but failed or closed ones are never cleaned up either).
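To reproduce the report, a typical valgrind invocation looks like this (address and port are examples):

valgrind --leak-check=full ./client1 127.0.0.1 8000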
III. Memory
Adapted from: http://blog.youkuaiyun.com/mafuli007/article/details/7573521
Cache release:

# sync
# echo 1 > /proc/sys/vm/drop_caches    # free pagecache
# echo 2 > /proc/sys/vm/drop_caches    # free dentries and inodes
# echo 3 > /proc/sys/vm/drop_caches    # free pagecache, dentries and inodes
Note: it is best to run sync before dropping caches, to avoid losing data.
Because of how the Linux kernel works, there is normally no need to deliberately release cache that is already in use: cached content speeds up file reads and writes. First, how to read the output of the free command.
[root@yuyii proc]# free
             total       used       free     shared    buffers     cached
Mem:        515588     295452     220136          0       2060      64040
-/+ buffers/cache:     229352     286236
Swap:       682720        112     682608

The first row describes memory usage from the system's point of view:
total — total physical memory
used — memory in use; this value is usually large because it includes cache as well as application memory
free — completely unused memory
shared — memory shared between applications
buffers — cache used mainly for directories, inode values, etc. (it grows when you ls a large directory)
cached — cache for files that have been opened
note: total = used + free; used = buffers + cached (possibly plus shared)
The second row describes memory from the applications' point of view:
-buffers/cache — the memory actually used by applications: used minus buffers minus cached
+buffers/cache — the memory available to applications: free plus buffers plus cached
note: -buffers/cache = used - buffers - cached; +buffers/cache = free + buffers + cached
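Checking against the sample output above: 295452 - 2060 - 64040 = 229352 and 220136 + 2060 + 64040 = 286236, which are exactly the two values shown in the -/+ buffers/cache row.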
Before the server is started:
1. Check current memory
[le@master c1000k]$ free -m
             total       used       free     shared    buffers     cached
Mem:          3705        649       3056          0         12         95
-/+ buffers/cache:         541       3164
Swap:         8191          0       8191
Client:
[le@localhost c1000k_http]$ free -m
             total       used       free     shared    buffers     cached
Mem:          3591       1677       1914        344          0        230
-/+ buffers/cache:        1446       2145
Swap:         3999          0       3999
2. Once a certain number of connections is reached, the client can no longer establish new connections, and after a while it starts reporting timeouts.
Run dmesg on the server and the client. In my test this happened at 65,507 connections, with only about 100 MB of memory in use.
Problem a: dmesg shows many lines like: nf_conntrack: table full, dropping packet
The cause is the limit on tracked connection entries, CONNTRACK_MAX, which defaults to 2^16 = 65536.
Increase ip_conntrack_max on both the client and the server:
vi /etc/sysctl.conf

Entries for kernels before 2.6:
net.ipv4.netfilter.ip_conntrack_max = 1020000
#net.ipv4.netfilter.ip_conntrack_tcp_timeout_established = 180

Entries for kernel 2.6 and later:
net.nf_conntrack_max = 1020000
net.netfilter.nf_conntrack_max = 1020000
#net.netfilter.nf_conntrack_tcp_timeout_established = 1200
sysctl -p
If this fails with error: "net.nf_conntrack_max" is an unknown key, load the ip_conntrack module with modprobe and check with lsmod that the module is loaded.
modprobe ip_conntrack
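After loading the module, confirming and re-applying looks like:

lsmod | grep conntrack    # confirm the module is loaded
sysctl -p                 # re-apply the settings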
Problem b: the dmesg output also contains many errors like: Out of socket memory. Causes (see http://m.blog.youkuaiyun.com/blog/star62003/8743275):
1. There are too many orphan sockets.
Fix: add or increase the value of net.ipv4.tcp_max_orphans in /etc/sysctl.conf.
2. TCP sockets have used up the memory allocated to them.
echo "net.ipv4.tcp_mem = 786432 2097152 3145728">> /etc/sysctl.conf
echo "net.ipv4.tcp_rmem = 4096 4096 16777216">> /etc/sysctl.conf
echo "net.ipv4.tcp_wmem = 4096 4096 16777216">> /etc/sysctl.conf
Note: to save memory, the TCP read and write buffers are both set to 4 KB here (they could be made even smaller); the read/write buffers roughly correspond to the sliding window. The three tcp_mem values work out to 3 GB, 8 GB, and 12 GB, and the maximum for tcp_rmem and tcp_wmem is 16 MB. (tcp_mem applies to the whole protocol stack and is measured in pages, normally 4 KB; tcp_rmem and tcp_wmem apply to a single connection and are measured in bytes.)
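A quick unit check of those numbers (tcp_mem counts 4 KB pages, tcp_rmem/tcp_wmem count bytes):

echo $((786432 * 4096 / 1024 / 1024 / 1024))    # tcp_mem low:        3 (GB)
echo $((2097152 * 4096 / 1024 / 1024 / 1024))   # tcp_mem pressure:   8 (GB)
echo $((3145728 * 4096 / 1024 / 1024 / 1024))   # tcp_mem high:      12 (GB)
echo $((16777216 / 1024 / 1024))                # tcp_rmem/wmem max: 16 (MB)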
IV. Test results
             total       used       free     shared    buffers     cached
Mem:          3705       2625       1080          0          9         79
-/+ buffers/cache:        2536       1169
Swap:         8191          0       8191
However, checking the server process with $ cat /proc/11186/status shows "VmSize: 33728 kB", i.e. the process itself uses only about 30 MB; the rest of the usage is on the kernel side (socket buffers, conntrack entries, etc.).
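To watch where that kernel-side memory goes, the standard /proc interfaces can be consulted (the PID here is the example from above):

cat /proc/net/sockstat            # the TCP line reports socket memory in pages
grep VmSize /proc/11186/status    # the process's own virtual size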
http://www.ibm.com/developerworks/cn/linux/l-hisock.html
Practical references:
http://www.ideawu.net/blog/archives/740.html
http://www.blogjava.net/yongboy/archive/2013/04/11/397677.html