An Introduction to Asynchronous IO on Linux (io_uring)

1. What is an asynchronous operation?

        "Asynchronous" describes the relationship between two parties. In network communication, for example, the relationship between client and server is asynchronous: the client sends a request and the response arrives some time later, i.e., the request-response model.

2. What is asynchronous IO?

        Asynchronous IO separates issuing an IO request from handling its result. Programming asynchronously does not reduce the CPU time an operation actually needs; it reclaims the time otherwise spent blocked. A file IO operation really has two parts: triggering the IO event, and producing the result (data is first loaded from disk into kernel space, then copied from kernel space to user space). Synchronous IO treats these two parts as one complete, effectively "atomic" step, so the calling thread sleeps for the whole duration and CPU utilization suffers. The point of asynchrony is to put that waiting time to use: while the request is in flight, the scheduler can run other tasks from a thread pool (covered in a later article) that have no dependency on the blocked logic. Once the slow operation completes, the thread that executed it notifies the scheduler, which switches back to the original thread to continue.
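        To make the two phases concrete, here is a minimal sketch using liburing (introduced in the next section). The file path and queue depth are arbitrary choices for illustration: the read is submitted in one step, the thread is free to do unrelated work in between, and the result is collected in a second step.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <liburing.h>

int main(void) {
    struct io_uring ring;
    io_uring_queue_init(8, &ring, 0);

    int fd = open("/etc/hostname", O_RDONLY);
    char buf[256] = {0};

    /* Phase 1: trigger the IO event - submit the read and return immediately */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, fd, buf, sizeof(buf) - 1, 0);
    io_uring_submit(&ring);

    /* ...the thread is free to run unrelated work here... */

    /* Phase 2: collect the result once the kernel has finished */
    struct io_uring_cqe *cqe;
    io_uring_wait_cqe(&ring, &cqe);
    printf("read %d bytes: %s", cqe->res, buf);
    io_uring_cqe_seen(&ring, cqe);

    close(fd);
    io_uring_queue_exit(&ring);
    return 0;
}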

3. An introduction to io_uring

        io_uring was merged into the Linux kernel in 2019 (kernel 5.1) and exposes three system calls:

        io_uring_setup: creates an io_uring instance with the given parameters and returns a file descriptor for the ring;

        io_uring_register: registers resources such as buffers or files with the ring, so the kernel does not have to map them on every request;

        io_uring_enter: submits queued requests to the kernel and/or waits for completions.

        These raw system calls are quite low-level, which makes them hard to understand and to structure program logic around, so in practice they are accessed through the liburing wrapper library.
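        As a minimal sketch of the liburing workflow (the no-op request is just for demonstration), the typical lifecycle maps onto those system calls like this:

#include <stdio.h>
#include <liburing.h>

int main(void) {
    struct io_uring ring;

    /* io_uring_setup under the hood: create the SQ/CQ rings */
    if (io_uring_queue_init(1024, &ring, 0) < 0) {
        perror("io_uring_queue_init");
        return 1;
    }

    /* fetch an SQE, prepare an operation, submit (io_uring_enter under the hood) */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_nop(sqe);            /* a no-op request, handy for smoke tests */
    io_uring_submit(&ring);

    /* wait for and consume the completion */
    struct io_uring_cqe *cqe;
    io_uring_wait_cqe(&ring, &cqe);
    printf("nop completed, res = %d\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);

    io_uring_queue_exit(&ring);
    return 0;
}

        (io_uring_register does not appear above: it is only needed when pre-registering buffers or files, which the echo server below also skips.)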

4. Asynchronous IO in code

        Server code:



#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <liburing.h>

#define ENTRIES_LENGTH		1024
#define BUFFER_LENGTH		1024

#define EVENT_ACCEPT   	0
#define EVENT_READ		1
#define EVENT_WRITE		2


int init_server(unsigned short port) {

	int sockfd = socket(AF_INET, SOCK_STREAM, 0);
	if (sockfd < 0) {
		perror("socket");
		return -1;
	}

	struct sockaddr_in serveraddr;
	memset(&serveraddr, 0, sizeof(struct sockaddr_in));
	serveraddr.sin_family = AF_INET;
	serveraddr.sin_addr.s_addr = htonl(INADDR_ANY);	/* listen on all interfaces */
	serveraddr.sin_port = htons(port);

	if (-1 == bind(sockfd, (struct sockaddr*)&serveraddr, sizeof(serveraddr))) {
		perror("bind");
		return -1;
	}

	listen(sockfd, 10);

	return sockfd;
}

/* Packed into the 64-bit user_data of each SQE so that the matching
 * CQE tells us which fd and which event type completed. */
struct conn_info {
    int fd;
    int event;
};


/* Queue an asynchronous accept; conn_info is copied into the SQE's
 * user_data field so the completion can be routed by fd and event. */
int set_event_accept(struct io_uring *ring, int sockfd, struct sockaddr *addr,
                     socklen_t *addrlen, int flags) {

    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    struct conn_info accept_info = {
        .fd = sockfd,
        .event = EVENT_ACCEPT,
    };

    io_uring_prep_accept(sqe, sockfd, addr, addrlen, flags);
    memcpy(&sqe->user_data, &accept_info, sizeof(struct conn_info));

    return 0;
}

/* Queue an asynchronous recv on a connected socket. */
int set_event_recv(struct io_uring *ring, int sockfd,
                   void *buf, size_t len, int flags) {

    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    struct conn_info recv_info = {
        .fd = sockfd,
        .event = EVENT_READ,
    };

    io_uring_prep_recv(sqe, sockfd, buf, len, flags);
    memcpy(&sqe->user_data, &recv_info, sizeof(struct conn_info));

    return 0;
}

/* Queue an asynchronous send on a connected socket. */
int set_event_send(struct io_uring *ring, int sockfd,
                   void *buf, size_t len, int flags) {

    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    struct conn_info send_info = {
        .fd = sockfd,
        .event = EVENT_WRITE,
    };

    io_uring_prep_send(sqe, sockfd, buf, len, flags);
    memcpy(&sqe->user_data, &send_info, sizeof(struct conn_info));

    return 0;
}

int main(int argc,char *argv[]){
    
    unsigned short port =6666;
    int socketfd=init_server(port);

    struct io_uring_params params;
	memset(&params, 0, sizeof(params));

    /* create the submission/completion rings (wraps the io_uring_setup syscall) */
    struct io_uring ring;
    io_uring_queue_init_params(ENTRIES_LENGTH, &ring, &params);
#if 0
    /* synchronous version: the thread blocks inside accept() */
    struct sockaddr_in clientaddr;
    socklen_t len = sizeof(clientaddr);
    accept(socketfd, (struct sockaddr*)&clientaddr, &len);
    printf("accepted\n");
#else
    /* asynchronous version: queue an accept request instead of blocking */
    struct sockaddr_in clientaddr;
    socklen_t len = sizeof(clientaddr);
    set_event_accept(&ring, socketfd, (struct sockaddr *)&clientaddr, &len, 0);
#endif

    /* one shared buffer for all connections: fine for this echo demo,
     * but real code would give each connection its own buffer */
    char buffer[BUFFER_LENGTH] = {0};

    while (1) {
        io_uring_submit(&ring);          /* push all queued SQEs to the kernel */

        struct io_uring_cqe *cqe;
        io_uring_wait_cqe(&ring, &cqe);  /* block until at least one completion arrives */

        struct io_uring_cqe *cqes[128];
        int nready = io_uring_peek_batch_cqe(&ring, cqes, 128); /* reap up to 128 completions at once */

        int i = 0;
        for (i = 0; i < nready; i++) {

            struct io_uring_cqe *entriesNode = cqes[i];
            struct conn_info result;
            memcpy(&result, &entriesNode->user_data, sizeof(struct conn_info));

            if (result.event == EVENT_ACCEPT) {

                /* re-arm the accept so the listener keeps accepting */
                set_event_accept(&ring, socketfd, (struct sockaddr *)&clientaddr, &len, 0);

                /* for an accept, cqe->res is the new connection's fd */
                int connfd = entriesNode->res;
                set_event_recv(&ring, connfd, buffer, BUFFER_LENGTH, 0);
                
            } else if (result.event == EVENT_READ) {

                int ret = entriesNode->res;   /* bytes received, 0 on peer close */
                if (ret == 0) {
                    close(result.fd);
                } else if (ret > 0) {
                    set_event_send(&ring, result.fd, buffer, ret, 0);  /* echo back */
                }
            } else if (result.event == EVENT_WRITE) {

                /* echo sent; queue the next recv on this connection */
                set_event_recv(&ring, result.fd, buffer, BUFFER_LENGTH, 0);
            }
        }    
        
        io_uring_cq_advance(&ring, nready);  /* mark the reaped CQEs as consumed */
    }
    return 0;
}
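        Assuming liburing and its development headers are installed (package names vary by distribution, e.g. liburing-dev on Debian/Ubuntu), the server builds and runs like this:

gcc server.c -o server -luring
./server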

        Client code:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <unistd.h>

#include <sys/time.h>
#include <pthread.h>
#include <arpa/inet.h>

typedef struct test_context_s {

    char serverip[16];   /* server IP, set by -s */
    int port;            /* server port, set by -p */
    int threadnum;       /* number of worker threads, set by -t */
    int connection;      /* parsed from -c, currently unused */
    int requestion;      /* total number of requests, set by -n */

    int failed;          /* failed requests; updated by all threads without locking */

} test_context_t;


int connect_tcpserver(const char *ip, unsigned short port) {

	int connfd = socket(AF_INET, SOCK_STREAM, 0);
	if (connfd < 0) {
		perror("socket");
		return -1;
	}

	struct sockaddr_in serveraddr;
	memset(&serveraddr, 0, sizeof(struct sockaddr_in));

	serveraddr.sin_family = AF_INET;
	serveraddr.sin_addr.s_addr = inet_addr(ip);
	serveraddr.sin_port = htons(port);

	int ret = connect(connfd, (struct sockaddr*)&serveraddr, sizeof(serveraddr));
	if (ret) {
		perror("connect");
		close(connfd);
		return -1;
	}

	return connfd;
}



#define MESSAGE_LENGTH 64
/* elapsed milliseconds between two struct timeval values */
#define TIME_SUB_MS(tv1, tv2)  ((tv1.tv_sec - tv2.tv_sec) * 1000 + (tv1.tv_usec - tv2.tv_usec) / 1000)

#if MESSAGE_LENGTH == 64
#define TEST_MESSAGE "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\r\n"
#endif

#define RBUFFER_LENGTH		2048
#define WBUFFER_LENGTH		2048

/* Send one fixed test message and verify the echoed reply matches. */
int send_recv_tcppkt(int fd) {

    int res = send(fd, TEST_MESSAGE, strlen(TEST_MESSAGE), 0);
    if (res < 0) {
        exit(1);
    }

    char rbuffer[RBUFFER_LENGTH] = {0};
    res = recv(fd, rbuffer, RBUFFER_LENGTH, 0);
    if (res <= 0) {
        exit(1);
    }

    if (strcmp(rbuffer, TEST_MESSAGE) != 0) {
        printf("failed: '%s' != '%s'\n", rbuffer, TEST_MESSAGE);
        return -1;
    }

    return 0;
}

/* Worker thread: open one connection and run its share of the requests. */
static void* test_qps_entry(void *arg) {

    test_context_t *pctx = (test_context_t *)arg;
    int connfd = connect_tcpserver(pctx->serverip, pctx->port);
    if (connfd < 0) {
        printf("connect_tcpserver failed\n");
        return NULL;
    }

    int count = pctx->requestion / pctx->threadnum;

    int i = 0;
    int res;
    while (i++ < count) {
        res = send_recv_tcppkt(connfd);
        if (res != 0) {
            printf("send_recv_tcppkt failed\n%d\n", connfd);
            pctx->failed++;  /* note: not atomic; shared across threads */
            continue;
        }
    }

    return NULL;
}

int main(int argc ,char*argv[]){

    
    int opt;
    test_context_t ctx = {0};

    while ((opt = getopt(argc, argv, "s:p:t:c:n:?")) != -1) {

		switch (opt) {

			case 's':
				printf("-s: %s\n", optarg);
				strcpy(ctx.serverip, optarg);
				break;

			case 'p':
				printf("-p: %s\n", optarg);

				ctx.port = atoi(optarg);
				break;

			case 't':
				printf("-t: %s\n", optarg);
				ctx.threadnum = atoi(optarg);
				break;

			case 'c':
				printf("-c: %s\n", optarg);
				ctx.connection = atoi(optarg);
				break;

			case 'n':
				printf("-n: %s\n", optarg);
				ctx.requestion = atoi(optarg);
				break;

			default:
				return -1;
		
		}
		
	}

    pthread_t *ptid=malloc(ctx.threadnum*sizeof(pthread_t));

    int i=0;

    struct timeval tv_begin;
    gettimeofday(&tv_begin,NULL);

    for(i=0;i<ctx.threadnum;i++){
        pthread_create(&ptid[i],NULL,test_qps_entry,&ctx);

    }

    for(i=0;i<ctx.threadnum;i++){
        pthread_join(ptid[i],NULL);
        
    }

    struct timeval tv_end;
    
    gettimeofday(&tv_end, NULL);

	int time_used = TIME_SUB_MS(tv_end, tv_begin);
	if (time_used == 0) time_used = 1;  /* avoid division by zero on very fast runs */

    printf("success: %d, failed: %d, time_used: %d, qps: %d\n", ctx.requestion - ctx.failed,
		ctx.failed, time_used, ctx.requestion * 1000 / time_used);

	free(ptid);
    return 0;
}
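        The client only needs pthreads. The port below matches the server above, and the thread and request counts mirror the test described in the summary (the -c option is parsed but unused by this version):

gcc client.c -o client -lpthread
./client -s 127.0.0.1 -p 6666 -t 50 -n 1000000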

5. io_uring QPS test results:

        (screenshot of the io_uring benchmark run, not reproduced here)

6. epoll QPS test results

        (screenshot of the epoll benchmark run, not reproduced here)

        For the epoll code, see: Linux网络:基于reactor(epoll)实现Webserver (CSDN blog).

7. Summary

        Under identical conditions (50 threads, 1,000,000 requests in total), the performance of the synchronous and asynchronous IO servers differs by more than 10%.

Reference link: https://xxetb.xetslk.com/s/4cnbDc
