FastDFS是fishman大牛开发并开源的分布式文件系统。小弟抱着学习的态度阅读了项目源码,并就自己关心的部分(比如上传文件、下载文件、文件的数据同步)在阅读过程中做了如下学习笔记。
首先是数据服务器storage部分.
(一).storage_nio.c
//对于超时的处理:删除文件列表,释放任务到队列里面 void task_finish_clean_up(struct fast_task_info *pTask) { StorageClientInfo *pClientInfo; StorageFileContext *pFileContext; pClientInfo = (StorageClientInfo *)pTask->arg; pFileContext = &(pClientInfo->file_context); if (pFileContext->fd > 0) { close(pFileContext->fd); /* if file does not write to the end, delete it */ if (pFileContext->op == FDFS_STORAGE_FILE_OP_WRITE && / pFileContext->offset < pFileContext->end) { if (unlink(pFileContext->filename) != 0) { logError("file: "__FILE__", line: %d, " / "client ip: %s, " / "delete useless file %s fail," / "errno: %d, error info: %s", / __LINE__, pTask->client_ip, / pFileContext->filename, / errno, strerror(errno)); } } } close(pClientInfo->sock); memset(pTask->arg, 0, sizeof(StorageClientInfo)); free_queue_push(pTask); }
//数据服务器socket事件回调,比如说在上传文件时,接收了一部分之后,调用storage_nio_notify(pTask) //又重新发起接收读socket的操作,而pClientInfo->stage=FDFS_STORAGE_STAGE_NIO_RECV //的这个赋值并没有发生改变 void storage_recv_notify_read(int sock, short event, void *arg) { struct fast_task_info *pTask; StorageClientInfo *pClientInfo; //注意这个参数是不同的,一个是跟踪服务器参数,一个是数据服务器参数 long task_addr; int bytes; int result; while (1) { if ((bytes=read(sock, &task_addr, sizeof(task_addr))) < 0) { if (!(errno == EAGAIN || errno == EWOULDBLOCK)) { logError("file: "__FILE__", line: %d, " / "call read failed, " / "errno: %d, error info: %s", / __LINE__, errno, strerror(errno)); } break; } else if (bytes == 0) {void storage_recv_notify_read logError("file: "__FILE__", line: %d, " / "call read failed, end of file", __LINE__); break; } //socket接收时使用pTask来进行传递参数:接收从工作线程socket服务端的写入内容,开始去读 pTask = (struct fast_task_info *)task_addr; pClientInfo = (StorageClientInfo *)pTask->arg; if (pClientInfo->sock < 0) //quit flag { struct storage_nio_thread_data *pThreadData; struct timeval tv; pThreadData = g_nio_thread_data + / pClientInfo->nio_thread_index; tv.tv_sec = 1; tv.tv_usec = 0; event_base_loopexit(pThreadData->ev_base, &tv); return; } switch (pClientInfo->stage) { case FDFS_STORAGE_STAGE_NIO_INIT: //数据服务器服务端socket接收过来的任务的pClientInfo->stage=FDFS_STORAGE_STAGE_NIO_INIT //因此在这里在重新绑定读写事件 //每连接一个客户端,在这里都会触发这个动作 result = storage_nio_init(pTask); break; case FDFS_STORAGE_STAGE_NIO_RECV: //在次接受包体时pTask->offset偏移量被重置 pTask->offset = 0; //任务的长度=包的总长度-包的总偏移量 //会出现接受客户端分块传来的字节流 pTask->length = pClientInfo->total_length - pClientInfo->total_offset; //总是试图将余下的自己一次接收收完 //pTask->length:数据长度,pTask->size:分配的缓冲大小 if (pTask->length > pTask->size) { pTask->length = pTask->size; } client_sock_read(pClientInfo->sock, EV_READ, pTask); result = 0; break; case FDFS_STORAGE_STAGE_NIO_SEND: result = storage_send_add_event(pTask); break; default: logError("file: "__FILE__", line: %d, " / "invalid stage: %d", __LINE__, / pClientInfo->stage); result = EINVAL; break; } if (result != 
0) { task_finish_clean_up(pTask); } } }
//初始化socket读写操作 static int storage_nio_init(struct fast_task_info *pTask) { int result; StorageClientInfo *pClientInfo; struct storage_nio_thread_data *pThreadData; pClientInfo = (StorageClientInfo *)pTask->arg; //取工作线程:依据pClientInfo->nio_thread_index 参数 //已经进行了赋值,在建立客户端socket连接时 //在重新绑定socket读写事件,绑定到Task上面 pThreadData = g_nio_thread_data + pClientInfo->nio_thread_index; event_set(&pTask->ev_read, pClientInfo->sock, EV_READ, client_sock_read, pTask); if ((result=event_base_set(pThreadData->ev_base, &pTask->ev_read)) != 0) { logError("file: "__FILE__", line: %d, " / "event_base_set fail.", __LINE__); return result; } event_set(&pTask->ev_write, pClientInfo->sock, EV_WRITE, client_sock_write, pTask); if ((result=event_base_set(pThreadData->ev_base, / &pTask->ev_write)) != 0) { logError("file: "__FILE__", line: %d, " / "event_base_set fail.", __LINE__); return result; } //建立与客户端的连接后,状态设置为开始接收请求 pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_RECV; if ((result=event_add(&pTask->ev_read, &g_network_tv)) != 0) { logError("file: "__FILE__", line: %d, " / "event_add fail.", __LINE__); return result; } return 0; }
int storage_send_add_event(struct fast_task_info *pTask) { //发送是先重置pTask->offset为 pTask->offset = 0; /* direct send */ client_sock_write(pTask->ev_write.ev_fd, EV_WRITE, pTask); return 0; }
static void client_sock_read(int sock, short event, void *arg) { int bytes; int recv_bytes; struct fast_task_info *pTask; StorageClientInfo *pClientInfo; //得到任务信息 pTask = (struct fast_task_info *)arg; pClientInfo = (StorageClientInfo *)pTask->arg; if (event == EV_TIMEOUT) { if (pClientInfo->total_offset == 0 && pTask->req_count > 0) { if (event_add(&pTask->ev_read, &g_network_tv) != 0) { task_finish_clean_up(pTask);
logError("file: "__FILE__", line: %d, " / "event_add fail.", __LINE__); } } else { logError("file: "__FILE__", line: %d, " / "client ip: %s, recv timeout, " / "recv offset: %d, expect length: %d", / __LINE__, pTask->client_ip, / pTask->offset, pTask->length);
task_finish_clean_up(pTask); }
return; } //进入到while循环 while (1) { //初始时pClientInfo->total_length=0 pTask->offset=0 if (pClientInfo->total_length == 0) //recv header { recv_bytes = sizeof(TrackerHeader) - pTask->offset; } else { //在次接受上传文件的数据包时,因为发生storage_nio_notify(pTask) //所以重新进入到void storage_recv_notify_read()函数中,而pTask->offset被重新设置为 //而pTask->length也被重置设为一次性接收剩余的字节数(如果大于分配的pTask->size,又重新设置为这个pTask->size) recv_bytes = pTask->length - pTask->offset; } //logInfo("recv_bytes=%d, pTask->length=%d, pTask->offset=%d",recv_bytes, pTask->length, pTask->offset); bytes = recv(sock, pTask->data + pTask->offset, recv_bytes, 0); if (bytes < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { if(event_add(&pTask->ev_read, &g_network_tv)!=0) { task_finish_clean_up(pTask);
logError("file: "__FILE__", line: %d, "/ "event_add fail.", __LINE__); } } else { logError("file: "__FILE__", line: %d, " / "client ip: %s, recv failed, " / "errno: %d, error info: %s", / __LINE__, pTask->client_ip, / errno, strerror(errno));
task_finish_clean_up(pTask); }
return; } else if (bytes == 0) { logDebug("file: "__FILE__", line: %d, " / "client ip: %s, recv failed, " / "connection disconnected.", / __LINE__, pTask->client_ip);
task_finish_clean_up(pTask); return; } if (pClientInfo->total_length == 0) //header { if (pTask->offset + bytes < sizeof(TrackerHeader)) { if (event_add(&pTask->ev_read, &g_network_tv)!=0) { task_finish_clean_up(pTask);
logError("file: "__FILE__", line: %d, "/ "event_add fail.", __LINE__); } pTask->offset += bytes; return; } //确定包的总长度:比如下载文件时,接收的包,就只有包的长度 pClientInfo->total_length=buff2long(((TrackerHeader *)pTask->data)->pkg_len); if (pClientInfo->total_length < 0) { logError("file: "__FILE__", line: %d, " / "client ip: %s, pkg length: " / INT64_PRINTF_FORMAT" < 0", / __LINE__, pTask->client_ip, / pClientInfo->total_length);
task_finish_clean_up(pTask); return; } //包的总长度=包头+包体的长度 //设想发送的场景:包头+包体+包体+...(其中在包头里面含有多个包体的总长度) //因为默认的接收缓冲只有K,所以会分次发送 pClientInfo->total_length += sizeof(TrackerHeader); //确定任务的数据长度 if (pClientInfo->total_length > pTask->size) { //如果包的总长大于包的分配的长度,那么任务长度等于任务分配的长度 pTask->length = pTask->size; } else { pTask->length = pClientInfo->total_length; //确定任务的长度 } } pTask->offset += bytes; //接收到当前包完成 if (pTask->offset >= pTask->length) { //上次操作接收的总的偏移量+这次接收的数据长度,如果大于包的总长度,那么说明包接收完毕 //重新设置pClientInfo->stage,设置为发送的状态,否则的话,在继续去写文件 if (pClientInfo->total_offset + pTask->length >= pClientInfo->total_length) { /* current req recv done */ //重新设置为可以发送的状态 //下载文件的流程: //1.接收完客户端发起下载文件的请求包后,pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_SEND设置为 // 发送的状态; //2.数据服务器分片读取文件,发送到客户端,每次读取一片完成,准备发送的时候,就触发storage_nio_notify(pTask) // 函数调用,然后在进入到void storage_recv_notify_read()函数里面,触发写socket事件 //3.因此触发通知函数去调用void storage_recv_notify_read()函数,上传文件触发也是调用void storage_recv_notify_read()函数 pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_SEND; pTask->req_count++; } //初始时pClientInfo->total_offset==0 if (pClientInfo->total_offset == 0) { pClientInfo->total_offset = pTask->length; storage_deal_task(pTask); //数据服务器进行处理 } else { //否则继续写文件 pClientInfo->total_offset += pTask->length; /* continue to write to file */ storage_dio_queue_push(pTask); } return; } } return; }
//socket客户端写操作 static void client_sock_write(int sock, short event, void *arg) { int bytes; int result; struct fast_task_info *pTask; StorageClientInfo *pClientInfo; pTask = (struct fast_task_info *)arg; if (event == EV_TIMEOUT) { logError("file: "__FILE__", line: %d, " / "send timeout", __LINE__);
task_finish_clean_up(pTask);
return; } pClientInfo = (StorageClientInfo *)pTask->arg; while (1) { bytes = send(sock, pTask->data + pTask->offset, pTask->length - pTask->offset, 0); //printf("%08X sended %d bytes/n", (int)pTask, bytes); if (bytes < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { if (event_add(&pTask->ev_write, &g_network_tv) != 0) { task_finish_clean_up(pTask); logError("file: "__FILE__", line: %d, " / "event_add fail.", __LINE__); } } else { logError("file: "__FILE__", line: %d, " / "client ip: %s, recv failed, " / "errno: %d, error info: %s", / __LINE__, pTask->client_ip, / errno, strerror(errno)); task_finish_clean_up(pTask); } return; } else if (bytes == 0) { logWarning("file: "__FILE__", line: %d, " / "send failed, connection disconnected.", / __LINE__);
task_finish_clean_up(pTask); return; } pTask->offset += bytes; //如果包已经发送完毕 if (pTask->offset >= pTask->length) { pClientInfo->total_offset += pTask->length; //客户端发起下载文件命令时,pClientInfo->total_length就是整个文件长度+包上的大小 //可在void storage_read_from_file()函数里面见到pClientInfo->total_length = sizeof(TrackerHeader) + download_bytes; if (pClientInfo->total_offset>=pClientInfo->total_length) { //发送响应,继续接收 pClientInfo->total_length = 0; pClientInfo->total_offset = 0; pTask->offset = 0; pTask->length = 0; //然后重新设置为FDFS_STORAGE_STAGE_NIO_RECV接收的状态,因为与客户端建立的是长连接 pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_RECV; if ((result=event_add(&pTask->ev_read, / &g_network_tv)) != 0) { task_finish_clean_up(pTask); logError("file: "__FILE__", line: %d, "event_add fail.", __LINE__); return; } } else //continue to send file content { //否则的话清空数据缓冲,继续发送文件 pTask->length = 0; storage_dio_queue_push(pTask); } return; } } } |
(二).storage_dio.c
//数据服务器io初始化 int storage_dio_init() { int result; int threads_count_per_path; int context_count; struct storage_dio_thread_data *pThreadData; struct storage_dio_thread_data *pDataEnd; struct storage_dio_context *pContext; struct storage_dio_context *pContextEnd; pthread_t tid; pthread_attr_t thread_attr; if ((result=init_pthread_lock(&g_dio_thread_lock)) != 0) { return result; } if ((result=init_pthread_attr(&thread_attr, g_thread_stack_size)) != 0) { logError("file: "__FILE__", line: %d, " / "init_pthread_attr fail, program exit!", __LINE__); return result; } //g_path_count:数据服务器挂载目录个数 //存放文件时storage server支持多个路径(例如磁盘)。这里配置存放文件的基路径数目,通常只配一个目录 g_dio_thread_data = (struct storage_dio_thread_data *)malloc(sizeof(struct storage_dio_thread_data) * g_path_count); if (g_dio_thread_data == NULL) { logError("file: "__FILE__", line: %d, " / "malloc %d bytes fail, errno: %d, error info: %s", / __LINE__, (int)sizeof(struct storage_dio_thread_data) * / g_path_count, errno, strerror(errno)); return errno != 0 ? errno : ENOMEM; } //每个线程个数=磁盘读线程数+磁盘写线程数 threads_count_per_path = g_disk_reader_threads + g_disk_writer_threads; //上下文个数 context_count = threads_count_per_path * g_path_count; g_dio_contexts = (struct storage_dio_context *)malloc(sizeof(struct storage_dio_context) * context_count); if (g_dio_contexts == NULL) { logError("file: "__FILE__", line: %d, " / "malloc %d bytes fail, " / "errno: %d, error info: %s", __LINE__, / (int)sizeof(struct storage_dio_context) * / context_count, errno, strerror(errno)); return errno != 0 ? 
errno : ENOMEM; } g_dio_thread_count = 0; //分配到每个目录 pDataEnd = g_dio_thread_data + g_path_count; for (pThreadData=g_dio_thread_data; pThreadData<pDataEnd; pThreadData++) { //每个目录的读写线程 pThreadData->count = threads_count_per_path; pThreadData->contexts = g_dio_contexts + (pThreadData - g_dio_thread_data) * threads_count_per_path; pThreadData->reader=pThreadData->contexts; pThreadData->writer=pThreadData->contexts+g_disk_reader_threads; pContextEnd = pThreadData->contexts + pThreadData->count; for (pContext=pThreadData->contexts; pContext<pContextEnd;pContext++) { if ((result=task_queue_init(&(pContext->queue))) != 0) { return result; }
if ((result=init_pthread_lock(&(pContext->lock))) != 0) { return result; } result = pthread_cond_init(&(pContext->cond), NULL); if (result != 0) { logError("file: "__FILE__", line: %d, " / "pthread_cond_init fail, " / "errno: %d, error info: %s", / __LINE__, result, strerror(result)); return result; } if ((result=pthread_create(&tid, &thread_attr, / dio_thread_entrance, pContext)) != 0) { logError("file: "__FILE__", line: %d, " / "create thread failed, " / "startup threads: %d, " / "errno: %d, error info: %s", / __LINE__, g_dio_thread_count, / result, strerror(result)); return result; } else { pthread_mutex_lock(&g_dio_thread_lock); g_dio_thread_count++; pthread_mutex_unlock(&g_dio_thread_lock); } } }
pthread_attr_destroy(&thread_attr);
return result; }
void storage_dio_terminate() { struct storage_dio_context *pContext; struct storage_dio_context *pContextEnd; pContextEnd = g_dio_contexts + g_dio_thread_count; for (pContext=g_dio_contexts; pContext<pContextEnd; pContext++) { pthread_cond_signal(&(pContext->cond)); } }
int storage_dio_queue_push(struct fast_task_info *pTask) { StorageFileContext *pFileContext; struct storage_dio_context *pContext; int result; //获得文件上下文 pFileContext = &(((StorageClientInfo *)pTask->arg)->file_context); //获得文件上下文的磁盘io的读写线程,pFileContext->dio_thread_index:已经在storage_write_to_file()函数里面进行了赋值 pContext = g_dio_contexts + pFileContext->dio_thread_index; //将任务加入到这个线程里面的队列尾部 if ((result=task_queue_push(&(pContext->queue), pTask)) != 0) { task_finish_clean_up(pTask); return result; } //通知线程可以工作了 if ((result=pthread_cond_signal(&(pContext->cond))) != 0) { logError("file: "__FILE__", line: %d, " / "pthread_cond_signal fail, " / "errno: %d, error info: %s", / __LINE__, result, strerror(result));
task_finish_clean_up(pTask); return result; }
return 0; }
/*获得读或者写的线程下标*/ int storage_dio_get_thread_index(struct fast_task_info *pTask, / const int store_path_index, const char file_op) { StorageClientInfo *pClientInfo; StorageFileContext *pFileContext; struct storage_dio_thread_data *pThreadData; struct storage_dio_context *contexts; struct storage_dio_context *pContext; int count; pClientInfo = (StorageClientInfo *)pTask->arg; pFileContext = &(pClientInfo->file_context); //首先确定是哪一个目录的线程数据 pThreadData = g_dio_thread_data + store_path_index; //如果将读写线程分开 if (g_disk_rw_separated) { if (file_op == FDFS_STORAGE_FILE_OP_READ) { contexts = pThreadData->reader; count = g_disk_reader_threads; } else { contexts = pThreadData->writer; count = g_disk_writer_threads; } } else { contexts = pThreadData->contexts; count = pThreadData->count; } //随机选取其中一个线程来做 pContext = contexts + (((unsigned int)pClientInfo->sock) % count); //获得这个线程在线程数组里面的下标 return pContext - g_dio_contexts; }
//对文件处理的函数 int dio_deal_task(struct fast_task_info *pTask) { StorageFileContext *pFileContext; int result; pFileContext = &(((StorageClientInfo *)pTask->arg)->file_context); //如果是删除文件 if (pFileContext->op == FDFS_STORAGE_FILE_OP_DELETE) { if (unlink(pFileContext->filename) != 0) { result = errno != 0 ? errno : EACCES; pFileContext->log_callback(pTask, result); } else { result = 0; } pFileContext->done_callback(pTask, result); return result; } if (pFileContext->op == FDFS_STORAGE_FILE_OP_DISCARD) { pFileContext->offset+=pTask->length - pFileContext->buff_offset; if (pFileContext->offset >= pFileContext->end) { pFileContext->done_callback(pTask, 0); } else { pFileContext->buff_offset = 0; storage_nio_notify(pTask); //notify nio to deal }
return 0; } do { //初始比如说上传文件时,pFileContext->fd=-1 if (pFileContext->fd < 0) { if (pFileContext->op == FDFS_STORAGE_FILE_OP_READ) { //如果是读的话,就直接打开文件 pFileContext->fd=open(pFileContext->filename, O_RDONLY); } else //write { pFileContext->fd = open(pFileContext->filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); } if (pFileContext->fd < 0) { result = errno != 0 ? errno : EACCES; logError("file: "__FILE__", line: %d, " / "open file: %s fail, " / "errno: %d, error info: %s", / __LINE__, pFileContext->filename, / result, strerror(result)); break; } //如果文件偏移量大于,那么就定位到文件当前偏移量 if (pFileContext->offset > 0 && lseek(pFileContext->fd, / pFileContext->offset, SEEK_SET) < 0) { result = errno != 0 ? errno : EIO; logError("file: "__FILE__", line: %d, " / "lseek file: %s fail, " / "errno: %d, error info: %s", / __LINE__, pFileContext->filename, / result, strerror(result)); break; } } if (pFileContext->op == FDFS_STORAGE_FILE_OP_READ) { int64_t remain_bytes; int capacity_bytes; int read_bytes; //尝试一次读取完整个文件,初始传来的pFileContext->offset=0,pFileContext->end=整个文件字节大小 remain_bytes = pFileContext->end - pFileContext->offset; //任务的容量 //1.在第一次接收请求包时,调用处理下载文件请求时,pTask->length被设置为交易数据长度pTask->length = sizeof(TrackerHeader) // 具体是在void storage_read_from_file()代码里面 capacity_bytes = pTask->size - pTask->length; read_bytes = (capacity_bytes < remain_bytes) ? capacity_bytes : remain_bytes; if (read(pFileContext->fd, pTask->data + pTask->length, / read_bytes) != read_bytes) { result = errno != 0 ? 
errno : EIO; logError("file: "__FILE__", line: %d, " / "read from file: %s fail, " / "errno: %d, error info: %s", / __LINE__, pFileContext->filename, / result, strerror(result)); break; } //数据长度自增读取的字节数(初始下载文件时设置的长度是交易包长sizeof(TrackerHeader)) pTask->length += read_bytes; pFileContext->offset += read_bytes; result = 0; if (pFileContext->offset < pFileContext->end) { //没有读完,需要在继续去读取文件 storage_nio_notify(pTask); //notify nio to deal } else { /* file read done, close it */ close(pFileContext->fd); pFileContext->fd = -1; pFileContext->done_callback(pTask, result); } } else { int write_bytes; char *pDataBuff; //pDataBuff:数据缓冲的首地址 pDataBuff = pTask->data + pFileContext->buff_offset; //需要写入的字节数 write_bytes = pTask->length - pFileContext->buff_offset; //直接写文件 if (write(pFileContext->fd,pDataBuff,write_bytes)!=write_bytes) { result = errno != 0 ? errno : EIO; logError("file: "__FILE__", line: %d, " / "write to file: %s fail, " / "errno: %d, error info: %s", / __LINE__, pFileContext->filename, / result, strerror(result)); break; } if (g_check_file_duplicate) { CALC_HASH_CODES4(pDataBuff, write_bytes, pFileContext->file_hash_codes) } //然后文件上下文的偏移量=原来的偏移量+写入的字节数 pFileContext->offset += write_bytes; result = 0; //分片写文件,如果偏移量小于文件长度,那么需要继续读取 if (pFileContext->offset < pFileContext->end) { pFileContext->buff_offset = 0; //再次发起写请求时,重新被设置为 storage_nio_notify(pTask); //notify nio to deal } else { /* 否则的话,文件写完,关闭文件,在调用文件处理完的回调函数*/ close(pFileContext->fd); pFileContext->fd = -1; pFileContext->done_callback(pTask, result); } } } while (0); if (result != 0) //error { /* file read/write done, close it */ if (pFileContext->fd > 0) { close(pFileContext->fd); pFileContext->fd = -1;
if (pFileContext->op == FDFS_STORAGE_FILE_OP_WRITE) { if (unlink(pFileContext->filename) != 0) { logError("file: "__FILE__", line: %d, " / "delete file: %s fail, " / "errno: %d, error info: %s", / __LINE__, pFileContext->filename, / errno, strerror(errno)); } } } pFileContext->done_callback(pTask, result); } return result; }
//读写磁盘线程处理 static void *dio_thread_entrance(void* arg) { int result; struct storage_dio_context *pContext; struct fast_task_info *pTask; pContext = (struct storage_dio_context *)arg; pthread_mutex_lock(&(pContext->lock)); while (g_continue_flag) { if ((result=pthread_cond_wait(&(pContext->cond), / &(pContext->lock))) != 0) { logError("file: "__FILE__", line: %d, " / "call pthread_cond_wait fail, " / "errno: %d, error info: %s", / __LINE__, result, strerror(result)); } //取队列头部元素进行处理 while ((pTask=task_queue_pop(&(pContext->queue))) != NULL) { ((StorageClientInfo *)pTask->arg)->deal_func(pTask); } } pthread_mutex_unlock(&(pContext->lock)); if ((result=pthread_mutex_lock(&g_dio_thread_lock)) != 0) { logError("file: "__FILE__", line: %d, " / "call pthread_mutex_lock fail, " / "errno: %d, error info: %s", / __LINE__, result, strerror(result)); } g_dio_thread_count--; if ((result=pthread_mutex_unlock(&g_dio_thread_lock)) != 0) { logError("file: "__FILE__", line: %d, " / "call pthread_mutex_lock fail, " / "errno: %d, error info: %s", / __LINE__, result, strerror(result)); } logDebug("file: "__FILE__", line: %d, " / "dio thread exited, thread count: %d", / __LINE__, g_dio_thread_count);
return NULL; } |