The semaphore structure is defined as follows:
linux+v2.6.28/include/linux/semaphore.h:
struct semaphore {
    spinlock_t        lock;        /* spinlock protecting the structure */
    unsigned int      count;
    struct list_head  wait_list;   /* kernel doubly linked list of waiters */
};
Initializing a semaphore:
#define init_MUTEX(sem)        sema_init(sem, 1)
#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
Acquiring a semaphore:
extern void down(struct semaphore *sem);
extern int __must_check down_interruptible(struct semaphore *sem);
Both functions acquire the semaphore. The difference is that down() puts the caller into an uninterruptible sleep, so the wait cannot be broken by a signal, whereas down_interruptible() also sleeps but can be interrupted by a signal, in which case it returns a non-zero value; its return value must therefore be checked.
Releasing a semaphore:
extern void up(struct semaphore *sem);
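A minimal usage sketch of this API in a driver method (xxx_dev_t, dev->sem and dev->buf are hypothetical names used only for illustration, not part of the kernel headers above):

/* Minimal sketch: a driver method protecting its critical section with
 * the semaphore API above. xxx_dev_t, dev->sem and dev->buf are
 * hypothetical names used only for illustration. */
static ssize_t xxx_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *ppos)
{
    struct xxx_dev_t *dev = filp->private_data;

    if (down_interruptible(&dev->sem))   /* sleep until the semaphore is free */
        return -ERESTARTSYS;             /* a signal interrupted the sleep */

    /* ... critical section: it is now safe to touch dev->buf ... */

    up(&dev->sem);                       /* release the semaphore */
    return count;
}

The semaphore itself would be initialized once, for example with init_MUTEX(&dev->sem), before the device is registered; down_interruptible() is usually preferred over down() so that a process cannot be left in an unkillable sleep.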
/***********************************************************************************************/
To better understand blocking and non-blocking access, first look at two pieces of code:

/****************************** Code 1 ***************************************/
/* Read one character from the serial port, blocking */
fd = open("/dev/ttyS1", O_RDWR);
...
ret = read(fd, buf, 1);            /* returns only when data is available on the port */
if (ret > 0)
    printf("%c\n", buf[0]);

/****************************** Code 2 ***************************************/
/* Read one character from the serial port, non-blocking */
fd = open("/dev/ttyS2", O_RDWR | O_NONBLOCK);
...
while (read(fd, buf, 1) != 1);     /* returns even when no data is available, so keep retrying */
printf("%c", buf[0]);
Blocking means that when an operation on a device cannot obtain the resource it needs, the process is suspended (put to sleep) until the resource becomes available, and only then does the operation proceed.
Blocking may sound inefficient, but it is not. If the device did not block, the only way for the caller to obtain the resource would be to poll it continuously, which needlessly burns CPU time. With blocking access, a process that cannot obtain the resource goes to sleep and yields the CPU to other processes until the resource becomes available.
In a Linux driver, wait queues are used to put such processes to sleep and wake them up again.
The wait queue entry structure is as follows:
linux+v2.6.28/include/linux/wait.h:
typedef struct __wait_queue wait_queue_t;

struct __wait_queue {
    unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
    void *private;
    wait_queue_func_t func;
    struct list_head task_list;
};
The wait queue head is defined as follows:
linux+v2.6.28/include/linux/wait.h:
struct __wait_queue_head {
    spinlock_t lock;
    struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
Initializing the wait queue head and wait queue entries:
There are two ways to initialize a wait queue head:
void init_waitqueue_head(wait_queue_head_t *q);
DECLARE_WAIT_QUEUE_HEAD(name);
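For example (my_queue and xxx_dev are hypothetical names used only for illustration), a wait queue head can either be declared statically in one step or embedded in a device structure and initialized at runtime:

/* Static definition and initialization in one step (hypothetical name my_queue) */
static DECLARE_WAIT_QUEUE_HEAD(my_queue);

/* Or embed the head in a device structure and initialize it at runtime */
struct xxx_dev {
    wait_queue_head_t rx_wait;
};

static void xxx_setup(struct xxx_dev *dev)
{
    init_waitqueue_head(&dev->rx_wait);   /* must run before the head is used */
}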
linux+v2.6.28/include/linux/wait.h:
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                  \
    .lock      = __SPIN_LOCK_UNLOCKED(name.lock),              \
    .task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
    wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
linux+v2.6.28/kernel/wait.c:
void init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}
A wait queue entry is defined and initialized with a single macro:
DECLARE_WAITQUEUE(name, tsk)
#define __WAITQUEUE_INITIALIZER(name, tsk) {                   \
    .private   = tsk,                                          \
    .func      = default_wake_function,                        \
    .task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
    wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
Before a wait queue can be used, both the wait queue head and the wait queue entries must be defined and initialized.
Once they are defined and initialized, entries are added to or removed from the wait queue head:
linux+v2.6.28/kernel/wait.c:
/* Add an entry to the wait queue */
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
    unsigned long flags;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
/* Remove an entry from the wait queue */
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
    unsigned long flags;

    spin_lock_irqsave(&q->lock, flags);
    __remove_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
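Putting these primitives together, the classic manual sleep pattern in a driver read method looks roughly like the sketch below; xxx_dev, its rx_wait wait queue head and its data_ready flag are hypothetical names used only for illustration:

/* Sketch of the manual sleep/wake pattern built from the primitives above.
 * xxx_dev, dev->rx_wait and dev->data_ready are hypothetical names. */
static ssize_t xxx_read(struct file *filp, char __user *buf,
                        size_t count, loff_t *ppos)
{
    struct xxx_dev *dev = filp->private_data;
    ssize_t ret = 0;
    DECLARE_WAITQUEUE(wait, current);            /* wait queue entry for the current task */

    add_wait_queue(&dev->rx_wait, &wait);        /* hang ourselves on the wait queue head */
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);   /* mark sleeping before testing, to avoid a lost wakeup */
        if (dev->data_ready)                     /* condition already true: stop waiting */
            break;
        if (signal_pending(current)) {           /* interrupted by a signal */
            ret = -ERESTARTSYS;
            break;
        }
        schedule();                              /* yield the CPU until wake_up() runs */
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(&dev->rx_wait, &wait);
    if (ret)
        return ret;

    /* ... data is available: copy it to user space ... */
    return count;
}

The wait_event family of macros below wraps this whole loop up for the common case.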
Waiting for an event:
wait_event(queue, condition);
wait_event_interruptible(queue, condition);
wait_event_timeout(queue, condition, timeout);
wait_event_interruptible_timeout(queue, condition, timeout);
The first parameter of the wait_event_xxx macros is the wait queue head; the second parameter, condition, is an expression that must be true, otherwise the caller blocks. The difference between wait_event() and wait_event_interruptible() is that the latter can be interrupted by a signal. timeout is the maximum time to block, in jiffies; when the timeout passed as the third parameter expires, the macro returns regardless of whether condition has been satisfied.
As an example, look at the implementation of wait_event_interruptible():
linux+v2.6.28/include/linux/wait.h:
#define __wait_event_interruptible(wq, condition, ret)              \
do {                                                                \
    DEFINE_WAIT(__wait);                                            \
                                                                    \
    for (;;) {                                                      \
        prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);          \
        if (condition)      /* condition met: stop waiting */       \
            break;                                                  \
        if (!signal_pending(current)) {                             \
            schedule();                                             \
            continue;                                               \
        }                                                           \
        ret = -ERESTARTSYS;                                         \
        break;                                                      \
    }                                                               \
    finish_wait(&wq, &__wait);                                      \
} while (0)

#define wait_event_interruptible(wq, condition)                     \
({                                                                  \
    int __ret = 0;                                                  \
    if (!(condition))                                               \
        __wait_event_interruptible(wq, condition, __ret);           \
    __ret;                                                          \
})
Waking up the wait queue:
void wake_up(wait_queue_head_t *queue);
void wake_up_interruptible(wait_queue_head_t *queue);
Both functions wake up the processes sleeping on all wait queue entries attached to the wait queue head queue. wake_up() wakes processes in both TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE state, whereas wake_up_interruptible() only wakes those in TASK_INTERRUPTIBLE state, so it is the natural partner of wait_event_interruptible().
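A minimal sketch of how the two sides pair up in a driver (xxx_dev, dev->rx_wait and dev->data_len are hypothetical names; locking is omitted for brevity):

/* Sketch: a blocking read waits for data, the write side supplies it
 * and wakes the reader. xxx_dev, dev->rx_wait and dev->data_len are
 * hypothetical names used only for illustration. */
static ssize_t xxx_read(struct file *filp, char __user *buf,
                        size_t count, loff_t *ppos)
{
    struct xxx_dev *dev = filp->private_data;

    /* Sleep until data_len becomes non-zero; returns non-zero if a signal arrives. */
    if (wait_event_interruptible(dev->rx_wait, dev->data_len > 0))
        return -ERESTARTSYS;

    /* ... copy up to count bytes to user space, decrease dev->data_len ... */
    return count;
}

static ssize_t xxx_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *ppos)
{
    struct xxx_dev *dev = filp->private_data;

    /* ... copy data from user space, increase dev->data_len ... */

    wake_up_interruptible(&dev->rx_wait);   /* let a sleeping reader re-check the condition */
    return count;
}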
/***********************************************************************/
The driver's poll method is what supports the select() system call from user space.
The prototype of select() is:
int select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout);
Here readfds, writefds and exceptfds are the sets of file descriptors monitored by select() for read, write and exception conditions respectively. numfds is the highest-numbered file descriptor in any of the sets plus one. The timeout parameter points to a struct timeval and makes select() return after at most that amount of time.
struct timeval is defined as follows:
struct timeval {
    int tv_sec;     /* seconds */
    int tv_usec;    /* microseconds */
};
The following macros set, clear and test file descriptor sets:
FD_ZERO(fd_set *set);           /* clear a file descriptor set */
FD_SET(int fd, fd_set *set);    /* add a file descriptor to a set */
FD_CLR(int fd, fd_set *set);    /* remove a file descriptor from a set */
FD_ISSET(int fd, fd_set *set);  /* test whether a file descriptor is set */
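As a small user-space sketch, the following waits up to five seconds for a device to become readable (the path /dev/xxx is a placeholder, not a device defined in this article):

/* Sketch: wait up to 5 seconds for /dev/xxx (placeholder path) to
 * become readable. Error handling is kept minimal. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/select.h>

int main(void)
{
    fd_set rfds;
    struct timeval tv;
    int fd = open("/dev/xxx", O_RDONLY | O_NONBLOCK);
    if (fd < 0)
        return 1;

    FD_ZERO(&rfds);            /* start with an empty read set */
    FD_SET(fd, &rfds);         /* watch our device for readability */
    tv.tv_sec  = 5;            /* give up after 5 seconds */
    tv.tv_usec = 0;

    if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(fd, &rfds))
        printf("device is readable\n");
    else
        printf("timeout or error\n");

    close(fd);
    return 0;
}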
The prototype of the poll method in a device driver is:
unsigned int (*poll)(struct file *filp, poll_table *wait);
The first parameter is a pointer to the file structure, and the second is a pointer to the poll table.
This function should do two things:
(1) Call poll_wait() on every wait queue whose wakeup may indicate a change in the state of the device file; this adds the corresponding wait queue head to the poll_table.
(2) Return a mask indicating whether the device can currently be read from or written to without blocking.
poll_table and poll_wait() are defined as follows:
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

typedef struct poll_table_struct {
    poll_queue_proc qproc;
} poll_table;

static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
    if (p && wait_address)
        p->qproc(filp, wait_address, p);   /* call the function installed by the poll/select core */
}

qproc is installed by the poll/select core through init_poll_funcptr(); for select() it points to __pollwait() in fs/select.c, which allocates a poll_table_entry and adds the calling process to the wait queue passed in by the driver. That is how the process ends up sleeping on the driver's wait queues until one of them is woken.
poll() returns the readiness state of the device as the bitwise OR of macros such as POLLIN, POLLOUT, POLLPRI, POLLERR and POLLNVAL, each of which represents one device condition.
A typical template for a driver's poll() method is:
static unsigned int xxx_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;
    struct xxx_dev_t *xxx_dev = filp->private_data;

    down(&xxx_dev->sem);
    poll_wait(filp, &xxx_dev->r_wait, wait);   /* register the read wait queue head for polling */
    poll_wait(filp, &xxx_dev->w_wait, wait);   /* register the write wait queue head for polling */

    if (...)
        mask |= POLLIN | POLLRDNORM;           /* data can be read */
    if (...)
        mask |= POLLOUT | POLLWRNORM;          /* data can be written */

    up(&xxx_dev->sem);
    return mask;
}
All of the above is put to work in the character device driver below:
/*
 * globalmem device: there is no real hardware behind it; it exists mainly
 * for learning Linux device driver development.
 * (1) a semaphore supports concurrent access
 * (2) wait queues support blocking access
 * (3) a poll method supports select() queries from user space
 * Comments and discussion welcome: lanpeng722@gmail.com
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#define LAN_SIZE  0x1000   /* size of the global memory: 4KB */
#define MEM_CLEAR 0x1      /* ioctl command: clear the global memory */
#define LAN_MAJOR 244      /* major device number */

static int lan_major = LAN_MAJOR;

struct lan_dev_t {
    struct cdev cdev;
    unsigned int current_len;          /* number of valid bytes in the FIFO */
    unsigned char lan_buf[LAN_SIZE];
    struct semaphore sem;              /* semaphore protecting concurrent access */
    wait_queue_head_t r_wait;          /* wait queue head for blocking reads */
    wait_queue_head_t w_wait;          /* wait queue head for blocking writes */
};

struct lan_dev_t *lan_dev;

static int lan_open(struct inode *inode, struct file *filp)
{
    filp->private_data = lan_dev;
    printk("Open OK!\n");
    return 0;
}

static int lan_release(struct inode *inode, struct file *filp)
{
    printk("Close OK!\n");
    return 0;
}

static ssize_t lan_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
    unsigned long p = *ppos;
    size_t count = size;
    int ret = 0;
    struct lan_dev_t *lan_dev = filp->private_data;
    DECLARE_WAITQUEUE(wait, current);              /* wait queue entry for this task */

    /* ---------------------------- blocking path ---------------------------- */
    if (filp->f_flags & O_NONBLOCK)
        goto no_block;

    down(&lan_dev->sem);
    add_wait_queue(&lan_dev->r_wait, &wait);       /* join the read wait queue */

    while (lan_dev->current_len == 0) {
        __set_current_state(TASK_INTERRUPTIBLE);   /* mark this process as sleeping */
        up(&lan_dev->sem);
        schedule();                                /* let other processes run */
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            goto signal_come;
        }
        down(&lan_dev->sem);
    }

    if (size > lan_dev->current_len)
        size = lan_dev->current_len;

    if (copy_to_user(buf, lan_dev->lan_buf, size)) {
        ret = -EFAULT;
        goto out;
    } else {
        memcpy(lan_dev->lan_buf, lan_dev->lan_buf + size, lan_dev->current_len - size);
        lan_dev->current_len -= size;
        wake_up_interruptible(&lan_dev->w_wait);   /* wake any writer waiting for space */
        ret = size;
    }
out:
    up(&lan_dev->sem);
signal_come:
    remove_wait_queue(&lan_dev->r_wait, &wait);    /* leave the read wait queue */
    set_current_state(TASK_RUNNING);
    return ret;

    /* -------------------------- non-blocking path -------------------------- */
no_block:
    if (p >= LAN_SIZE)
        return count ? -ENXIO : 0;
    if (count > LAN_SIZE - p)
        count = LAN_SIZE - p;
    if (count > lan_dev->current_len)
        count = lan_dev->current_len;

    down(&lan_dev->sem);
    if (copy_to_user(buf, (void *)(lan_dev->lan_buf + p), count)) {
        ret = -EFAULT;
    } else {
        *ppos += count;
        p = *ppos;
        if (lan_dev->current_len > 0) {
            memcpy(lan_dev->lan_buf, lan_dev->lan_buf + count, lan_dev->current_len - count);
            lan_dev->current_len -= count;
        }
        ret = count;
        printk("Read %zu bytes from %lu\n", count, p);
    }
    up(&lan_dev->sem);
    return ret;
}

static ssize_t lan_write(struct file *filp, const char __user *buf, size_t size, loff_t *ppos)
{
    unsigned long p = *ppos;
    unsigned int count = size;
    int ret = 0;
    struct lan_dev_t *lan_dev = filp->private_data;
    DECLARE_WAITQUEUE(wait, current);

    /* ---------------------------- blocking path ---------------------------- */
    if (filp->f_flags & O_NONBLOCK)
        goto no_block;

    down(&lan_dev->sem);
    add_wait_queue(&lan_dev->w_wait, &wait);       /* join the write wait queue */

    while (lan_dev->current_len == LAN_SIZE) {
        __set_current_state(TASK_INTERRUPTIBLE);
        up(&lan_dev->sem);
        schedule();
        if (signal_pending(current)) {
            ret = -EAGAIN;
            goto signal_come;
        }
        down(&lan_dev->sem);
    }

    if (size > LAN_SIZE - lan_dev->current_len)
        size = LAN_SIZE - lan_dev->current_len;

    if (copy_from_user(lan_dev->lan_buf + lan_dev->current_len, buf, size)) {
        ret = -EFAULT;
        goto out;
    } else {
        lan_dev->current_len += size;
        printk("lan_dev->current_len = %u\n", lan_dev->current_len);
        wake_up_interruptible(&lan_dev->r_wait);   /* wake any reader waiting for data */
        ret = size;
    }
out:
    up(&lan_dev->sem);
signal_come:
    remove_wait_queue(&lan_dev->w_wait, &wait);
    set_current_state(TASK_RUNNING);
    return ret;

    /* -------------------------- non-blocking path -------------------------- */
no_block:
    if (p >= LAN_SIZE)
        return count ? -ENXIO : 0;
    if (count > LAN_SIZE - p)
        count = LAN_SIZE - p;

    down(&lan_dev->sem);
    if (copy_from_user(lan_dev->lan_buf + p, buf, count)) {
        ret = -EFAULT;
    } else {
        *ppos += count;
        p = *ppos;
        lan_dev->current_len += count;
        ret = count;
        printk("Write %u bytes from %lu\n", count, p);
    }
    up(&lan_dev->sem);
    return ret;
}

static int lan_ioctl(struct inode *inodep, struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct lan_dev_t *lan_dev = filp->private_data;

    switch (cmd) {
    case MEM_CLEAR:
        down(&lan_dev->sem);
        memset(lan_dev->lan_buf, 0, LAN_SIZE);
        lan_dev->current_len = 0;
        up(&lan_dev->sem);
        printk("Memset buf ok!\n");
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

static unsigned int lan_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;
    struct lan_dev_t *lan_dev = filp->private_data;

    down(&lan_dev->sem);
    poll_wait(filp, &lan_dev->r_wait, wait);
    poll_wait(filp, &lan_dev->w_wait, wait);

    if (lan_dev->current_len != 0)
        mask |= POLLIN | POLLRDNORM;    /* data can be read */
    if (lan_dev->current_len != LAN_SIZE)
        mask |= POLLOUT | POLLWRNORM;   /* data can be written */

    up(&lan_dev->sem);
    return mask;
}

static struct file_operations lan_fops = {
    .owner   = THIS_MODULE,
    .open    = lan_open,
    //.llseek  = lan_llseek,
    .read    = lan_read,
    .write   = lan_write,
    .ioctl   = lan_ioctl,
    .poll    = lan_poll,
    .release = lan_release,
};

static void lan_setup_cdev(struct lan_dev_t *lan_dev, int index)
{
    int err, devno;

    devno = MKDEV(lan_major, index);
    cdev_init(&lan_dev->cdev, &lan_fops);
    err = cdev_add(&lan_dev->cdev, devno, 1);
    if (err)
        printk(KERN_NOTICE "Error %d adding cdev!\n", err);
}

static int __init lancdev_init(void)
{
    int ret;
    dev_t devno = MKDEV(lan_major, 0);

    ret = register_chrdev_region(devno, 1, "globalmem");
    if (ret < 0)
        return ret;

    lan_dev = kmalloc(sizeof(struct lan_dev_t), GFP_KERNEL);
    if (!lan_dev) {
        printk("kmalloc Error!\n");
        unregister_chrdev_region(devno, 1);   /* give back the region registered above */
        return -ENOMEM;
    }
    memset(lan_dev, 0, sizeof(struct lan_dev_t));

    /* initialize the semaphore and wait queues before the cdev goes live */
    init_MUTEX(&lan_dev->sem);
    init_waitqueue_head(&lan_dev->r_wait);
    init_waitqueue_head(&lan_dev->w_wait);

    lan_setup_cdev(lan_dev, MINOR(devno));
    return 0;
}

static void __exit lancdev_exit(void)
{
    cdev_del(&lan_dev->cdev);
    kfree(lan_dev);
    unregister_chrdev_region(MKDEV(lan_major, 0), 1);
}

MODULE_AUTHOR("LanPeng");
MODULE_LICENSE("GPL");

module_init(lancdev_init);
module_exit(lancdev_exit);
A user-space test program that exercises the driver with select():
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
int main(int argc, char **argv)
{
    int fd;
    fd_set rfds, wfds;

    fd = open("/dev/globalmem_lan", O_RDWR);
    if (fd < 0) {
        printf("Open Error!\n");
        return -1;
    }
    if (ioctl(fd, 0x01, 0) < 0) {
        printf("ioctl Error!\n");
        return -1;
    }

    while (1) {
        FD_ZERO(&rfds);
        FD_ZERO(&wfds);
        FD_SET(fd, &rfds);
        FD_SET(fd, &wfds);

        select(fd + 1, &rfds, &wfds, NULL, NULL);

        if (FD_ISSET(fd, &rfds))
            printf("Poll monitor: can be read!\n");
        if (FD_ISSET(fd, &wfds))
            printf("Poll monitor: can be written!\n");
    }
    return 0;
}