Linux Spinlocks, Semaphores, and Mutexes

This article walks through how spinlocks, semaphores, and mutexes work, when to use each, and how they differ. By analyzing the core kernel code paths, it aims to help readers understand how these locking primitives protect shared resources in concurrent environments.

 

 

 

=========================== Spinlock ==========================

 

1) Dynamic definition and initialization:

spinlock_t sleep_spinlock;

//dynamically initialize the specified spinlock_t

spin_lock_init(&sleep_spinlock);

2) Static definition and initialization:

DEFINE_SPINLOCK(sleep_spinlock);

 

3) API usage

//disables preemption; does not disable local interrupts

static inline void spin_lock(spinlock_t *lock);

static inline void spin_unlock(spinlock_t *lock);

 

//disables local interrupts and acquires the specified lock; this variant family exists for use with interrupt contexts

static inline void spin_lock_irq(spinlock_t *lock);

static inline void spin_unlock_irq(spinlock_t *lock);

 

//saves the processor's current CPSR/PSTATE register value into a variable, disables local interrupts, and acquires the specified lock

When the same lock is taken both in an interrupt handler and in a driver's normal (process) context, use spin_lock_irqsave rather than spin_lock; otherwise an interrupt arriving while the lock is held in process context can deadlock on that CPU.

spin_lock_irqsave(&sleep_spinlock, flags);

spin_unlock_irqrestore(&sleep_spinlock, flags);

 

//returns nonzero if the specified lock is currently held, 0 otherwise

spin_is_locked
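
As a usage illustration of the rule above, here is a minimal sketch of sharing one spinlock between an IRQ handler and process context. The my_dev structure and the my_drv_irq / my_drv_write functions are hypothetical names used only for this example:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_dev {
        spinlock_t lock;
        u32 pending;
};

static irqreturn_t my_drv_irq(int irq, void *data)
{
        struct my_dev *dev = data;

        spin_lock(&dev->lock);                  /* already in IRQ context, plain spin_lock is enough */
        dev->pending++;
        spin_unlock(&dev->lock);
        return IRQ_HANDLED;
}

static void my_drv_write(struct my_dev *dev, u32 val)
{
        unsigned long flags;

        /* process context: disable local IRQs, otherwise my_drv_irq() could
         * interrupt us while we hold the lock and deadlock on this CPU */
        spin_lock_irqsave(&dev->lock, flags);
        dev->pending = val;
        spin_unlock_irqrestore(&dev->lock, flags);
}

spin_lock_init(&dev->lock) (or a static DEFINE_SPINLOCK) would be done in the probe path before the IRQ is requested.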

 

 

 

 

============================ Semaphore =========================

The classic semaphore example is the producer (up) / consumer (down) problem, for example the data a Wi-Fi driver hands up to the protocol stack.

When a consumer thread finds that the shop has no bread:

with a spinlock, it pulls up a stool and sits at the shop door waiting idly;

with a semaphore, it leaves its phone number with the shop clerk and goes off to do other things; when bread arrives, the clerk calls the consumer to come and pick it up.

kernel/include/linux/semaphore.h 

/* Please don't access any members of this structure directly */
struct semaphore {
    raw_spinlock_t      lock;                         //protects operations on count and wait_list
    unsigned int        count;                         //number of kernel execution paths allowed into the critical section, i.e. how many units of the resource are currently available for consumption. (The value at initialization only says how many units exist at the start; count changes dynamically afterwards.)
    struct list_head    wait_list;                   //manages all tasks sleeping on this semaphore; a task that fails to acquire it is added to this list
};

 

1.1 Dynamic definition and initialization

struct semaphore    sem;                                    //definition

sema_init(&sem, 0);                                             //initialize with count 0: the semaphore starts out locked, i.e. "out of stock"; an initial down() blocks and sleeps until someone produces a resource with up(). Produce first, consume later.

sema_init(&sem, 1);                                            //initialize count to 1 (the most common choice in the kernel): one unit of the resource is available at the start, so an initial down() succeeds. Consume first, then produce.

sema_init(&sem, n);                                             //initialize count to n: n units of the resource are available from the start; after n down() calls someone must produce with up(). Consume n times first, then produce (a bit like the Kuqi bike-share giving you 10 free rides up front).
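
A minimal sketch of the "produce first, consume later" pattern above, with count initialized to 0. The rx_sem, wifi_rx_irq and rx_thread names are hypothetical and used only for illustration: the receive thread sleeps in down_interruptible() until the interrupt handler produces a packet with up():

#include <linux/semaphore.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

static struct semaphore rx_sem;

static irqreturn_t wifi_rx_irq(int irq, void *data)
{
        up(&rx_sem);                            /* producer: one more packet available, wake a consumer */
        return IRQ_HANDLED;
}

static int rx_thread(void *unused)
{
        while (!kthread_should_stop()) {
                if (down_interruptible(&rx_sem))        /* consumer: sleeps here until up() is called */
                        continue;
                /* ... hand one received packet up to the protocol stack ... */
        }
        return 0;
}

/* in the probe/init path, before requesting the IRQ and starting the thread: */
/*         sema_init(&rx_sem, 0);          -- starts "out of stock"           */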
 

static inline void sema_init(struct semaphore *sem, int val)                                                                                   

{
        static struct lock_class_key __key;
        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
        lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);                                                                    
}

#define __SEMAPHORE_INITIALIZER(name, n)                                \
{                                                                       \
        .lock           = __RAW_SPIN_LOCK_UNLOCKED((name).lock),        \
        .count          = n,                                            \
        .wait_list      = LIST_HEAD_INIT((name).wait_list),             \                                                                      
}
 

 

1.2 Static definition and initialization

#define DEFINE_SEMAPHORE(name)  \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)         //the static definition sets count to 1

 

 

void down(struct semaphore *sem)
{
    unsigned long flags;
    raw_spin_lock_irqsave(&sem->lock, flags);        //disable interrupts and preemption to protect the count and the wait list
    if (likely(sem->count > 0))                                        //if someone has already called up(), i.e. count > 0, acquire the semaphore immediately: the resource has already been produced, so there is no need to sleep
        sem->count--;
    else
        __down(sem);
    raw_spin_unlock_irqrestore(&sem->lock, flags);
}

 

What does the implementation of down() actually look like?
struct semaphore_waiter {                                        
        struct list_head list;                                        //each entry on the wait list is this list_head; the owning task is reached through it
        struct task_struct *task;      
        bool up;              
};
 

__down() boils down to a call to:

 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);

static inline int __sched __down_common(struct semaphore *sem, long state,
                                long timeout)
{
    struct task_struct *task = current;
    struct semaphore_waiter waiter;

    list_add_tail(&waiter.list, &sem->wait_list);                    // down() adds a semaphore_waiter (which records the current task) to the wait list; the matching up() removes this node when it wakes the task.
    waiter.task = task;
    waiter.up = false;

    for (;;) {
        if (signal_pending_state(state, task))
            goto interrupted;
        if (unlikely(timeout <= 0))
            goto timed_out;
        __set_task_state(task, state);                                //change the task state; TASK_UNINTERRUPTIBLE means signals cannot wake it
        raw_spin_unlock_irq(&sem->lock);
        timeout = schedule_timeout(timeout);              //schedule out and sleep until explicitly woken or the timeout expires
        raw_spin_lock_irq(&sem->lock);
        if (waiter.up)                                                               //after waking: if up() explicitly woke us, return success; if the timeout expired, take the timed_out path; if a signal is pending, take the interrupted path
            return 0;
    }

 timed_out:
    list_del(&waiter.list);
    return -ETIME;

 interrupted:
    list_del(&waiter.list);
    return -EINTR;
}
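
The timed_out and interrupted paths above are what the timeout/interruptible variants of down() rely on. A small sketch of down_timeout(), which passes a real timeout into __down_common instead of MAX_SCHEDULE_TIMEOUT; the evt_sem and my_wait_for_event names are hypothetical:

#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct semaphore evt_sem;

/* hypothetical helper: wait up to 100 ms for an event produced by up(&evt_sem) */
static int my_wait_for_event(void)
{
        int ret = down_timeout(&evt_sem, msecs_to_jiffies(100));

        if (ret == -ETIME)
                return ret;             /* nobody called up() within 100 ms: the timed_out path above */
        return 0;                       /* woken by up(): waiter.up was set and __down_common returned 0 */
}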

 

void up(struct semaphore *sem)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&sem->lock, flags);
    if (likely(list_empty(&sem->wait_list)))                             //list empty: nobody is waiting on this semaphore, so just increment count and return; the next down() will see count > 0 and take the semaphore directly. This works like a completion, a producer/consumer style of synchronization: you may up() twice first and then down() twice later. A mutex cannot do this: no matter how many consecutive unlocks, only one later lock succeeds.
        sem->count++;
    else
        __up(sem);                                                                            //list not empty: some task is sleeping and waiting, so wake one via __up()
    raw_spin_unlock_irqrestore(&sem->lock, flags);
}

static noinline void __sched __up(struct semaphore *sem)

{

      //list_first_entry is the head of the wait queue, i.e. the task that joined earliest: first in, first out.

        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,   struct semaphore_waiter, list);
        list_del(&waiter->list);                                                    //remove this semaphore_waiter from the wait list
        waiter->up = true;                                                            //mark that it was explicitly woken by up()
        wake_up_process(waiter->task);                                //wake the corresponding task
}

 

 

As you can see, a semaphore is really just a count of how many kernel execution paths may currently enter the critical section, plus a list that tracks all tasks waiting to enter, plus a spinlock that protects operations on the structure's members.

 

The following introduction is excerpted from http://www.cnblogs.com/diyingyun/archive/2011/12/04/2275229.html

A semaphore, sometimes called a signal light, is a facility used in multithreaded environments; it coordinates the threads so that they use shared resources correctly and sensibly.

This is exactly what happens when multiple threads move data around in a Wi-Fi driver.

 

Differences between a semaphore and a mutex:

1. A mutex is used for mutual exclusion between threads; a semaphore is used for synchronization between threads.
This is the fundamental difference between the two, i.e. the difference between mutual exclusion and synchronization.
Mutual exclusion: only one accessor may access a given resource at a time; the access is unique and exclusive. But mutual exclusion cannot constrain the order in which accessors reach the resource, i.e. access is unordered.
Synchronization: on top of mutual exclusion (in most cases), other mechanisms are used to give accessors ordered access to the resource. In most cases synchronization already implies mutual exclusion; in particular, all writes to the resource must be mutually exclusive. The minority of cases are those where multiple accessors are allowed to access the resource at the same time.
2. A mutex can only take the values 0/1; a semaphore can take any non-negative integer value.
In other words, a mutex can only provide mutually exclusive access to one resource; it cannot solve multi-thread mutual exclusion over multiple resources. A semaphore can provide multi-thread mutual exclusion and synchronization over multiple resources of the same kind; as a binary semaphore it can also provide mutually exclusive access to a single resource.



3. A mutex must be locked and unlocked by the same thread, as a matched pair; a semaphore can be released (up) by one thread and acquired (down) by another (see the sketch below).
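
A minimal sketch of difference 3, using the hypothetical names done_sem, data_mutex and worker_fn: the semaphore is released by a thread other than the one that waits on it, whereas the mutex is locked and unlocked by the same thread:

#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/kthread.h>

static struct semaphore done_sem;           /* signalled by the worker, waited on by the caller */
static DEFINE_MUTEX(data_mutex);            /* protects shared_data; locked/unlocked in one thread */
static int shared_data;

static int worker_fn(void *unused)
{
        mutex_lock(&data_mutex);            /* this thread locks ...                  */
        shared_data = 42;
        mutex_unlock(&data_mutex);          /* ... and the same thread must unlock    */

        up(&done_sem);                      /* released here, acquired by caller() below */
        return 0;
}

static void caller(void)
{
        sema_init(&done_sem, 0);
        kthread_run(worker_fn, NULL, "worker");
        down(&done_sem);                    /* sleeps until worker_fn() calls up()    */
}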

 

 

 

 

 

 

 

================================ Mutex ===========================

A semaphore is a mechanism that, in a parallel-processing environment, protects a shared resource accessed by multiple processors; it leans toward synchronization. A mutex, by contrast, provides mutual exclusion and leans toward being a pure lock.

A semaphore is like a restroom that can hold N people (N being the count value passed to sema_init): anyone can go in as long as it is not full; once it is full, newcomers queue outside.

A mutex is like a portable roadside toilet: only one person fits at a time, and only after the occupant comes out can the next person in the queue go in.

It is easy to see, then, that a mutex behaves much like a semaphore whose count is 1:

struct mutex {
    /* 1: unlocked, 0: locked, negative: locked, possible waiters */
    atomic_t        count;                                           //atomic count: 1 means nobody holds the lock, 0 means it is held, negative means it is held and there are waiters on the wait queue
    spinlock_t      wait_lock;                    //spinlock protecting the wait_list sleep queue
    struct list_head    wait_list;               //manages all processes sleeping on this mutex; tasks that fail to get the lock sleep on this list
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
    struct task_struct  *owner;             //points to the lock holder's task_struct
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    void            *spin_mlock;    /* Spinner MCS lock */
#endif

};

 

1) Dynamic definition and initialization

static struct mutex    g_sdio_func_lock;       //definition

mutex_init(&g_sdio_func_lock);                    //initialization

# define mutex_init(mutex) \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __mutex_init((mutex), #mutex, &__key);          \                   //the # operator stringizes the argument into the name string used below
} while (0)

void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
#endif

        debug_mutex_init(lock, name, key);
}

 

 

2) Static definition and initialization

static DEFINE_MUTEX(g_sdio_func_lock);

#define __MUTEX_INITIALIZER(lockname) \
                { .count = ATOMIC_INIT(1) \
                , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname) \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
        struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

 

 

3) API usage

mutex_lock(&g_sdio_func_lock);

mutex_unlock(&g_sdio_func_lock);
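
As a usage sketch, the mutex defined above simply brackets a critical section that is allowed to sleep. The sdio_do_transfer() helper below is hypothetical and only illustrates the pattern:

#include <linux/types.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(g_sdio_func_lock);

static int sdio_do_transfer(void *buf, size_t len)
{
        int ret = 0;

        mutex_lock(&g_sdio_func_lock);      /* may sleep, so never call this from IRQ context */
        /* ... perform the (possibly sleeping) SDIO I/O on buf/len ... */
        mutex_unlock(&g_sdio_func_lock);    /* must be released by the same task that locked it */

        return ret;
}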

 

void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
    mutex_set_owner(lock);                            

}

static inline void   __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))                     //fast path: atomically decrement count; a result of 0 means the lock was free and is now ours, a negative result falls into the slow path
                fail_fn(count);  

}

 

__visible void __sched  __mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_, NULL, 0);

}

 

static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
        //if this helper decides that optimistic spinning applies, spin-wait for the lock here
        if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
                /* got the lock, yay! */
                preempt_enable();              //optimistic spinning acquired the lock
                return 0;

        }

         //reaching here means optimistic spinning failed, or its preconditions were not met: the lock was not acquired, so, just like a semaphore, add ourselves to the wait list and go to sleep

         spin_lock_mutex(&lock->wait_lock, flags);

        /* Once more, try to acquire the lock. Only try-lock the mutex if
         * it is unlocked to reduce unnecessary xchg() operations. */
        if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
                goto skip_wait;

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);                //add ourselves to the tail of the wait list
        waiter.task = task;

         for (;;) {

                //if the acquisition succeeds, break out of the loop and the slow path is done. Note that count is set to -1 here: negative means the lock is held and there are waiters on the queue; code further down re-checks whether anyone is actually still waiting

                if (atomic_read(&lock->count) >= 0 && (atomic_xchg(&lock->count, -1) == 1))
                        break;
                //if woken by a signal, take the error path: this mutex_lock attempt fails (this check is compiled out for TASK_UNINTERRUPTIBLE)
                /* got a signal? (This code gets eliminated in the TASK_UNINTERRUPTIBLE case.)*/
                if (unlikely(signal_pending_state(state, task))) {
                        ret = -EINTR;
                        goto err;
                }

                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */

                spin_unlock_mutex(&lock->wait_lock, flags);                    

                schedule_preempt_disabled();          //could not get the lock even after spinning, so go to sleep
                spin_lock_mutex(&lock->wait_lock, flags);            //after waking up, re-take the wait_lock and loop to retry
        }
        __set_task_state(task, TASK_RUNNING);

        mutex_remove_waiter(lock, &waiter, current_thread_info());      //after waking and acquiring the lock, remove ourselves from the wait list
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))                   //if no waiters remain, set count to 0, since the lock is still held by us
                atomic_set(&lock->count, 0);                   
        debug_mutex_free_waiter(&waiter);
}

 

//this function implements the optimistic spin-wait
static bool mutex_optimistic_spin(struct mutex *lock,struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct task_struct *task = current;

        //this helper bails out (goto done) if need_resched() is set; if the lock owner's task_struct->on_cpu is 1, the owner is running its critical section on a CPU, so optimistic spinning is worthwhile

        if (!mutex_can_spin_on_owner(lock))
                goto done;
        //this prevents many tasks from all entering the optimistic spin at once
        /* In order to avoid a stampede of mutex spinners trying to
         * acquire the mutex all at once, the spinners need to take a
         * MCS (queued) lock first before spinning on the owner field */
        if (!osq_lock(&lock->osq))
                goto done;

        while (true) {
                struct task_struct *owner;

                /* If there's an owner, wait for it to either release the lock or go to sleep. */

                owner = ACCESS_ONCE(lock->owner);

                //this is where the actual spin-waiting happens

                if (owner && !mutex_spin_on_owner(lock, owner))
                       break;
 

                /* Try to acquire the mutex if it is unlocked. */

                //acquire the mutex by setting its count to 0 (0 means held)

                if (mutex_try_to_acquire(lock)) {

                        mutex_set_owner(lock);         //point owner at the current task_struct
                        osq_unlock(&lock->osq);
                        return true;
                }

                /* When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task that will live-lock because we won't let
                 * the owner complete. */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /* The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.*/
                cpu_relax_lowlatency();
        }

}

//the function that actually does the spin-waiting

static noinline int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {               //owner_running() returns false when the owner's task_struct->on_cpu is 0 (it slept and was scheduled out) or when lock->owner is NULL (the owner released the mutex); in either case, stop spinning
                if (need_resched())                                            //also stop spinning if the current task itself needs to be rescheduled
                        break;
                cpu_relax_lowlatency();
        }
        rcu_read_unlock();

        /* We break out the loop above on need_resched() and when the
         * owner changed, which is a sign for heavy contention. Return
         * success only when lock->owner is NULL*/
        return lock->owner == NULL;
}

 

 

 

2. The mutex_unlock flow

void __sched mutex_unlock(struct mutex *lock)
{
        /* The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state*/
#ifndef CONFIG_DEBUG_MUTEXES
        /* * When debugging is enabled we must not clear the owner before time,
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.*/
        mutex_clear_owner(lock);       
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);

}

 

static inline void
                   __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))                                                                          
{
        if (unlikely(atomic_inc_return(count) <= 0))            //fast path: atomically increment; a result greater than 0 means the count was not negative, i.e. nobody is sleeping on the slow path, so just return. Otherwise someone is waiting and we take the slow path
                fail_fn(count);                                     
}

 

__visible void __mutex_unlock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        __mutex_unlock_common_slowpath(lock, 1);
}

 

static inline void __mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
        unsigned long flags;

        /* As a performance measurement, release the lock before doing other
         * wakeup related duties to follow. This allows other tasks to acquire
         * the lock sooner, while still handling cleanups in past unlock calls.
         * This can be done as we do not enforce strict equivalence between the
         * mutex counter and wait_list.
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the later case we have to
         * unlock it here - as the lock counter is currently 0 or negative.  */
        if (__mutex_slowpath_needs_to_unlock())      //for performance, release the lock (the count) first; this branch is normally taken and resets count straight back to 1 so that other paths can take the lock sooner. It also shows that several consecutive unlocks are equivalent to one: the count does not accumulate
                atomic_set(&lock->count, 1);

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,     //again it is the first task on the list that gets woken (FIFO)
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

 

Summary:

mutex was the first lock primitive to implement the optimistic spin-wait mechanism

a mutex tries to acquire the lock before going to sleep

a mutex implements an MCS lock to avoid the cache-line bouncing caused by multiple CPUs contending for the same lock

 

 

 

 

 

 


 

 

 

 

 

How the two kinds of locks acquire the lock

Mutex: the waiting thread goes from sleep (while waiting for the lock) to running (once the lock is released); this path involves context switches, CPU preemption, signal delivery and similar overhead.

Spinlock: the thread stays running the whole time (from lock to unlock), busy-looping on the lock flag; the mechanism is simple.

Differences between the two

A mutex has a higher up-front cost than a spinlock, but that cost is largely paid once: how long the critical section holds the lock hardly affects the mutex's overhead. A spinlock busy-loops instead, burning CPU for as long as the lock is held; its up-front cost is lower than a mutex's, but its cost grows linearly with the hold time.

When to use each

A mutex suits operations whose critical section holds the lock for a relatively long time, for example any of the following:

1. The critical section performs I/O

2. The critical-section code is complex or loops heavily

3. The critical section is very heavily contended

4. Single-core processors

Spinlocks, by contrast, are mainly for critical sections with very short hold times when CPU resources are not tight; in interrupt context, use the interrupt-disabling spinlock variants.

 

 

 

 

 

 
