//spin_lock_irqsave saves the local interrupt state before taking the spin lock.
//This matters because the irq-disabling lock variants disable interrupts when
//the lock is taken and re-enable them on unlock; saving the state lets the
//unlock path restore interrupts to exactly what they were before.
//Example:
//1. Say interrupt delivery was already disabled before the lock was acquired.
//2. spin_lock_irq disables interrupts and takes the lock.
//3. spin_unlock_irq unconditionally re-enables interrupts.
//After step 3 interrupts are enabled even though they were disabled before the
//lock was acquired, which is wrong.
//So use spin_lock_irq only when you are sure interrupts are not already
//disabled; otherwise always use spin_lock_irqsave.
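//A minimal sketch of the safe pattern (the lock and function names here are
//invented for illustration, not taken from any real driver):
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);        /* hypothetical lock */

static void update_shared_state(void)
{
	unsigned long flags;

	/* save the current IRQ state, disable IRQs, take the lock */
	spin_lock_irqsave(&my_lock, flags);
	/* ... touch data shared with an interrupt handler ... */
	spin_unlock_irqrestore(&my_lock, flags);
	/* IRQs are now exactly as they were before the lock was taken */
}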
*** include/linux/spinlock.h:
spin_lock[283] static inline void spin_lock(spinlock_t *lock)
static inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
*** include/linux/spinlock.h:
raw_spin_lock[170] #define raw_spin_lock(lock) _raw_spin_lock(lock)
//spinlock.h then selects the SMP or UP implementation according to the kernel config:
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
*/
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
//For the different processor configurations:
//----------------------------------------------------------------
*** include/linux/spinlock_api_smp.h:
_raw_spin_lock[22] void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
_raw_spin_lock[47] #define _raw_spin_lock(lock) __raw_spin_lock(lock)
*** include/linux/spinlock_api_up.h:
_raw_spin_lock[52] #define _raw_spin_lock(lock) __LOCK(lock)
*** kernel/spinlock.c:
_raw_spin_lock[135] void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
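//On a UP kernel (without CONFIG_DEBUG_SPINLOCK) there is no other CPU to spin
//against, so __LOCK only needs to disable preemption. A trimmed sketch of the
//spinlock_api_up.h helpers:
#define ___LOCK(lock) \
	do { __acquire(lock); (void)(lock); } while (0)

#define __LOCK(lock) \
	do { preempt_disable(); ___LOCK(lock); } while (0)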
//For SMP:
//----------------------------------------------
//for prefix "__lockfunc"
*** include/linux/spinlock.h:
__lockfunc[76] #define __lockfunc __attribute__((section(".spinlock.text")))
//for suffix "__acquires(lock)"
*** include/linux/compiler.h:
__acquires[13] #define __acquires(x) __attribute__((context(x,0,1)))
__acquires[36] #define __acquires(x)
//So,
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
//is just a function declaration, and it places _raw_spin_lock in the .spinlock.text section
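//A small user-space sketch of what the section attribute does (function and
//section names below are made up for demonstration):
#include <stdio.h>

__attribute__((section(".demo.text")))  /* emitted here instead of .text */
static void demo_func(void)
{
	printf("placed in .demo.text\n");
}

int main(void)
{
	demo_func();                    /* behaves like any other function      */
	return 0;                       /* verify: objdump -t a.out | grep demo */
}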
*** include/linux/compiler.h:
#ifdef __CHECKER__
__acquires[13] #define __acquires(x) __attribute__((context(x,0,1)))
#else
__acquires[36] #define __acquires(x)
#endif
//For __CHECKER__ and the context attribute, see these two articles:
http://www.groad.net/bbs/thread-3408-1-1.html -- (__context__)
http://www.groad.net/bbs/thread-3388-1-1.html -- (sparse)
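//In short: when sparse runs (it defines __CHECKER__), __acquires/__releases
//describe how a function changes the lock context, and sparse warns when the
//context is left unbalanced. A sketch with hypothetical function names:
void my_lock_it(raw_spinlock_t *lock) __acquires(lock);    /* context +1 on exit */
void my_unlock_it(raw_spinlock_t *lock) __releases(lock);  /* context -1 on exit */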
//For SMP: *** include/linux/spinlock_api_smp.h:
__raw_spin_lock[140] static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
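//The following pages dig into spin_acquire()/lockdep. With lockdep and lock
//statistics compiled out, spin_acquire() is a no-op and LOCK_CONTENDED(lock,
//try, lock) is just lock(_lock), so __raw_spin_lock reduces to roughly this
//sketch:
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();          /* no preemption while spinning            */
	do_raw_spin_lock(lock);     /* arch-specific busy-wait on the raw lock */
}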
*** include/linux/lockdep.h:
spin_acquire[460] #define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
spin_acquire[463] #define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
spin_acquire[468] #define spin_acquire(l, s, t, i) do { } while (0)
//Now look at lock_acquire
//----------------------------------------------------------
*** include/linux/lockdep.h:
lock_acquire[356] #define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
*** kernel/lockdep.c:
lock_acquire[3384] void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
#define raw_local_irq_save(flags) \
do { \
typecheck(unsigned long, flags); \
flags = arch_local_irq_save(); \
} while (0)
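//typecheck() only exists to catch callers that pass the wrong type for flags;
//a sketch of the macro from include/linux/typecheck.h:
#define typecheck(type, x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})
//Comparing the addresses of the two dummies makes the compiler warn when x is
//not of the expected type (here unsigned long), e.g. when someone passes an
//int to raw_local_irq_save().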
*** arch/arm/include/asm/irqflags.h:
arch_local_irq_save[13] static inline unsigned long arch_local_irq_save(void)
arch_local_irq_save[49] static inline unsigned long arch_local_irq_save(void)
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile(
" mrs %0, cpsr @ arch_local_irq_save\n"
" cpsid i"
: "=r" (flags) : : "memory", "cc");
return flags;
}
//__asm__("assembly template" : output operands : input operands : clobbered registers)
/* Intrinsic       Opcode    PRIMASK   FAULTMASK
   ----------------------------------------------
   __enable_irq    CPSIE i   0         -
   __disable_irq   CPSID i   1         -
   __enable_fiq    CPSIE f   -         0
   __disable_fiq   CPSID f   -         1
   ----------------------------------------------
*/
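//For symmetry, the restore side in the same irqflags.h just writes the saved
//value back into the CPSR control field, so interrupts return to whatever
//state they had when arch_local_irq_save() ran (sketch):
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(
	"	msr	cpsr_c, %0	@ local_irq_restore"
	:
	: "r" (flags)
	: "memory", "cc");
}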
//=================================================================================
static inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
*** include/linux/spinlock_api_smp.h:
_raw_spin_lock_irq[29] void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
_raw_spin_lock_irq[55] #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
*** include/linux/spinlock_api_up.h:
_raw_spin_lock_irq[59] #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
*** kernel/spinlock.c:
_raw_spin_lock_irq[151] void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
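//So spin_lock_irq disables interrupts without remembering their previous
//state; it is only safe in process context where IRQs are known to be on.
//Illustration only (lock and function names are invented):
static DEFINE_SPINLOCK(dev_lock);       /* hypothetical */

static void process_context_update(void)
{
	spin_lock_irq(&dev_lock);       /* disable IRQs, take the lock     */
	/* ... critical section ... */
	spin_unlock_irq(&dev_lock);     /* drop the lock, re-enable IRQs   */
}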
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
} while (0)
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#else
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_lock_irqsave(lock, flags); \
} while (0)
#endif
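//On SMP the _raw_spin_lock_irqsave flavour returns the saved flags, which is
//why the macro above assigns them. Its core, with the lockdep layers trimmed
//(a sketch, not the verbatim source):
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);          /* remember and mask local IRQs */
	preempt_disable();
	do_raw_spin_lock(lock);         /* arch-level acquisition       */
	return flags;
}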
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
#define raw_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
*** include/linux/spinlock_api_smp.h:
_raw_spin_unlock_irqrestore[43] _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
_raw_spin_unlock_irqrestore[83] #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
*** include/linux/spinlock_api_up.h:
_raw_spin_unlock_irqrestore[78] #define _raw_spin_unlock_irqrestore(lock, flags) \
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
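//Both unlock paths release the lock first, then re-enable interrupts, then
//preemption. A closing sketch of how the flavours pair up around data shared
//with an interrupt handler (all names are invented for illustration):
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);    /* hypothetical */
static int shared_counter;

/* The handler already runs with IRQs off on this CPU, so plain spin_lock()
 * is enough here. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	spin_lock(&shared_lock);
	shared_counter++;
	spin_unlock(&shared_lock);
	return IRQ_HANDLED;
}

/* Process context must mask IRQs itself, otherwise the handler could
 * interrupt it while it holds the lock and deadlock on this CPU. */
static int read_counter(void)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(&shared_lock, flags);
	val = shared_counter;
	spin_unlock_irqrestore(&shared_lock, flags);
	return val;
}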