Common spinlock APIs
1. Why are spin_lock_irq() and spin_lock_irqsave() defined in addition to spin_lock()?
Because spin_lock() disables preemption (and thus rescheduling) on the local CPU, but it does not stop hardware interrupts from arriving, nor the interrupt handler (hardirq) that runs as a result.
Suppose thread T on some CPU holds a spinlock when an interrupt arrives; the CPU switches to the corresponding hardirq. If that hardirq also tries to take the same spinlock, it can never succeed, so the hardirq can never return. But until the hardirq returns, thread T cannot run again to release the spinlock. Code on that CPU can make no further progress: a deadlock.
Therefore, if the critical section protected by spin_lock() can also be entered from interrupt context, use spin_lock_irq() or spin_lock_irqsave() instead.
Note that even spin_lock_irqsave() only disables interrupts on the local CPU. If an interrupt fires on another CPU, its hardirq may still try to take a spinlock held by thread T on this CPU.
That is harmless, however: since the hardirq and thread T run on different CPUs, thread T keeps running, eventually releases the spinlock, and the hardirq then acquires it; no deadlock results. The sketch below shows the usual pattern.
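A minimal sketch, assuming a hypothetical driver (dev_lock, dev_counter and dev_irq_handler are invented names, not from any real driver): the data is shared between process context and a hardirq handler, so process context must use the irqsave variant.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(dev_lock);   /* protects dev_counter */
static unsigned long dev_counter;

/* hardirq context: on modern kernels the handler already runs with
 * local IRQs disabled, so plain spin_lock() is enough here */
static irqreturn_t dev_irq_handler(int irq, void *data)
{
    spin_lock(&dev_lock);
    dev_counter++;
    spin_unlock(&dev_lock);
    return IRQ_HANDLED;
}

/* process context: local IRQs must be off while the lock is held,
 * otherwise dev_irq_handler() could fire on this CPU and spin on a
 * lock this same CPU already holds, the deadlock described above */
static unsigned long dev_read_counter(void)
{
    unsigned long flags, val;

    spin_lock_irqsave(&dev_lock, flags);
    val = dev_counter;
    spin_unlock_irqrestore(&dev_lock, flags);
    return val;
}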
2. Can spin_lock() deadlock on a single-core CPU?
Not by itself. On a uniprocessor (UP) build spin_lock() reduces to preempt_disable() (see the UP implementation below), so kernel paths run strictly one after another; as long as no interrupt fires inside the critical section, no deadlock can occur. But if the critical section can be interrupted and the handler takes the same lock, the code is still broken, so the IRQ-disabling variants are needed on UP as well.
3. How do spinlocks interact with preemption?
The holder always runs with preemption disabled. What happens to a contender is less obvious; the sketch below answers it.
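On a CONFIG_PREEMPT kernel the contended slow path is generated by the BUILD_LOCK_OPS() macro in kernel/locking/spinlock.c. A simplified sketch with details elided (an approximation, not the exact source): preemption is toggled around each trylock attempt, so a waiter can still be preempted while it spins, whereas the holder cannot.

void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
    for (;;) {
        preempt_disable();
        if (likely(do_raw_spin_trylock(lock)))
            break;                  /* got the lock; preemption stays off */
        preempt_enable();           /* window where the waiter may be preempted */
        arch_spin_relax(&lock->raw_lock);   /* e.g. cpu_relax() */
    }
}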
4. Spinlock vs. mutex
Spinlocks and mutexes both handle concurrent access inside the kernel, but each has its own use cases:
- A mutex protects a process's critical resources, whereas a spinlock protects critical sections that may be entered from IRQ handlers.
- A mutex puts contenders to sleep until the lock becomes available, whereas a spinlock spins in a loop (consuming CPU) until it acquires the lock.
- Because of the previous point, a spinlock must not be held for long, since waiters burn CPU time for the whole wait; a mutex may be held for as long as the resource needs protecting, because contenders sleep on a wait queue.
- When working with spinlocks, keep in mind that only the thread holding the spinlock has preemption disabled; a waiter spinning for the lock can still be preempted (see the slow path sketched in point 3). A contrasting usage sketch follows.
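A minimal sketch contrasting the two primitives (cfg_mutex, stat_lock and the functions are invented names; error handling trimmed): the mutex-protected section may sleep, while the spinlock-protected section must stay short and never sleep.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>

static DEFINE_MUTEX(cfg_mutex);     /* process context only, may sleep  */
static DEFINE_SPINLOCK(stat_lock);  /* also taken from IRQ context      */

static void *cfg;
static unsigned long stat_count;

static int update_config(size_t len)
{
    mutex_lock(&cfg_mutex);          /* contenders sleep on a wait queue */
    kfree(cfg);
    cfg = kmalloc(len, GFP_KERNEL);  /* may sleep: fine under a mutex    */
    mutex_unlock(&cfg_mutex);
    return cfg ? 0 : -ENOMEM;
}

static void bump_stat(void)
{
    unsigned long flags;

    spin_lock_irqsave(&stat_lock, flags);
    stat_count++;                    /* short, non-sleeping work only    */
    spin_unlock_irqrestore(&stat_lock, flags);
}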
Kernel implementation of spinlocks
Spinlock structure definitions
typedef struct spinlock {
    union {
        struct raw_spinlock rlock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
        struct {
            u8 __padding[LOCK_PADSIZE];
            struct lockdep_map dep_map;
        };
#endif
    };
} spinlock_t;
typedef struct raw_spinlock {
    arch_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
    unsigned int magic, owner_cpu;
    void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    struct lockdep_map dep_map;
#endif
} raw_spinlock_t;
/* On ARM this is a ticket lock: `next` is the next ticket to hand out,
 * `owner` is the ticket currently holding the lock. The union lets the
 * two 16-bit halves also be accessed as one 32-bit word; field order
 * depends on endianness (__ARMEB__ = big-endian ARM). */
typedef struct {
    union {
        u32 slock;
        struct __raw_tickets {
#ifdef __ARMEB__
            u16 next;
            u16 owner;
#else
            u16 owner;
            u16 next;
#endif
        } tickets;
    };
} arch_spinlock_t;
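To make the next/owner mechanics concrete, here is a user-space sketch of the same ticket idea using C11 atomics (illustrative only, not kernel code): lock() atomically takes a ticket from next and waits until owner reaches it; unlock() advances owner.

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
    atomic_ushort next;   /* next ticket to hand out       */
    atomic_ushort owner;  /* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
    /* atomically grab a ticket, like the ldrex/strex loop shown later */
    uint16_t me = atomic_fetch_add(&l->next, 1);

    /* busy-wait until it is our turn (the kernel uses wfe() instead) */
    while (atomic_load(&l->owner) != me)
        ;
}

static void ticket_unlock(struct ticket_lock *l)
{
    atomic_fetch_add(&l->owner, 1);   /* serve the next waiter */
}

Both fields start at zero; because tickets are handed out in order, the lock is granted strictly FIFO, which is the fairness property ticket locks were introduced for.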
Implementation of spin_lock_irqsave
#define spin_lock_irqsave(lock, flags)                \
do {                                                  \
    raw_spin_lock_irqsave(spinlock_check(lock), flags); \
} while (0)

#define raw_spin_lock_irqsave(lock, flags)       \
    do {                                         \
        typecheck(unsigned long, flags);         \
        flags = _raw_spin_lock_irqsave(lock);    \
    } while (0)
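Usage sketch (my_lock and critical_section are invented names): flags must be a local unsigned long, which the typecheck() above enforces at compile time, and the save/restore pairing preserves whatever IRQ state was in effect before, so the section is safe even if interrupts were already disabled.

static DEFINE_SPINLOCK(my_lock);

static void critical_section(void)
{
    unsigned long flags;

    spin_lock_irqsave(&my_lock, flags);      /* save IRQ state, disable IRQs, lock  */
    /* ... short, non-sleeping work ... */
    spin_unlock_irqrestore(&my_lock, flags); /* unlock, restore saved IRQ state     */
}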
On a uniprocessor (UP) kernel
/*
 * Defined in include/linux/spinlock_api_up.h.
 * On UP the spinlock degenerates: no spinning is needed, so the
 * irqsave variant just saves and disables local IRQs and then
 * disables preemption via __LOCK().
 */
#define _raw_spin_lock_irqsave(lock, flags)  __LOCK_IRQSAVE(lock, flags)

#define __LOCK_IRQSAVE(lock, flags) \
    do { local_irq_save(flags); __LOCK(lock); } while (0)
/*
 * Also from include/linux/spinlock_api_up.h (generic UP code, not
 * ARM-specific):
 * In the UP-nondebug case there's no real locking going on, so the
 * only thing we have to do is to keep the preempt counts and irq
 * flags straight, to suppress compiler warnings of unused lock
 * variables, and to add the proper checker annotations.
 */
#define ___LOCK(lock) \
    do { __acquire(lock); (void)(lock); } while (0)

#define __LOCK(lock) \
    do { preempt_disable(); ___LOCK(lock); } while (0)
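Putting the macros together: on UP, spin_lock_irqsave(lock, flags) expands to roughly the following, so "locking" really is nothing more than IRQ and preemption control.

local_irq_save(flags);  /* save and disable local IRQs                 */
preempt_disable();      /* forbid preemption                           */
__acquire(lock);        /* sparse checker annotation, no generated code */
(void)(lock);           /* suppress "unused variable" warnings          */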
On SMP (multi-core)
/*
 * Defined in include/linux/spinlock_api_smp.h.
 * The SMP case does real locking: save and disable local IRQs,
 * disable preemption, then take the architecture-specific lock.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    preempt_disable();
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
    return flags;
}
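For reference, when CONFIG_LOCK_STAT is not set, LOCK_CONTENDED() in include/linux/lockdep.h degenerates to a plain call of the real lock function (quoted from memory, verify against your kernel tree):

#define LOCK_CONTENDED(_lock, try, lock) \
    lock(_lock)

so the line above boils down to do_raw_spin_lock(lock).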
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
    __acquire(lock);                  /* sparse annotation only            */
    arch_spin_lock(&lock->raw_lock);  /* the arch-specific lock below      */
    mmiowb_spin_lock();               /* MMIO write-ordering bookkeeping   */
}
/*
 * On the ARM architecture (arch/arm/include/asm/spinlock.h):
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned long tmp;
    u32 newval;
    arch_spinlock_t lockval;

    prefetchw(&lock->slock);
    __asm__ __volatile__(
"1: ldrex   %0, [%3]\n"     /* lockval = lock->slock (exclusive load)               */
"   add     %1, %0, %4\n"   /* newval = lockval + (1 << TICKET_SHIFT): take a ticket */
"   strex   %2, %1, [%3]\n" /* try to store newval; tmp == 0 on success             */
"   teq     %2, #0\n"
"   bne     1b"             /* another CPU raced us: retry                          */
    : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
    : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
    : "cc");

    /* spin until our ticket (next) is the one being served (owner) */
    while (lockval.tickets.next != lockval.tickets.owner) {
        wfe();      /* low-power wait for an event (SEV from unlock) */
        lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
    }

    smp_mb();       /* barrier: the critical section starts here */
}
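For completeness, the matching unlock from the same file (quoted from memory, treat as a sketch): advancing owner hands the lock to the next ticket, and dsb_sev() executes SEV so CPUs sleeping in wfe() above wake up and re-check the owner field.

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    smp_mb();                /* order the critical section before the release */
    lock->tickets.owner++;   /* serve the next ticket                         */
    dsb_sev();               /* DSB + SEV: wake CPUs waiting in WFE           */
}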