// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}
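
// Usage sketch (illustrative, not part of this file): a subsystem
// declares a spinlock and names it once at boot; the name only shows
// up in debugging output. "demolock" and "demoinit" are hypothetical.
static struct spinlock demolock;

static void
demoinit(void)
{
  initlock(&demolock, "demo");
}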

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}
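
// For reference, a sketch of the xchg helper used above. In xv6 it is
// defined in x86.h; this version illustrates the idea (the exact xv6
// code may differ): a locked exchange, which the x86 guarantees to be
// atomic and serializing.
static inline uint
xchg(volatile uint *addr, uint newval)
{
  uint result;

  // "lock" asserts the bus lock so the exchange is atomic across
  // CPUs; the + in "+m" marks *addr as both read and written.
  asm volatile("lock; xchgl %0, %1" :
               "+m" (*addr), "=a" (result) :
               "1" (newval) :
               "cc");
  return result;
}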

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lk->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  popcli();
}
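
// The same release expressed with a gcc atomic builtin (an analogue
// for comparison, not how xv6 does it): a release store keeps the
// critical section's loads and stores from moving past the unlock,
// which is the property the comment above argues xchg provides here.
static inline void
release_store_sketch(volatile uint *locked)
{
  // __ATOMIC_RELEASE: prior memory operations may not be reordered
  // after this store.
  __atomic_store_n(locked, 0, __ATOMIC_RELEASE);
}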

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)0x100000 || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}
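
// Hypothetical debugging helper (not part of xv6; the name printpcs
// is made up): dump the PCs recorded above, e.g. when diagnosing a
// stuck lock. Assumes cprintf, the kernel console printf used
// throughout xv6.
static void
printpcs(uint pcs[])
{
  int i;

  // getcallerpcs zero-fills unused slots, so stop at the first 0.
  for(i = 0; i < 10 && pcs[i] != 0; i++)
    cprintf(" %x", pcs[i]);
  cprintf("\n");
}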

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}

// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off. (A usage sketch
// follows the two functions below.)

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(cpu->ncli++ == 0)
    cpu->intena = eflags & FL_IF;
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpu->ncli < 0)
    panic("popcli");
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}
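
// Usage sketch of the matched discipline (illustrative; "nested_demo",
// "a", and "b" are hypothetical): interrupts go off at the first
// acquire and stay off until the outermost release's popcli.
static void
nested_demo(struct spinlock *a, struct spinlock *b)
{
  acquire(a);   // pushcli: interrupts off, ncli becomes 1
  acquire(b);   // pushcli: ncli becomes 2
  release(b);   // popcli:  ncli back to 1; interrupts still off
  release(a);   // popcli:  ncli 0; interrupts restored only if they
                //          were enabled before the first acquire
}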