// spinlock.c — spin-lock implementation from the xv6 teaching kernel (x86).
1 // Mutual exclusion spin locks.
3 #include "types.h"
4 #include "defs.h"
5 #include "param.h"
6 #include "x86.h"
7 #include "mmu.h"
8 #include "proc.h"
9 #include "spinlock.h"
11 void
12 initlock(struct spinlock *lk, char *name)
14 lk->name = name;
15 lk->locked = 0;
16 lk->cpu = 0;
19 // Acquire the lock.
20 // Loops (spins) until the lock is acquired.
21 // Holding a lock for a long time may cause
22 // other CPUs to waste time spinning to acquire it.
23 void
24 acquire(struct spinlock *lk)
26 pushcli(); // disable interrupts to avoid deadlock.
27 if(holding(lk))
28 panic("acquire");
30 // The xchg is atomic.
31 // It also serializes, so that reads after acquire are not
32 // reordered before it.
33 while(xchg(&lk->locked, 1) != 0)
36 // Record info about lock acquisition for debugging.
37 lk->cpu = cpu;
38 getcallerpcs(&lk, lk->pcs);
41 // Release the lock.
42 void
43 release(struct spinlock *lk)
45 if(!holding(lk))
46 panic("release");
48 lk->pcs[0] = 0;
49 lk->cpu = 0;
51 // The xchg serializes, so that reads before release are
52 // not reordered after it. The 1996 PentiumPro manual (Volume 3,
53 // 7.2) says reads can be carried out speculatively and in
54 // any order, which implies we need to serialize here.
55 // But the 2007 Intel 64 Architecture Memory Ordering White
56 // Paper says that Intel 64 and IA-32 will not move a load
57 // after a store. So lock->locked = 0 would work here.
58 // The xchg being asm volatile ensures gcc emits it after
59 // the above assignments (and after the critical section).
60 xchg(&lk->locked, 0);
62 popcli();
65 // Record the current call stack in pcs[] by following the %ebp chain.
66 void
67 getcallerpcs(void *v, uint pcs[])
69 uint *ebp;
70 int i;
72 ebp = (uint*)v - 2;
73 for(i = 0; i < 10; i++){
74 if(ebp == 0 || ebp < (uint*)0x100000 || ebp == (uint*)0xffffffff)
75 break;
76 pcs[i] = ebp[1]; // saved %eip
77 ebp = (uint*)ebp[0]; // saved %ebp
79 for(; i < 10; i++)
80 pcs[i] = 0;
83 // Check whether this cpu is holding the lock.
84 int
85 holding(struct spinlock *lock)
87 return lock->locked && lock->cpu == cpu;
91 // Pushcli/popcli are like cli/sti except that they are matched:
92 // it takes two popcli to undo two pushcli. Also, if interrupts
93 // are off, then pushcli, popcli leaves them off.
95 void
96 pushcli(void)
98 int eflags;
100 eflags = readeflags();
101 cli();
102 if(cpu->ncli++ == 0)
103 cpu->intena = eflags & FL_IF;
106 void
107 popcli(void)
109 if(readeflags()&FL_IF)
110 panic("popcli - interruptible");
111 if(--cpu->ncli < 0)
112 panic("popcli");
113 if(cpu->ncli == 0 && cpu->intena)
114 sti();