* same as in xv6
kern/spinlock.c
// Mutual exclusion spin locks.

#include <inc/types.h>
#include <inc/assert.h>
#include <inc/x86.h>
#include <inc/memlayout.h>
#include <inc/string.h>
#include <kern/cpu.h>
#include <kern/spinlock.h>
#include <kern/kdebug.h>
// The big kernel lock
struct spinlock kernel_lock = {
#ifdef DEBUG_SPINLOCK
	.name = "kernel_lock"
#endif
};
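// In the JOS labs this lock is typically acquired whenever a CPU
// enters the kernel (trap path, AP startup, scheduler) and released
// just before switching back to user mode, so at most one CPU runs
// kernel code at a time; the call sites live outside this file.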
#ifdef DEBUG_SPINLOCK
// Record the current call stack in pcs[] by following the %ebp chain.
static void
get_caller_pcs(uint32_t pcs[])
{
	uint32_t *ebp;
	int i;

	ebp = (uint32_t *)read_ebp();
	for (i = 0; i < 10; i++) {
		if (ebp == 0 || ebp < (uint32_t *)ULIM
		    || ebp >= (uint32_t *)IOMEMBASE)
			break;
		pcs[i] = ebp[1];          // saved %eip
		ebp = (uint32_t *)ebp[0]; // saved %ebp
	}
	for (; i < 10; i++)
		pcs[i] = 0;
}
// Check whether this CPU is holding the lock.
static int
holding(struct spinlock *lock)
{
	return lock->locked && lock->cpu == thiscpu;
}
#endif
void
__spin_initlock(struct spinlock *lk, char *name)
{
	lk->locked = 0;
#ifdef DEBUG_SPINLOCK
	lk->name = name;
	lk->cpu = 0;
#endif
}
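// Callers normally go through a spin_initlock(lk) wrapper macro
// (declared in kern/spinlock.h, not shown here) that passes the
// stringized lock variable as the debug name.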
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
spin_lock(struct spinlock *lk)
{
#ifdef DEBUG_SPINLOCK
	if (holding(lk))
		panic("CPU %d cannot acquire %s: already holding", cpunum(), lk->name);
#endif

	// The xchg is atomic.
	// It also serializes, so that reads after acquire are not
	// reordered before it.
	while (xchg(&lk->locked, 1) != 0)
		asm volatile ("pause");

	// Record info about lock acquisition for debugging.
#ifdef DEBUG_SPINLOCK
	lk->cpu = thiscpu;
	get_caller_pcs(lk->pcs);
#endif
}
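// For reference, xchg() comes from inc/x86.h in this tree and is a
// thin wrapper around the lock-prefixed xchgl instruction, roughly:
//
//	static inline uint32_t
//	xchg(volatile uint32_t *addr, uint32_t newval)
//	{
//		uint32_t result;
//		asm volatile("lock; xchgl %0, %1"
//			     : "+m" (*addr), "=a" (result)
//			     : "1" (newval)
//			     : "cc");
//		return result;
//	}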
// Release the lock.
void
spin_unlock(struct spinlock *lk)
{
#ifdef DEBUG_SPINLOCK
	if (!holding(lk)) {
		int i;
		uint32_t pcs[10];
		// Nab the acquiring EIP chain before it gets released
		memmove(pcs, lk->pcs, sizeof pcs);
		cprintf("CPU %d cannot release %s: held by CPU %d\nAcquired at:",
			cpunum(), lk->name, lk->cpu->cpu_id);
		for (i = 0; i < 10 && pcs[i]; i++) {
			struct Eipdebuginfo info;
			if (debuginfo_eip(pcs[i], &info) >= 0)
				cprintf("  %08x %s:%d: %.*s+%x\n", pcs[i],
					info.eip_file, info.eip_line,
					info.eip_fn_namelen, info.eip_fn_name,
					pcs[i] - info.eip_fn_addr);
			else
				cprintf("  %08x\n", pcs[i]);
		}
		panic("spin_unlock");
	}

	lk->pcs[0] = 0;
	lk->cpu = 0;
#endif

	// The xchg serializes, so that reads before release are
	// not reordered after it.  The 1996 PentiumPro manual (Volume 3,
	// 7.2) says reads can be carried out speculatively and in
	// any order, which implies we need to serialize here.
	// But the 2007 Intel 64 Architecture Memory Ordering White
	// Paper says that Intel 64 and IA-32 will not move a load
	// after a store.  So lk->locked = 0 would work here.
	// The xchg being asm volatile ensures gcc emits it after
	// the above assignments (and after the critical section).
	xchg(&lk->locked, 0);
}
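A minimal usage sketch, not part of spinlock.c: the counter names below are made up, and spin_initlock is assumed to be the usual wrapper macro around __spin_initlock. Kernel code declares a lock, initializes it once, and brackets each critical section with spin_lock/spin_unlock.

#include <kern/spinlock.h>

static struct spinlock counter_lock;	// hypothetical shared state
static int counter;

void
counter_init(void)
{
	spin_initlock(&counter_lock);	// assumed wrapper; sets the debug name under DEBUG_SPINLOCK
}

void
counter_inc(void)
{
	spin_lock(&counter_lock);	// spin (xchg + pause) until this CPU owns the lock
	counter++;			// critical section
	spin_unlock(&counter_lock);	// xchg locked back to 0, releasing the lock
}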