/*
 * arch/ppc/kernel/bitops.c from linux-2.3.21
 * (recovered from mascara-docs.git web view, blob fb5a19e3a3e054677d38787911548f851676060c)
 */
/*
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <linux/kernel.h>
#include <asm/bitops.h>
/*
 * I left these here since the problems with "cc" make it difficult to keep
 * them in bitops.h -- Cort
 */
12 void set_bit(int nr, volatile void *addr)
14 unsigned int t;
15 unsigned int mask = 1 << (nr & 0x1f);
16 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
18 if ((unsigned long)addr & 3)
19 printk(KERN_ERR "set_bit(%x, %p)\n", nr, addr);
20 __asm__ __volatile__("\n\
21 1: lwarx %0,0,%2
22 or %0,%0,%1
23 stwcx. %0,0,%2
24 bne 1b"
25 : "=&r" (t) /*, "=m" (*p)*/
26 : "r" (mask), "r" (p)
27 : "cc");
30 void clear_bit(int nr, volatile void *addr)
32 unsigned int t;
33 unsigned int mask = 1 << (nr & 0x1f);
34 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
36 if ((unsigned long)addr & 3)
37 printk(KERN_ERR "clear_bit(%x, %p)\n", nr, addr);
38 __asm__ __volatile__("\n\
39 1: lwarx %0,0,%2
40 andc %0,%0,%1
41 stwcx. %0,0,%2
42 bne 1b"
43 : "=&r" (t) /*, "=m" (*p)*/
44 : "r" (mask), "r" (p)
45 : "cc");
48 void change_bit(int nr, volatile void *addr)
50 unsigned int t;
51 unsigned int mask = 1 << (nr & 0x1f);
52 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
54 if ((unsigned long)addr & 3)
55 printk(KERN_ERR "change_bit(%x, %p)\n", nr, addr);
56 __asm__ __volatile__("\n\
57 1: lwarx %0,0,%2
58 xor %0,%0,%1
59 stwcx. %0,0,%2
60 bne 1b"
61 : "=&r" (t) /*, "=m" (*p)*/
62 : "r" (mask), "r" (p)
63 : "cc");
66 int test_and_set_bit(int nr, volatile void *addr)
68 unsigned int old, t;
69 unsigned int mask = 1 << (nr & 0x1f);
70 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
72 if ((unsigned long)addr & 3)
73 printk(KERN_ERR "test_and_set_bit(%x, %p)\n", nr, addr);
74 __asm__ __volatile__("\n\
75 1: lwarx %0,0,%3
76 or %1,%0,%2
77 stwcx. %1,0,%3
78 bne 1b"
79 : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
80 : "r" (mask), "r" (p)
81 : "cc");
83 return (old & mask) != 0;
86 int test_and_clear_bit(int nr, volatile void *addr)
88 unsigned int old, t;
89 unsigned int mask = 1 << (nr & 0x1f);
90 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
92 if ((unsigned long)addr & 3)
93 printk(KERN_ERR "test_and_clear_bit(%x, %p)\n", nr, addr);
94 __asm__ __volatile__("\n\
95 1: lwarx %0,0,%3
96 andc %1,%0,%2
97 stwcx. %1,0,%3
98 bne 1b"
99 : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
100 : "r" (mask), "r" (p)
101 : "cc");
103 return (old & mask) != 0;
106 int test_and_change_bit(int nr, volatile void *addr)
108 unsigned int old, t;
109 unsigned int mask = 1 << (nr & 0x1f);
110 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
112 if ((unsigned long)addr & 3)
113 printk(KERN_ERR "test_and_change_bit(%x, %p)\n", nr, addr);
114 __asm__ __volatile__("\n\
115 1: lwarx %0,0,%3
116 xor %1,%0,%2
117 stwcx. %1,0,%3
118 bne 1b"
119 : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
120 : "r" (mask), "r" (p)
121 : "cc");
123 return (old & mask) != 0;
/* I put it in bitops.h -- Cort */
#if 0
/*
 * ffz = find-first-zero: index (0..31) of the least significant 0 bit
 * of x.  ~x & (x+1) isolates that bit, cntlzw counts leading zeros,
 * so 31 - n is its bit number.  Result is undefined for x == 0xffffffff
 * (no zero bit: the isolated value is 0 and cntlzw returns 32).
 */
int ffz(unsigned int x)
{
	int n;

	x = ~x & (x+1);		/* set LS zero to 1, other bits to 0 */
	__asm__ ("cntlzw %0,%1" : "=r" (n) : "r" (x));
	return 31 - n;
}

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */

/*
 * Return the index of the first zero bit in the `size`-bit bitmap at
 * `addr`, or a value >= size if every bit is set.
 */
int find_first_zero_bit(void * addr, int size)
{
	unsigned int * p = ((unsigned int *) addr);
	unsigned int result = 0;
	unsigned int tmp;

	if (size == 0)
		return 0;
	/* Scan whole 32-bit words while at least 32 bits remain. */
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	/* Partial final word: force the bits beyond `size` to 1 so ffz
	 * cannot report a zero past the end of the bitmap. */
	tmp = *p;
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/*
 * Find next zero bit in a bitmap reasonably efficiently..
 * Like find_first_zero_bit(), but starts the search at bit `offset`;
 * returns `size` if offset is already past the end.
 */
int find_next_zero_bit(void * addr, int size, int offset)
{
	unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* First word is entered mid-way: mask off the `offset`
		 * low bits (set them to 1) so they are skipped. */
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* Scan whole 32-bit words while at least 32 bits remain. */
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	/* Mask off bits beyond the end of the bitmap. */
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
#endif