#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 give the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bits operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operation of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 give the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bits operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operation of the form "set_bit(bitnr, flags)".
 */
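
/*
 * Editor's illustration (not part of the original header): with the big
 * endian layout described above, the bit number selects a byte counted
 * from the least significant end of each word. On 31 bit s390, assuming
 * an aligned bitmap that starts out all zero:
 *
 *	unsigned long flags[2] = { 0, 0 };
 *
 *	set_bit(0, flags);	// flags[0] == 0x00000001, i.e. the 0x01
 *				// bit of ((unsigned char *) flags)[3]
 *	set_bit(31, flags);	// the 0x80 bit of ((unsigned char *) flags)[0]
 *	set_bit(32, flags);	// the 0x01 bit of ((unsigned char *) flags)[7]
 */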

/* set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to 4 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string "  %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string "  %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
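
/*
 * Editor's sketch (not part of the original header): __BITOPS_LOOP is the
 * s390 spelling of a plain compare-and-swap retry loop around CS/CSG. The
 * same idea in portable C, using the GCC __sync builtin instead of the
 * inline assembly (names here are illustrative only):
 *
 *	static inline void set_bit_cas_sketch(unsigned long nr,
 *					      volatile unsigned long *word)
 *	{
 *		unsigned long old, new;
 *
 *		do {
 *			old = *word;
 *			new = old | (1UL << nr);    // OR/AND/XOR per __op_string
 *		} while (__sync_val_compare_and_swap(word, old, new) != old);
 *	}
 */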

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
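
/*
 * Editor's illustration (not part of the original header): the address
 * and mask computation above, worked through for nr = 37 on 31 bit s390
 * (__BITOPS_WORDSIZE == 32):
 *
 *	nr ^ (nr & 31) == 32  ->  addr += 32 >> 3 == 4   (second word)
 *	nr & 31        ==  5  ->  mask  = 1UL << 5 == 0x20
 *
 * i.e. the CS loop operates on the word that holds bit 37 and ORs in the
 * single-bit mask for bit 5 within that word.
 */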

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
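
/*
 * Editor's note (not part of the original header): because nr is checked
 * with __builtin_constant_p, a call with a compile-time constant bit
 * number selects __constant_set_bit and reduces to a single immediate OI
 * on the right byte, e.g.
 *
 *	set_bit_simple(3, flags);	// OI of 0x08 on the byte at flags+3
 *
 * while a variable bit number goes through __set_bit and the _oi_bitmap
 * lookup instead.
 */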

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)
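
/*
 * Editor's sketch (not part of the original header): the three _simple
 * test-and-modify helpers above all follow the same non-atomic pattern,
 * roughly equivalent to:
 *
 *	unsigned char old = *byte;	// snapshot before the update
 *	*byte op= 1 << (nr & 7);	// the OC/NC/XC with the bitmap table
 *	return (old >> (nr & 7)) & 1;
 *
 * The read and the read-modify-write are separate accesses, so these are
 * only safe when no other CPU can modify the bitmap concurrently.
 */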

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return ((((volatile char *) addr)
		 [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7)))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0xffffffff)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0xffff)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0xff)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _zb_findmap[word & 0xff];
}
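
/*
 * Editor's note (not part of the original header): ffz narrows the search
 * to a single byte and then finishes with a table lookup, _zb_findmap[b]
 * being the position of the first zero bit in byte b. Per the warning
 * above, the result is only meaningful when a zero bit exists, e.g.
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);	// ffz(0x0bUL) == 2, ffz(0xffUL) == 8
 */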

/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first..
 */
static inline unsigned long __ffs (unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _sb_findmap[word & 0xff];
}

/*
 * Find-bit routines..
 */

#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jno  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jno  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   slr  %1,%1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jnz  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jnz  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,8(%0)\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jno  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jno  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   slgr %1,%1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jnz  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jnz  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit (const unsigned long * addr, unsigned long size,
		    unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(*p >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}

static inline int
find_next_bit (const unsigned long * addr, unsigned long size,
	       unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs returns __BITOPS_WORDSIZE
		 * if no one bit is present in the word.
		 */
		set = __ffs(*p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight64(x)						\
({								\
	unsigned long __x = (x);				\
	unsigned int __w;					\
	__w = generic_hweight32((unsigned int) __x);		\
	__w += generic_hweight32((unsigned int) (__x>>32));	\
	__w;							\
})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#ifdef __KERNEL__

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
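
/*
 * Editor's illustration (not part of the original header): the ext2_*
 * wrappers below recover this little endian numbering on the big endian
 * s390 simply by XORing the bit number, e.g. on 31 bit
 * (__BITOPS_WORDSIZE == 32):
 *
 *	ext2_set_bit(0, addr);	// == test_and_set_bit(0 ^ 24, addr)
 *				// -> the 0x01 bit of byte addr+0,
 *				//    exactly what ext2 expects on disk
 *	ext2_set_bit(9, addr);	// == test_and_set_bit(9 ^ 24, addr)
 *				// -> the 0x02 bit of byte addr+1
 */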

#define ext2_set_bit(nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)

#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"   slr  %0,%0\n"
		"0: cl   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   ahi  %0,24\n"
		"   lhi  %1,0xff\n"
		"   tmh  %2,0xffff\n"
		"   jo   2f\n"
		"   ahi  %0,-16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0xff00\n"
		"   jo   3f\n"
		"   ahi  %0,-8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"   slgr %0,%0\n"
		"0: clg  %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: cl   %1,0(%0,%4)\n"
		"   jne  2f\n"
		"   aghi %0,4\n"
		"2: l    %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   aghi %0,24\n"
		"   lghi %1,0xff\n"
		"   tmlh %2,0xffff\n"
		"   jo   3f\n"
		"   aghi %0,-16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0xff00\n"
		"   jo   4f\n"
		"   aghi %0,-8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long word, bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
#ifndef __s390x__
		asm("   ic   %0,0(%1)\n"
		    "   icm  %0,2,1(%1)\n"
		    "   icm  %0,4,2(%1)\n"
		    "   icm  %0,8,3(%1)"
		    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
		asm("   lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(word >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + ext2_find_first_zero_bit(p, size);
}

/* Bitmap functions for the minix filesystem.  */
/* FIXME !!! */
#define minix_test_and_set_bit(nr,addr) \
	test_and_set_bit(nr,(unsigned long *)addr)
#define minix_set_bit(nr,addr) \
	set_bit(nr,(unsigned long *)addr)
#define minix_test_and_clear_bit(nr,addr) \
	test_and_clear_bit(nr,(unsigned long *)addr)
#define minix_test_bit(nr,addr) \
	test_bit(nr,(unsigned long *)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */