/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

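/*
 * Emulate a 1 or 2 byte xchg() using a 32-bit cmpxchg() on the naturally
 * aligned 4 byte word that contains the target location.
 */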
unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

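	/*
	 * Retry the exchange until the containing 32-bit word is updated
	 * without interference from a concurrent writer.
	 */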
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

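/*
 * Emulate a 1 or 2 byte cmpxchg() using a 32-bit cmpxchg() on the naturally
 * aligned 4 byte word that contains the target location. Returns the value
 * previously held at ptr, as callers of cmpxchg() expect.
 */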
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

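	/*
	 * Retry until the expected old value no longer matches (failure) or
	 * the 32-bit exchange succeeds.
	 */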
	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}