// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */
#include <linux/bitops.h>
#include <asm/cmpxchg.h>
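
/*
 * Sub-word atomics: the LL/SC instructions behind arch_cmpxchg() operate
 * on full 32-bit words, so 1- and 2-byte xchg()/cmpxchg() are emulated
 * here by a masked read-modify-write of the aligned word that contains
 * the value.
 */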

unsigned long __xchg_small(volatile void *ptr, unsigned long val,
			   unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;
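
	/* e.g. size == 1 gives mask == 0xff; size == 2 gives mask == 0xffff. */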

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that
	 * includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
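
	/*
	 * Worked example: a 1-byte value at byte offset 2 within its word.
	 * Little endian: shift == 2 bytes == 16 bits, so mask becomes
	 * 0x00ff0000. Big endian: shift == 2 ^ 3 == 1 byte == 8 bits, so
	 * mask becomes 0x0000ff00.
	 */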

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
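
	/*
	 * Retry loop: arch_cmpxchg() returns the value actually found at
	 * ptr32, so if another CPU changed any byte of the word between
	 * our load and the cmpxchg, the compare fails and we retry with
	 * the freshly observed value.
	 */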
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}
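
/*
 * Example caller (sketch; "byte_flag" is illustrative, not from this
 * file): the generic xchg() wrapper in asm/cmpxchg.h is expected to
 * dispatch 1- and 2-byte accesses here, so
 *
 *	u8 prev = xchg(&byte_flag, 1);
 *
 * ends up as __xchg_small(&byte_flag, 1, 1).
 */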

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}
EXPORT_SYMBOL(__cmpxchg_small);
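
/*
 * Usage sketch ("byte_lock" is illustrative, not from this file): the
 * asm/cmpxchg.h wrappers are expected to route 1- and 2-byte cmpxchg()
 * calls here, and success is tested as with full-word cmpxchg:
 *
 *	if (cmpxchg(&byte_lock, 0, 1) == 0)
 *		// we observed 0 and stored 1
 *
 * which reaches __cmpxchg_small(&byte_lock, 0, 1, 1).
 */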