arch/mips/kernel/cmpxchg.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>
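
/*
 * These helpers emulate 1- and 2-byte xchg() & cmpxchg() in terms of the
 * native 4 byte cmpxchg(): MIPS LL/SC operate on word (or larger)
 * quantities, so sub-word atomics are built by operating on the aligned
 * 32-bit word that contains the target byte(s).
 */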

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));
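	/*
	 * Only 1- and 2-byte accesses are routed to this helper, so
	 * (size - 1) above is the natural alignment mask for the access.
	 */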

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;
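	/* For size == 1 this gives mask == 0xff; for size == 2, mask == 0xffff. */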

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that
	 * includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
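	/*
	 * For example, a 1-byte value at (ptr & 0x3) == 3 gives shift == 24
	 * and mask == 0xff000000 on little-endian; on big-endian the XOR
	 * flips the byte lane, so the same address gives shift == 0 and
	 * mask == 0xff.
	 */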

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
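
	/*
	 * Splice the (shifted) new value into the loaded word and attempt
	 * the 4 byte cmpxchg(); retry if another CPU modified the word in
	 * the meantime. On success, extract & return the bytes we replaced,
	 * matching xchg() semantics.
	 */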
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
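
	/*
	 * The 4 byte cmpxchg() below can fail because a neighbouring byte
	 * within the same word changed, even though the byte we care about
	 * still holds the expected old value; loop so that such failures
	 * retry rather than being reported as a comparison mismatch.
	 */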
	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}
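
/*
 * Usage sketch (illustrative; not part of the original file): callers do
 * not invoke these helpers directly. The xchg()/cmpxchg() macros in
 * <asm/cmpxchg.h> dispatch here for 1- and 2-byte operands, e.g.:
 *
 *	u8 flag = 0;
 *	u8 prev = cmpxchg(&flag, 0, 1);	// routed to __cmpxchg_small()
 */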