arch/loongarch/include/asm/cmpxchg.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>
#define __xchg_asm(amswap_db, m, val)		\
({						\
		__typeof(val) __ret;		\
						\
		__asm__ __volatile__ (		\
		" "amswap_db" %1, %z2, %0 \n"	\
		: "+ZB" (*m), "=&r" (__ret)	\
		: "Jr" (val)			\
		: "memory");			\
						\
		__ret;				\
})
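
/*
 * Illustrative note (not part of the original header): in the 4- and
 * 8-byte cases below, __xchg_asm() expands to a single amswap_db.w /
 * amswap_db.d instruction, which atomically swaps a register with the
 * naturally aligned word at *m and yields the old contents in __ret;
 * the "_db" suffix selects the barrier form of the AM* instruction.
 * A hypothetical use:
 *
 *	u32 word, newval, oldval;
 *	oldval = __xchg_asm("amswap_db.w", &word, newval);
 */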
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
					unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z5	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
	: "memory");

	return (old32 & mask) >> shift;
}
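
/*
 * Worked example (illustrative, not part of the original header):
 * exchanging a u8 whose address ends in ...2 on this little-endian
 * architecture gives
 *
 *	shift = 2 * BITS_PER_BYTE = 16;
 *	mask  = 0xff << 16        = 0x00ff0000;
 *
 * so the ll.w/sc.w loop above clears bits 23:16 of the containing
 * aligned word, ORs in (val << 16), and the previous byte is recovered
 * as (old32 & 0x00ff0000) >> 16.
 */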
static __always_inline unsigned long
__arch_xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small(ptr, x, size);

	case 4:
		return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);

	case 8:
		return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);

	default:
		BUILD_BUG();
	}

	return 0;
}
#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = (__typeof__(*(ptr)))					\
		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	__res;								\
})
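
/*
 * Usage sketch (hypothetical example, not part of the original header):
 *
 *	unsigned int flag = 1;
 *	unsigned int was = arch_xchg(&flag, 0);
 *
 * `was` receives the previous value of `flag` and the store is atomic;
 * 4- and 8-byte objects use amswap_db directly, while 1- and 2-byte
 * objects go through the __xchg_small() read-modify-write loop above.
 */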
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(old) __ret;						\
									\
	__asm__ __volatile__(						\
	"1:	" ld "	%0, %2		# __cmpxchg_asm \n"		\
	"	bne	%0, %z3, 2f			\n"		\
	"	move	$t0, %z4			\n"		\
	"	" st "	$t0, %1				\n"		\
	"	beqz	$t0, 1b				\n"		\
	"2:						\n"		\
	__WEAK_LLSC_MB							\
	: "=&r" (__ret), "=ZB"(*m)					\
	: "ZB"(*m), "Jr" (old), "Jr" (new)				\
	: "t0", "memory");						\
									\
	__ret;								\
})
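
/*
 * Sketch of the control flow above (illustrative only, pseudo-C):
 *
 *	do {
 *		__ret = *m;			// ld: linked load
 *		if (__ret != old)
 *			goto out;		// label 2:
 *	} while (!store_conditional(m, new));	// st: fails if reservation lost
 *	out:
 *
 * The store-conditional writes `new` only if *m was left untouched since
 * the linked load; otherwise $t0 reads back as 0 and execution retries
 * from label 1:. The __WEAK_LLSC_MB after label 2: is there to preserve
 * ordering on the compare-failed path, where no store-conditional runs.
 */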
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
					   unsigned int new, unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	old <<= shift;
	new <<= shift;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	and		%1, %0, %z4	\n"
	"	bne		%1, %z5, 2f	\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z6	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	"	b		3f		\n"
	"2:					\n"
	__WEAK_LLSC_MB
	"3:					\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
	: "memory");

	return (old32 & mask) >> shift;
}
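
/*
 * Worked example (illustrative, not part of the original header):
 * a cmpxchg on a u16 at an address ending in ...2 gives shift = 16 and
 * mask = 0xffff0000. The loop above then:
 *
 *	1. loads the containing aligned word (old32),
 *	2. compares only bits 31:16 against (old << 16), bailing out to
 *	   label 2: on mismatch,
 *	3. otherwise splices (new << 16) into those bits and attempts sc.w,
 *	   retrying from 1: if the reservation was lost.
 *
 * Either way the previous halfword is (old32 & mask) >> shift.
 */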
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
				     (u32)old, new);

	case 8:
		return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
				     (u64)old, new);

	default:
		BUILD_BUG();
	}

	return 0;
}
#define arch_cmpxchg_local(ptr, old, new)				\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))
#define arch_cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = arch_cmpxchg_local((ptr), (old), (new));		\
									\
	__res;								\
})
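
/*
 * Usage sketch (hypothetical example, not part of the original header):
 * the classic compare-and-swap retry loop built on arch_cmpxchg().
 *
 *	static inline void example_add(unsigned int *counter, unsigned int n)
 *	{
 *		unsigned int old = READ_ONCE(*counter), prev;
 *
 *		while ((prev = arch_cmpxchg(counter, old, old + n)) != old)
 *			old = prev;
 *	}
 *
 * arch_cmpxchg() returns the value actually found at *counter, so the
 * loop succeeds exactly when that value still equals `old`.
 */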
#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
  })

#define arch_cmpxchg64(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __ASM_CMPXCHG_H */