cmpxchg.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)							\
static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
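
/*
 * Illustrative sketch (not part of the original header): one way a caller
 * might use the xchg family above. arch_xchg() is fully ordered, so a
 * test-and-set taken with it keeps the critical section contained; the
 * unlock side only needs release semantics. The "busy" flag and the
 * example_* names below are hypothetical.
 *
 *	static inline void example_lock(unsigned int *busy)
 *	{
 *		while (arch_xchg(busy, 1))	// returns the previous value
 *			;			// spin until we swapped in over 0
 *	}
 *
 *	static inline void example_unlock(unsigned int *busy)
 *	{
 *		arch_xchg_release(busy, 0);
 *	}
 */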
#define __CMPXCHG_CASE(name, sz)					\
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,	\
					      u##sz old,		\
					      u##sz new)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_case_##name##sz,		\
				ptr, old, new);				\
}

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name)						\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					  unsigned long old2,		\
					  unsigned long new1,		\
					  unsigned long new2,		\
					  volatile void *ptr)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_double##name,			\
				old1, old2, new1, new2, ptr);		\
}

__CMPXCHG_DBL(   )
__CMPXCHG_DBL(_mb)

#undef __CMPXCHG_DBL

#define __CMPXCHG_GEN(sfx)						\
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
						    unsigned long old,	\
						    unsigned long new,	\
						    int size)		\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
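
/*
 * Illustrative sketch (not part of the original header): the classic
 * compare-and-swap retry loop built on arch_cmpxchg(). arch_cmpxchg()
 * returns the value it actually observed at *p, so the swap succeeded
 * iff that equals "old"; on a mismatch the loop re-reads and retries.
 * example_inc_saturating() is a hypothetical name, and U32_MAX is
 * assumed available from <linux/limits.h>.
 *
 *	static inline u32 example_inc_saturating(u32 *p)
 *	{
 *		u32 old, new;
 *
 *		do {
 *			old = READ_ONCE(*p);
 *			if (old == U32_MAX)
 *				break;		// saturate instead of wrapping
 *			new = old + 1;
 *		} while (arch_cmpxchg(p, old, new) != old);
 *
 *		return old;
 *	}
 */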
#define arch_cmpxchg_local		arch_cmpxchg_relaxed

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})

#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr)			\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static __always_inline void __cmpwait##sfx(volatile void *ptr,		\
					   unsigned long val,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))

#endif /* __ASM_CMPXCHG_H */
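
/*
 * Illustrative sketches (not part of the original header).
 *
 * arch_cmpxchg_double() operates on a pair of adjacent, naturally aligned
 * 64-bit words and, unlike the single-word forms, returns 1 on success and
 * 0 on failure; __cmpxchg_double_check() only catches wrong sizes and
 * non-adjacent pointers at build time. The struct and function names here
 * are hypothetical:
 *
 *	struct example_pair {
 *		unsigned long lo;
 *		unsigned long hi;
 *	};
 *
 *	static inline int example_swap_pair(struct example_pair *p,
 *					    unsigned long ol, unsigned long oh,
 *					    unsigned long nl, unsigned long nh)
 *	{
 *		return arch_cmpxchg_double(&p->lo, &p->hi, ol, oh, nl, nh);
 *	}
 *
 * __cmpwait_relaxed() parks the CPU in WFE until the watched location is
 * observed to change (or a spurious wake-up occurs), so callers must
 * re-check the condition in a loop; it is the primitive behind arm64's
 * smp_cond_load_relaxed()/smp_cond_load_acquire() in <asm/barrier.h>:
 *
 *	static inline void example_wait_for_nonzero(u32 *p)
 *	{
 *		u32 v;
 *
 *		while (!(v = READ_ONCE(*p)))
 *			__cmpwait_relaxed(p, v);	// sleep while *p == v
 *	}
 */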