barrier_64.h (1851B)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
#define __TOOLS_LINUX_SPARC64_BARRIER_H

/* Copied from the kernel sources to tools/:
 *
 * These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})

#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
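
For reference, the mb() definition above expands to roughly the following instruction sequence. The membar sits in the delay slot of an unconditional branch that is statically predicted taken, so the "membar shortly after a mispredicted branch" condition of Errata #51 can never apply to it:

	ba,pt	%xcc, 1f
	 membar	#StoreLoad
1: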
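
Under TSO the only reordering the hardware may perform is letting a store pass a later load via the store buffer; load-load and store-store order is already guaranteed, which is why rmb() and wmb() reduce to compiler barriers while mb() still needs a real #StoreLoad membar. A minimal sketch of the store-buffering pattern that mb() exists to forbid, using a Dekker-style handshake; flag0, flag1, cpu0 and cpu1 are illustrative names, not part of this header, and WRITE_ONCE()/READ_ONCE() are assumed from tools/include/linux/compiler.h:

static int flag0, flag1;

static void cpu0(void)
{
	WRITE_ONCE(flag0, 1);	/* announce intent to enter */
	mb();			/* order the store above before the load below */
	if (!READ_ONCE(flag1))
		;		/* cpu1 is not contending: safe to enter */
}

static void cpu1(void)		/* symmetric code on the other CPU */
{
	WRITE_ONCE(flag1, 1);
	mb();
	if (!READ_ONCE(flag0))
		;		/* cpu0 is not contending: safe to enter */
}

Without the mb(), each CPU's flag store could still be sitting in its store buffer when it loads the other CPU's flag, so both sides could read zero and enter at once.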
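
smp_store_release() and smp_load_acquire() get one-way publish/consume ordering from the same TSO guarantees: a compiler barrier suffices because the hardware already keeps payload stores before the flag store, and the flag load before later payload loads. A minimal message-passing sketch, again with illustrative names (msg, ready, producer, consumer):

#include <assert.h>

static int msg;
static int ready;

static void producer(void)
{
	msg = 42;			/* write the payload */
	smp_store_release(&ready, 1);	/* publish: payload ordered before flag */
}

static void consumer(void)
{
	if (smp_load_acquire(&ready))	/* consume: flag ordered before payload */
		assert(msg == 42);	/* cannot fail: acquire observed the release */
}

This is the kind of pairing the tools/ ring-buffer helpers rely on when reading the perf mmap'ed data head and publishing the data tail.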