sync_bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_BITOPS_H
#define _ASM_X86_SYNC_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#include <asm/rmwcc.h>

#define ADDR (*(volatile long *)addr)

/**
 * sync_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * sync_clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

/**
 * sync_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/**
 * sync_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

#define sync_test_bit(nr, addr) test_bit(nr, addr)

#undef ADDR

#endif /* _ASM_X86_SYNC_BITOPS_H */
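These helpers are kernel-internal and depend on <asm/rmwcc.h>, so they cannot be compiled stand-alone. As a rough illustration of the documented semantics only — bit 0 is the LSB of addr[0], and bit numbers beyond one word spill into the following words — here is a hypothetical userspace sketch that emulates sync_set_bit() and sync_test_and_clear_bit() with the GCC/Clang __atomic builtins instead of the lock-prefixed bts/btr instructions. The emu_* names are invented for this example and are not part of the header.

/*
 * Hypothetical userspace emulation (not part of sync_bitops.h): models
 * the bit-addressing and return-value semantics described in the
 * comments above, using compiler atomics rather than inline assembly.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static inline void emu_sync_set_bit(long nr, volatile unsigned long *addr)
{
	/* bit 0 is the LSB of addr[0]; bit BITS_PER_LONG is the LSB of addr[1] */
	__atomic_fetch_or(&addr[nr / BITS_PER_LONG],
			  1UL << (nr % BITS_PER_LONG), __ATOMIC_SEQ_CST);
}

static inline bool emu_sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long old = __atomic_fetch_and(&addr[nr / BITS_PER_LONG],
					       ~mask, __ATOMIC_SEQ_CST);

	return old & mask;	/* != 0 iff the bit was previously set */
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	/* bit number past the first word lands in map[1], bit 1 */
	emu_sync_set_bit(BITS_PER_LONG + 1, map);
	printf("map[1] = %#lx\n", map[1]);	/* prints 0x2 */
	printf("was set: %d\n",
	       emu_sync_test_and_clear_bit(BITS_PER_LONG + 1, map));
	printf("map[1] = %#lx\n", map[1]);	/* prints 0 */
	return 0;
}

Note the deliberate asymmetry mirrored from the header: the set/clear/change operations return nothing, while only the test_and_* variants report the bit's old value.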