| author | Alex Williamson <alex.williamson@redhat.com> | 2013-09-04 11:25:44 -0600 |
|---|---|---|
| committer | Alex Williamson <alex.williamson@redhat.com> | 2013-09-04 11:25:44 -0600 |
| commit | 3bc4f3993b93dbf1f6402e2034a2e20eb07db807 (patch) | |
| tree | 592283e59e121b76355836295d6016fe33cfc5d1 /include/linux/lockref.h | |
| parent | 17638db1b88184d8895f3f4551c936d7480a1d3f (diff) | |
| parent | cb3e4330e697dffaf3d9cefebc9c7e7d39c89f2e (diff) | |
Merge remote branch 'origin/master' into next-merge
Diffstat (limited to 'include/linux/lockref.h')
| -rw-r--r-- | include/linux/lockref.h | 36 |
1 file changed, 36 insertions, 0 deletions
```diff
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..ca07b5028b01
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,36 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+        union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+                aligned_u64 lock_count;
+#endif
+                struct {
+                        spinlock_t lock;
+                        unsigned int count;
+                };
+        };
+};
+
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
+
+#endif /* __LINUX_LOCKREF_H */
```
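The header comment above describes the trick rather abstractly. The sketch below is a small user-space illustration of the same idea, assuming a hypothetical `demo_lockref` that packs a lock flag and a reference count into one 64-bit word: a single compare-and-swap over the combined word bumps the count only while the lock half is observed to be free, which is the shape of the `CONFIG_CMPXCHG_LOCKREF` fast path. It is not the kernel's actual code (that lives in lib/lockref.c and operates on a real `spinlock_t`).

```c
/*
 * User-space sketch (not kernel code) of the idea in the lockref.h
 * comment: pack a lock flag and a count into one 64-bit word so the
 * common "get" can be done with a single compare-and-swap. This is
 * only an illustration of the cmpxchg fast path, not the kernel's
 * actual implementation.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_lockref {
        _Atomic uint64_t lock_count;    /* low 32 bits: lock flag, high 32 bits: count */
};

#define LOCK_MASK  ((uint64_t)0xffffffffu)
#define COUNT_UNIT ((uint64_t)1 << 32)

/*
 * Try to bump the count without taking the lock: the CAS only succeeds
 * while the lock half of the word is zero (unlocked), so the increment
 * is atomic with respect to lockers. Returns 1 on success, 0 if the
 * lock is held and the caller must fall back to the locked slow path.
 */
static int demo_lockref_get_fastpath(struct demo_lockref *ref)
{
        uint64_t old = atomic_load(&ref->lock_count);

        while ((old & LOCK_MASK) == 0) {
                /* On failure, 'old' is reloaded with the current value. */
                if (atomic_compare_exchange_weak(&ref->lock_count,
                                                 &old, old + COUNT_UNIT))
                        return 1;
        }
        return 0;
}

int main(void)
{
        struct demo_lockref ref = { .lock_count = 0 };

        demo_lockref_get_fastpath(&ref);
        demo_lockref_get_fastpath(&ref);
        printf("count = %llu\n",
               (unsigned long long)(atomic_load(&ref.lock_count) >> 32));
        return 0;
}
```

Returning 0 instead of spinning mirrors the contract in the header comment: when the lock is held, the caller is expected to fall back to acquiring it and doing the update under the lock, so the end result is semantically as if the spinlock had always been taken.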
