cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qspinlock_paravirt.h (1941B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
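/*
 * (Editor's note, best-effort) The eight registers are the System V
 * AMD64 caller-saved set minus %rax, which carries the return value:
 * %rdi, %rsi, %rdx, %rcx, %r8, %r9, %r10 and %r11.
 */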
#ifdef CONFIG_64BIT

PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
#define PV_UNLOCK		"__raw_callee_save___pv_queued_spin_unlock"
#define PV_UNLOCK_SLOWPATH	"__raw_callee_save___pv_queued_spin_unlock_slowpath"

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code:
 *
 * void __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 */
asm    (".pushsection .text;"
	".globl " PV_UNLOCK ";"
	".type " PV_UNLOCK ", @function;"
	".align 4,0x90;"
	PV_UNLOCK ": "
	ASM_ENDBR
	FRAME_BEGIN
	"push  %rdx;"			/* preserve %rdx (callee-save convention) */
	"mov   $0x1,%eax;"		/* %eax = _Q_LOCKED_VAL (expected old value) */
	"xor   %edx,%edx;"		/* %dl = 0 (new value) */
	LOCK_PREFIX "cmpxchg %dl,(%rdi);" /* if locked == 1: store 0; %al = old value */
	"cmp   $0x1,%al;"
	"jne   .slowpath;"		/* waiters queued: take the slow path */
	"pop   %rdx;"
	FRAME_END
	ASM_RET
	".slowpath: "
	"push   %rsi;"			/* preserve %rsi across the call */
	"movzbl %al,%esi;"		/* second argument: lockval */
	"call " PV_UNLOCK_SLOWPATH ";"
	"pop    %rsi;"
	"pop    %rdx;"
	FRAME_END
	ASM_RET
	".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
	".popsection");
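/*
 * (Editor's note, best-effort) Only %rdx, and %rsi on the slow path,
 * need explicit saving here: cmpxchg leaves the other registers intact,
 * %rax is the one register the callee-save convention lets the thunk
 * clobber, and PV_UNLOCK_SLOWPATH is itself a register-saving thunk
 * (generated by PV_CALLEE_SAVE_REGS_THUNK above), so the call preserves
 * everything else.
 */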

#else /* CONFIG_64BIT */

extern void __pv_queued_spin_unlock(struct qspinlock *lock);
PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);

#endif /* CONFIG_64BIT */
#endif
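
The fast/slow-path split above can be tried outside the kernel. Below is a
minimal userspace sketch, assuming GCC/Clang __atomic builtins as a stand-in
for the kernel's cmpxchg(); the pv_unlock* names and the demo in main() are
illustrative, not kernel API.

	#include <stdint.h>
	#include <stdio.h>

	#define _Q_LOCKED_VAL 1u

	struct qspinlock { uint8_t locked; };

	/* Stand-in for __pv_queued_spin_unlock_slowpath(): in the kernel
	 * this kicks the halted vCPU waiting at the head of the queue. */
	static void pv_unlock_slowpath(struct qspinlock *lock, uint8_t lockval)
	{
		printf("slow path: lockval=%u\n", (unsigned)lockval);
		lock->locked = 0;
	}

	static void pv_unlock(struct qspinlock *lock)
	{
		uint8_t expected = _Q_LOCKED_VAL;

		/* Equivalent of: lockval = cmpxchg(&lock->locked,
		 * _Q_LOCKED_VAL, 0); on failure, `expected` receives the
		 * old value, just as the asm version finds it in %al. */
		if (__atomic_compare_exchange_n(&lock->locked, &expected, 0,
						0, __ATOMIC_RELEASE,
						__ATOMIC_RELAXED))
			return;		/* fast path: no one queued behind us */

		pv_unlock_slowpath(lock, expected);
	}

	int main(void)
	{
		struct qspinlock lock = { .locked = _Q_LOCKED_VAL };

		pv_unlock(&lock);	/* lock byte is 1: fast path */

		lock.locked = 3;	/* any value != _Q_LOCKED_VAL: slow path */
		pv_unlock(&lock);
		return 0;
	}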