cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

paravirt.h (18999B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
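/*
 * Each hook below dispatches through the global pv_ops structure declared
 * in <asm/paravirt_types.h>.  Roughly, the PVOP_CALLn()/PVOP_VCALLn()
 * wrappers emit an indirect call through pv_ops plus a record in the
 * .parainstructions section so the call site can be patched later; the
 * PVOP_ALT_* variants additionally quote the native instruction sequence
 * that replaces the call when the guarding feature test (usually
 * ALT_NOT(X86_FEATURE_XENPV)) selects it.
 */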

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/static_call_types.h>
#include <asm/frame.h>

u64 dummy_steal_clock(int cpu);
u64 dummy_sched_clock(void);

DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void));

static inline u64 paravirt_sched_clock(void)
{
	return static_call(pv_sched_clock)();
}
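/*
 * pv_sched_clock is a static_call rather than a pv_ops slot; its default
 * target is the native sched_clock implementation and a hypervisor guest
 * may redirect it early during boot.  Illustrative sketch (the function
 * name is an example, not something defined in this header):
 *
 *	u64 my_hv_sched_clock(void);
 *	paravirt_set_sched_clock(my_hv_sched_clock);
 */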

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return static_call(pv_steal_clock)(cpu);
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
#endif

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	PVOP_VCALL0(cpu.io_delay);
#ifdef REALLY_SLOW_IO
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
#endif
}
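/*
 * PVOP_VCALL0(cpu.io_delay) expands, roughly, to an indirect call through
 * pv_ops.cpu.io_delay taking no arguments and returning nothing; the "V"
 * variants are for void hooks and the numeric suffix is the argument
 * count.  On bare metal the hook is native_io_delay(), typically a dummy
 * port write used to pace legacy port I/O.
 */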

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
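/*
 * Typical use of the accessors above, mirroring the native interface
 * (illustrative only):
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0UL, 7);
 */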

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
				"mov %%cr2, %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
			      "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(mmu.write_cr3, x,
			"mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})
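/*
 * Example use of the MSR wrappers above (MSR_EFER is only an illustration;
 * the index constants live in <asm/msr-index.h>, which this header does not
 * include):
 *
 *	u32 lo, hi;
 *	u64 val;
 *	int err;
 *
 *	rdmsr(MSR_EFER, lo, hi);
 *	rdmsrl(MSR_EFER, val);
 *	err = rdmsr_safe(MSR_EFER, &lo, &hi);
 */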

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}
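/*
 * The PVOP_ALT_CALLEE1() pattern used by __pte() and the other page table
 * converters below is a callee-save paravirt call wrapped in an
 * alternative: when X86_FEATURE_XENPV is not set, the call is replaced by
 * the quoted "mov %rdi, %rax", i.e. the identity transformation, because
 * native page table values need no conversion.
 */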

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{

	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
					"mov %%rdi, %%rax",
					ALT_NOT(X86_FEATURE_XENPV));

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)
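/*
 * With 4-level paging (pgtable_l5_enabled() false) the pgd level is folded
 * onto p4d: set_pgd() above forwards to set_p4d() and pgd_clear() becomes a
 * no-op, so the separate mmu.set_pgd hook is only exercised with 5 levels.
 */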

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}
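/*
 * The lock.* hooks implement paravirt queued spinlocks: pv_wait()/pv_kick()
 * let a contended vCPU block in the hypervisor instead of spinning, while
 * the two ALT_NOT() sites above patch pv_queued_spin_unlock() back to a
 * plain byte store and pv_vcpu_is_preempted() to a "return false" (xor of
 * the return register) when the corresponding PV capability is not enabled.
 */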

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
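/*
 * Illustrative registration of a callee-save hook (the function name is an
 * example; real users live in the various hypervisor guest code):
 *
 *	__visible bool my_vcpu_is_preempted(long cpu);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_vcpu_is_preempted);
 *
 *	pv_ops.lock.vcpu_is_preempted =
 *		PV_CALLEE_SAVE(my_vcpu_is_preempted);
 *
 * __PV_IS_CALLEE_SAVE() is used instead when the target already follows the
 * restricted convention, e.g. a hand-written asm helper.
 */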

#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
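/*
 * The arch_local_* helpers above back the generic local_irq_save() and
 * local_irq_restore() machinery; on non-Xen-PV hardware the alternatives
 * reduce them to the bare pushf/pop, cli and sti instructions quoted in
 * each macro.
 */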
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection
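/*
 * _PVSITE emits the instruction sequence 'ops' and records its location,
 * type and length in .parainstructions; that table is walked at patch time
 * (apply_paravirt()) so each asm-level site can be rewritten, mirroring
 * what the C-side PVOP_* wrappers do.
 */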


#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)

#ifdef CONFIG_DEBUG_ENTRY
.macro PARA_IRQ_save_fl
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
		  ANNOTATE_RETPOLINE_SAFE;
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
.endm

#define SAVE_FLAGS	ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
				    ALT_NOT(X86_FEATURE_XENPV)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void paravirt_set_cap(void)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */