cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmx-helper.c (1289B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

/*
 * Prepare to use VMX (Altivec) registers for a user copy: returns 1 if
 * the VMX path may be used, 0 if the caller must fall back to the
 * ordinary copy (e.g. when called from interrupt context).
 */
int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable();
	return 0;
}

/*
 * Like enter_vmx_usercopy(), but for in-kernel VMX memory ops such as
 * memcpy(); these do not access user memory, so pagefault_disable() is
 * not needed here.
 */
int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
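
The helpers above bracket the VMX-accelerated copy routines: a caller first asks permission with enter_vmx_usercopy()/enter_vmx_ops(), runs the vector copy only if that succeeded, and otherwise falls back to a scalar copy. The real callers are the power7 assembly routines (e.g. __copy_tofrom_user_power7); the C sketch below is only an illustration of that calling pattern, and vmx_copy_body() and scalar_copy() are hypothetical stand-ins for the vector and fallback copy loops, assuming the same headers as the file above.

/* Hypothetical stand-ins for the VMX and scalar copy loops. */
extern unsigned long vmx_copy_body(void __user *to, const void *from,
				   unsigned long n);
extern unsigned long scalar_copy(void __user *to, const void *from,
				 unsigned long n);

static unsigned long copy_user_vmx_sketch(void __user *to, const void *from,
					  unsigned long n)
{
	unsigned long left;

	/* VMX unavailable (e.g. interrupt context): use the plain copy. */
	if (!enter_vmx_usercopy())
		return scalar_copy(to, from, n);

	/* Vector copy; returns the number of bytes not copied, if any. */
	left = vmx_copy_body(to, from, n);

	/*
	 * exit_vmx_usercopy() returns 0 so the assembly callers can tail
	 * call it on the success path; here it only restores the context.
	 */
	exit_vmx_usercopy();

	return left;
}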