cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xscale-cp0.c (3976B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/xscale-cp0.c
 *
 * XScale DSP and iWMMXt coprocessor context switching and handling
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
#include <asm/cputype.h>

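/*
 * The mcrr/mrrc instructions used below are ARMv5TE additions; the .arch
 * directive lets the assembler accept them even if the kernel is built
 * for an earlier baseline architecture.
 */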
asm("	.arch armv5te\n");

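/*
 * The CP0 accumulator (acc0) is transferred as a pair of 32-bit words;
 * each thread's copy lives in thread_info->cpu_context.extra[].
 */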
static inline void dsp_save_state(u32 *state)
{
	__asm__ __volatile__ (
		"mrrc	p0, 0, %0, %1, c0\n"
		: "=r" (state[0]), "=r" (state[1]));
}

static inline void dsp_load_state(u32 *state)
{
	__asm__ __volatile__ (
		"mcrr	p0, 0, %0, %1, c0\n"
		: : "r" (state[0]), "r" (state[1]));
}

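/*
 * Thread notifier for the DSP coprocessor: clear the saved accumulator
 * when a thread is flushed, and save/restore it eagerly on every
 * context switch.
 */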
static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		thread->cpu_context.extra[0] = 0;
		thread->cpu_context.extra[1] = 0;
		break;

	case THREAD_NOTIFY_SWITCH:
		dsp_save_state(current_thread_info()->cpu_context.extra);
		dsp_load_state(thread->cpu_context.extra);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsp_notifier_block = {
	.notifier_call	= dsp_do,
};


#ifdef CONFIG_IWMMXT
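/*
 * With CONFIG_IWMMXT enabled the register file is switched lazily (see
 * the comment above xscale_cp0_init below): this notifier only releases
 * a thread's iWMMXt state on flush/exit and lets iwmmxt_task_switch()
 * handle the hand-over on context switch.
 */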
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */

	case THREAD_NOTIFY_EXIT:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call	= iwmmxt_do,
};
#endif


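/*
 * cp15 c15/c1 is the XScale Coprocessor Access Register (CPAR); its low
 * bits gate access to the corresponding coprocessors.
 */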
static u32 __init xscale_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c15, c1, 0\n\t"
		: "=r" (value));

	return value;
}

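/*
 * Write the access register, then run the CPWAIT-style sequence (read it
 * back, mov, sub pc) so the new setting takes effect before we return.
 */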
static void __init xscale_cp_access_write(u32 value)
{
	u32 temp;

	__asm__ __volatile__ (
		"mcr	p15, 0, %1, c15, c1, 0\n\t"
		"mrc	p15, 0, %0, c15, c1, 0\n\t"
		"mov	%0, %0\n\t"
		"sub	pc, pc, #4\n\t"
		: "=r" (temp) : "r" (value));
}

/*
 * Detect whether we have a MAC coprocessor (40 bit register) or an
 * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
 * into a coprocessor register and reading it back, and checking
 * whether the upper word survived intact.
 */
static int __init cpu_has_iwmmxt(void)
{
	u32 lo;
	u32 hi;

	/*
	 * This sequence is interpreted by the DSP coprocessor as:
	 *	mar	acc0, %2, %3
	 *	mra	%0, %1, acc0
	 *
	 * And by the iWMMXt coprocessor as:
	 *	tmcrr	wR0, %2, %3
	 *	tmrrc	%0, %1, wR0
	 */
	__asm__ __volatile__ (
		"mcrr	p0, 0, %2, %3, c0\n"
		"mrrc	p0, 0, %0, %1, c0\n"
		: "=r" (lo), "=r" (hi)
		: "r" (0), "r" (0x100));

	return !!hi;
}


/*
 * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
 * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
 * switch code handle iWMMXt context switching.  If on the other
 * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
 * all the time, and save/restore acc0 on context switch in non-lazy
 * fashion.
 */
static int __init xscale_cp0_init(void)
{
	u32 cp_access;

	/* do not attempt to probe iwmmxt on non-xscale family CPUs */
	if (!cpu_is_xscale_family())
		return 0;

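	/*
	 * CPAR bits 0 and 1 gate CP0 and CP1: clear both, then temporarily
	 * enable CP0 so the probe below can reach the coprocessor.
	 */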
	cp_access = xscale_cp_access_read() & ~3;
	xscale_cp_access_write(cp_access | 1);

	if (cpu_has_iwmmxt()) {
#ifndef CONFIG_IWMMXT
		pr_warn("CAUTION: XScale iWMMXt coprocessor detected, but kernel support is missing.\n");
#else
		pr_info("XScale iWMMXt coprocessor detected.\n");
		elf_hwcap |= HWCAP_IWMMXT;
		thread_register_notifier(&iwmmxt_notifier_block);
#endif
	} else {
		pr_info("XScale DSP coprocessor detected.\n");
		thread_register_notifier(&dsp_notifier_block);
		cp_access |= 1;
	}

	xscale_cp_access_write(cp_access);

	return 0;
}

late_initcall(xscale_cp0_init);