cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

c-octeon.c (8802B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>

#include <asm/octeon/octeon.h>

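/*
 * Per-core saved CP0 CacheErr (Dcache) values; the low-level cache
 * error handler saves into this array before the C handlers below run.
 */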
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/*
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

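/*
 * On Octeon, a single synci flushes the entire local icache, so no
 * per-line loop is needed and range arguments can be ignored.
 */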
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

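	/*
	 * Make sure prior stores (e.g. newly written instructions) are
	 * visible before any core invalidates its icache.
	 */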
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
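	/* The local icache was already flushed above. */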
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}

/*
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm:	    Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}

/*
 * Flush a range of kernel addresses out of the icache
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_range - Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  beginning address for flush
 * @end:    ending address for flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

/**
 * octeon_flush_cache_page - Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
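	/* Octeon keeps the dcache coherent, so this should be unreachable. */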
	BUG();
}

/*
 * Probe Octeon's caches
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
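	/*
	 * Standard MIPS Config1 cache fields: IL (bits 21:19) encodes the
	 * line size as 2 << IL, IS (bits 24:22) the sets per way as
	 * 64 << IS, and IA (bits 18:16) the associativity as IA + 1.
	 */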
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
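		/* The way size is a power of two, so ffs() - 1 is its log2. */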
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_info("Primary instruction cache %ldkB, %s, %d way, "
			"%d sets, linesize %d bytes.\n",
			icache_size >> 10,
			cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_info("Primary data cache %ldkB, %d-way, %d sets, "
			"linesize %d bytes.\n",
			dcache_size >> 10, c->dcache.ways,
			c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
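	/*
	 * Copy the Octeon-specific exception handler to the cache error
	 * vector, 0x100 bytes past the exception base.
	 */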
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/*
 * Set up the Octeon cache flush routines
 */
void octeon_cache_init(void)
{
	probe_octeon();

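	/* No dcache aliasing, so shared mappings need only page alignment. */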
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;
	__flush_icache_user_range	= octeon_flush_icache_range;
	__local_flush_icache_user_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

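	/* Generate page clear/copy routines tuned to the probed caches. */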
	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception. Errors are first offered to any
 * registered notifiers (e.g. the Octeon EDAC driver); if none handles
 * the event, the default handling below logs the error registers.
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
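
/*
 * Sketch of a chain client (illustrative only; the names below are not
 * part of this file). @val is 0 for a recoverable error and 1 for a
 * non-recoverable one; returning NOTIFY_STOP (or NOTIFY_OK) suppresses
 * the default logging in co_cache_error_call_notifiers():
 *
 *	static int my_cache_error_event(struct notifier_block *nb,
 *					unsigned long val, void *data)
 *	{
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_cache_error_event,
 *	};
 *
 *	register_co_cache_error_notifier(&my_notifier);
 */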

static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called from the low-level handler when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called from the low-level handler when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}