cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core_irongate.c (10441B)


// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/core_irongate.c
 *
 * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
 *
 *	Copyright (C) 1999 Alpha Processor, Inc.,
 *		(David Daniel, Stig Telfer, Soohoon Lee)
 *
 * Code common to all IRONGATE core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_irongate.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/memblock.h>

#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

igcsr32 *IronECC;

/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address accordingly.  It is therefore not safe
 * to have concurrent invocations of the configuration space access
 * routines, but there really shouldn't be any need for this.
 *
 *	addr[31:24]		reserved
 *	addr[23:16]		bus number (8 bits = 256 possible buses)
 *	addr[15:11]		device number (5 bits)
 *	addr[10: 8]		function number
 *	addr[ 7: 2]		register number
 *
 * For IRONGATE:
 *    if (bus = addr[23:16]) == 0
 *    then
 *	  type 0 config cycle:
 *	      addr_on_pci[31:11] = id selection for device = addr[15:11]
 *	      addr_on_pci[10: 2] = addr[10: 2] ???
 *	      addr_on_pci[ 1: 0] = 00
 *    else
 *	  type 1 config cycle (pass on with no decoding):
 *	      addr_on_pci[31:24] = 0
 *	      addr_on_pci[23: 2] = addr[23: 2]
 *	      addr_on_pci[ 1: 0] = 01
 *    fi
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= IRONGATE_CONF;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}
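
/*
 * Worked example (illustrative values only): a dword access to register
 * 0x08 of bus 0, device 7, function 0 has devfn = (7 << 3) | 0 = 0x38, so
 *
 *	addr = (0 << 16) | (0x38 << 8) | 0x08 = 0x3808
 *
 * which is then offset into the IRONGATE_CONF window.  Since bus == 0,
 * type1 is cleared and the access is issued as a type 0 config cycle.
 */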

static int
irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops irongate_pci_ops =
{
	.read =		irongate_read_config,
	.write =	irongate_write_config,
};
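
/*
 * Minimal usage sketch (assuming a valid struct pci_dev *pdev on this
 * hose): the generic PCI accessors route through these ops, e.g.
 *
 *	u32 id;
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *
 * ends up in irongate_read_config() with size == 4, returning the vendor
 * and device IDs in the low and high halves of id.
 */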

int
irongate_pci_clr_err(void)
{
	unsigned int nmi_ctl=0;
	unsigned int IRONGATE_jd;

again:
	IRONGATE_jd = IRONGATE0->stat_cmd;
	printk("Iron stat_cmd %x\n", IRONGATE_jd);
	IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = IRONGATE0->stat_cmd;  /* re-read to force write */

	IRONGATE_jd = *IronECC;
	printk("Iron ECC %x\n", IRONGATE_jd);
	*IronECC = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = *IronECC;  /* re-read to force write */

	/* Clear ALI NMI */
	nmi_ctl = inb(0x61);
	nmi_ctl |= 0x0c;
	outb(nmi_ctl, 0x61);
	nmi_ctl &= ~0x0c;
	outb(nmi_ctl, 0x61);

	IRONGATE_jd = *IronECC;
	if (IRONGATE_jd & 0x300) goto again;

	return 0;
}

#define IRONGATE_3GB 0xc0000000UL

/* On Albacore (aka UP1500) with 4GB of RAM we have to reserve some
   memory for PCI. At this point we just reserve memory above 3GB. Most
   of this memory will be freed after PCI setup is done. */
static void __init
albacore_init_arch(void)
{
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;
	unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
	struct percpu_struct *cpu;
	int pal_rev, pal_var;

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	pal_rev = cpu->pal_revision & 0xffff;
	pal_var = (cpu->pal_revision >> 16) & 0xff;

	/* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
	   the CPU incorrectly (leave speculative stores enabled),
	   which causes memory corruption under certain conditions.
	   Issue a warning for such consoles. */
	if (alpha_using_srm &&
	    (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
		printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
				    "or later\n");

	if (pci_mem > IRONGATE_3GB)
		pci_mem = IRONGATE_3GB;
	IRONGATE0->pci_mem = pci_mem;
	alpha_mv.min_mem_address = pci_mem;
	if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		extern unsigned long initrd_start, initrd_end;
		extern void *move_initrd(unsigned long);

		/* Move the initrd out of the way. */
		if (initrd_end && __pa(initrd_end) > pci_mem) {
			unsigned long size;

			size = initrd_end - initrd_start;
			memblock_free((void *)initrd_start, PAGE_ALIGN(size));
			if (!move_initrd(pci_mem))
				printk("irongate_init_arch: initrd too big "
				       "(%ldK)\ndisabling initrd\n",
				       size / 1024);
		}
#endif
		memblock_reserve(pci_mem, memtop - pci_mem);
		printk("irongate_init_arch: temporarily reserving "
			"region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
	}
}
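
/*
 * The pci_mem computation in albacore_init_arch() rounds memtop up to the
 * next 16MB boundary: e.g. with memtop = 0x7fe80000, pci_mem becomes
 * (0x7fe80000 + 0x1000000) & ~0xffffff = 0x80000000, so the PCI window
 * starts just above the top of RAM (and is clamped to 3GB on fully
 * populated machines).
 */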

static void __init
irongate_setup_agp(void)
{
	/* Disable the GART window. AGPGART doesn't work due to yet
	   unresolved memory coherency issues... */
	IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
	alpha_agpgart_size = 0;
}

void __init
irongate_init_arch(void)
{
	struct pci_controller *hose;
	int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006;	/* Albacore? */

	IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;

	irongate_pci_clr_err();

	if (amd761)
		albacore_init_arch();

	irongate_setup_agp();

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* This is for userland consumption.  For some reason, the 40-bit
	   PIO bias that we use in the kernel through KSEG didn't work for
	   the page table based user mappings.  So make sure we get the
	   43-bit PIO bias.  */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;

	hose->sg_isa = hose->sg_pci = NULL;
	__direct_map_base = 0;
	__direct_map_size = 0xffffffff;
}
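
/*
 * The dense_mem_base/dense_io_base arithmetic above keeps the low 40
 * address bits of the KSEG-biased window and sets bit 43
 * (0x80000000000UL), i.e. it re-expresses the same window with the
 * 43-bit PIO bias that the page-table based user mappings expect.
 */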

/*
 * IO map and AGP support
 */
#include <linux/vmalloc.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/export.h>

#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))

#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
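
/*
 * These macros implement a two-level GATT lookup on a bus address:
 * bits [31:22] index the gatt_pages[] directory and bits [21:12] pick
 * one of the 1024 entries in the selected GATT page, so each GATT page
 * covers 4MB of address space in 4KB steps.  For example, for
 * baddr = 0x40abc000, GET_PAGE_DIR_OFF(baddr) is 0x102 and
 * GET_GATT_OFF(baddr) is 0x2bc.
 */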

void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long baddr, last;
	u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
	unsigned long gart_bus_addr;

	if (!alpha_agpgart_size)
		return (void __iomem *)(addr + IRONGATE_MEM);

	gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
			PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * Check for within the AGP aperture...
	 */
	do {
		/*
		 * Check the AGP area
		 */
		if (addr >= gart_bus_addr && addr + size - 1 <
		    gart_bus_addr + alpha_agpgart_size)
			break;

		/*
		 * Not found - assume legacy ioremap
		 */
		return (void __iomem *)(addr + IRONGATE_MEM);
	} while(0);

	mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
			PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);

	gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */

	/*
	 * Adjust the limits (mappings must be page aligned)
	 */
	if (addr & ~PAGE_MASK) {
		printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
		       addr);
		return (void __iomem *)(addr + IRONGATE_MEM);
	}
	last = addr + size - 1;
	size = PAGE_ALIGN(last) - addr;

#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
	printk("irongate_ioremap:  gart_bus_addr  0x%lx\n", gart_bus_addr);
	printk("irongate_ioremap:  gart_aper_size 0x%lx\n", gart_aper_size);
	printk("irongate_ioremap:  mmio_regs      %p\n", mmio_regs);
	printk("irongate_ioremap:  gatt_pages     %p\n", gatt_pages);

	for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
		printk("irongate_ioremap:  cur_gatt %p pte 0x%x\n",
		       cur_gatt, pte);
	}
#endif

	/*
	 * Map it
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area) return NULL;

	for(baddr = addr, vaddr = (unsigned long)area->addr;
	    baddr <= last;
	    baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;

		if (__alpha_remap_area_pages(vaddr,
					     pte, PAGE_SIZE, 0)) {
			printk("AGP ioremap: FAILED to map...\n");
			vfree(area->addr);
			return NULL;
		}
	}

	flush_tlb_all();

	vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
	       addr, size, vaddr);
#endif
	return (void __iomem *)vaddr;
}
EXPORT_SYMBOL(irongate_ioremap);
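
/*
 * As used in the mapping loop above, each 32-bit GATT (and directory)
 * entry appears to hold the physical address of the backing page with
 * bit 0 serving as a valid bit; masking with ~1 recovers the physical
 * address that __alpha_remap_area_pages() wires into the new
 * vmalloc-space mapping.
 */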

void
irongate_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (((long)addr >> 41) == -2)
		return;	/* kseg map, nothing to do */
	if (addr)
		return vfree((void *)(PAGE_MASK & addr));
}
EXPORT_SYMBOL(irongate_iounmap);