cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

acpi.c (7159B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
      4 *
      5 * Author: Jianmin Lv <lvjianmin@loongson.cn>
      6 *         Huacai Chen <chenhuacai@loongson.cn>
      7 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
      8 */
      9
     10#include <linux/init.h>
     11#include <linux/acpi.h>
     12#include <linux/irq.h>
     13#include <linux/irqdomain.h>
     14#include <linux/memblock.h>
     15#include <linux/serial_core.h>
     16#include <asm/io.h>
     17#include <asm/numa.h>
     18#include <asm/loongson.h>
     19
/* Set when ACPI is turned off (e.g. "acpi=off" or table-parse failure). */
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
/* Nonzero suppresses ACPI-based IRQ routing. */
int acpi_noirq;
/* Nonzero disables ACPI-based PCI configuration. */
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
/* CPUs found enabled/disabled while parsing the MADT (see set_processor_mask). */
int num_processors;
int disabled_cpus;
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;

/* NOTE(review): not written in this file — presumably saved stack pointer
 * for suspend/resume, set by platform code elsewhere; confirm before use. */
u64 acpi_saved_sp;

#define MAX_CORE_PIC 256

/* Prefix for pr_warn()/pr_info() messages emitted by this file. */
#define PREFIX			"ACPI: "
     35
     36int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
     37{
     38	if (irqp != NULL)
     39		*irqp = acpi_register_gsi(NULL, gsi, -1, -1);
     40	return (*irqp >= 0) ? 0 : -EINVAL;
     41}
     42EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
     43
     44int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
     45{
     46	if (gsi)
     47		*gsi = isa_irq;
     48	return 0;
     49}
     50
     51/*
     52 * success: return IRQ number (>=0)
     53 * failure: return < 0
     54 */
     55int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
     56{
     57	struct irq_fwspec fwspec;
     58
     59	switch (gsi) {
     60	case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
     61		fwspec.fwnode = liointc_domain->fwnode;
     62		fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
     63		fwspec.param_count = 1;
     64
     65		return irq_create_fwspec_mapping(&fwspec);
     66
     67	case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
     68		if (!pch_lpc_domain)
     69			return -EINVAL;
     70
     71		fwspec.fwnode = pch_lpc_domain->fwnode;
     72		fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
     73		fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
     74		fwspec.param_count = 2;
     75
     76		return irq_create_fwspec_mapping(&fwspec);
     77
     78	case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
     79		if (!pch_pic_domain[0])
     80			return -EINVAL;
     81
     82		fwspec.fwnode = pch_pic_domain[0]->fwnode;
     83		fwspec.param[0] = gsi - GSI_MIN_PCH_IRQ;
     84		fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
     85		fwspec.param_count = 2;
     86
     87		return irq_create_fwspec_mapping(&fwspec);
     88	}
     89
     90	return -EINVAL;
     91}
     92EXPORT_SYMBOL_GPL(acpi_register_gsi);
     93
/*
 * Counterpart of acpi_register_gsi(); a no-op here — GSI mappings
 * created above are never torn down on this platform.
 */
void acpi_unregister_gsi(u32 gsi)
{

}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
     99
    100void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
    101{
    102
    103	if (!phys || !size)
    104		return NULL;
    105
    106	return early_memremap(phys, size);
    107}
    108void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
    109{
    110	if (!map || !size)
    111		return;
    112
    113	early_memunmap(map, size);
    114}
    115
    116void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
    117{
    118	if (!memblock_is_memory(phys))
    119		return ioremap(phys, size);
    120	else
    121		return ioremap_cache(phys, size);
    122}
    123
    124void __init acpi_boot_table_init(void)
    125{
    126	/*
    127	 * If acpi_disabled, bail out
    128	 */
    129	if (acpi_disabled)
    130		return;
    131
    132	/*
    133	 * Initialize the ACPI boot-time table parser.
    134	 */
    135	if (acpi_table_init()) {
    136		disable_acpi();
    137		return;
    138	}
    139}
    140
#ifdef CONFIG_SMP
/*
 * Reserve a logical CPU number for the physical CPU @id reported by
 * the MADT.  The boot CPU always maps to logical CPU 0; any other CPU
 * takes the lowest logical id not yet marked present.
 *
 * Returns the logical CPU number, or -ENODEV when the configured
 * nr_cpus/possible_cpus limit has been reached.
 *
 * NOTE(review): when ACPI_MADT_ENABLED is clear the tentative logical
 * id is still returned even though the CPU was not marked present and
 * __cpu_number_map was not updated — callers in this file always pass
 * ACPI_MADT_ENABLED, so that path appears unused here; confirm before
 * relying on it.
 */
static int set_processor_mask(u32 id, u32 flags)
{

	int cpu, cpuid = id;

	/* Refuse to register more CPUs than the kernel supports. */
	if (num_processors >= nr_cpu_ids) {
		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
			" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);

		return -ENODEV;

	}
	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		/* Lowest logical CPU id not already present. */
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		/* Record the physical<->logical id mapping both ways. */
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else
		disabled_cpus++;

	return cpu;
}
#endif
    171
    172static void __init acpi_process_madt(void)
    173{
    174#ifdef CONFIG_SMP
    175	int i;
    176
    177	for (i = 0; i < NR_CPUS; i++) {
    178		__cpu_number_map[i] = -1;
    179		__cpu_logical_map[i] = -1;
    180	}
    181#endif
    182
    183	loongson_sysconf.nr_cpus = num_processors;
    184}
    185
    186int __init acpi_boot_init(void)
    187{
    188	/*
    189	 * If acpi_disabled, bail out
    190	 */
    191	if (acpi_disabled)
    192		return -1;
    193
    194	loongson_sysconf.boot_cpu_id = read_csr_cpuid();
    195
    196	/*
    197	 * Process the Multiple APIC Description Table (MADT), if present
    198	 */
    199	acpi_process_madt();
    200
    201	/* Do not enable ACPI SPCR console by default */
    202	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
    203
    204	return 0;
    205}
    206
    207#ifdef CONFIG_ACPI_NUMA
    208
/* Map an ACPI proximity domain id onto a Linux NUMA node id. */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

/* Number of localities described by the SLIT distance matrix.
 * NOTE(review): not referenced anywhere in this file — presumably kept
 * for parity with other architectures; confirm before removing. */
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}
    225
    226void __init numa_set_distance(int from, int to, int distance)
    227{
    228	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
    229		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
    230				from, to, distance);
    231		return;
    232	}
    233
    234	node_distances[from][to] = distance;
    235}
    236
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	/* A length mismatch means the SRAT is malformed; discard it all. */
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	/* Ignore entries for CPUs the firmware marked disabled. */
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	/* SRAT revision >= 2 widens the proximity domain to 32 bits. */
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	/* apic_id indexes per-CPU arrays sized by CONFIG_NR_CPUS. */
	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	/* Record the CPU->node mapping and mark the node as parsed. */
	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
    276
    277void __init acpi_numa_arch_fixup(void) {}
    278#endif
    279
/*
 * Reserve a firmware-specified memory region in memblock so the page
 * allocator never hands it out.
 */
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}
    284
    285#ifdef CONFIG_ACPI_HOTPLUG_CPU
    286
    287#include <acpi/processor.h>
    288
    289static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
    290{
    291#ifdef CONFIG_ACPI_NUMA
    292	int nid;
    293
    294	nid = acpi_get_node(handle);
    295	if (nid != NUMA_NO_NODE) {
    296		set_cpuid_to_node(physid, nid);
    297		node_set(nid, numa_nodes_parsed);
    298		set_cpu_numa_node(cpu, nid);
    299		cpumask_set_cpu(cpu, cpumask_of_node(nid));
    300	}
    301#endif
    302	return 0;
    303}
    304
    305int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
    306{
    307	int cpu;
    308
    309	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
    310	if (cpu < 0) {
    311		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
    312		return cpu;
    313	}
    314
    315	acpi_map_cpu2node(handle, cpu, physid);
    316
    317	*pcpu = cpu;
    318
    319	return 0;
    320}
    321EXPORT_SYMBOL(acpi_map_cpu);
    322
    323int acpi_unmap_cpu(int cpu)
    324{
    325#ifdef CONFIG_ACPI_NUMA
    326	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
    327#endif
    328	set_cpu_present(cpu, false);
    329	num_processors--;
    330
    331	pr_info("cpu%d hot remove!\n", cpu);
    332
    333	return 0;
    334}
    335EXPORT_SYMBOL(acpi_unmap_cpu);
    336
    337#endif /* CONFIG_ACPI_HOTPLUG_CPU */