cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

x2apic_cluster.c (6234B)


// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"

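/*
 * One cluster_mask per x2APIC cluster: @clusterid identifies the
 * cluster, @node records the NUMA node the structure was allocated on,
 * and @mask tracks the online CPUs belonging to the cluster.
 */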
struct cluster_mask {
	unsigned int	clusterid;
	int		node;
	struct cpumask	mask;
};

/*
 * __x2apic_send_IPI_mask() possibly needs to read
 * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
 * Using per cpu variable would cost one cache line per cpu.
 */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

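/* Send @vector to a single @cpu, addressed by its logical APIC ID. */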
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = x86_cpu_to_logical_apicid[cpu];

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

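/*
 * Send @vector to all CPUs in @mask with a single IPI per cluster: the
 * logical APIC IDs of the targeted CPUs in each cluster are OR'ed into
 * one logical destination, so every cluster receives exactly one
 * message.
 */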
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
			dest |= x86_cpu_to_logical_apicid[clustercpu];

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
		/* Remove this cluster's CPUs from tmpmsk */
		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}

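/* Return the logical APIC ID used as the interrupt destination for @cpu. */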
static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return x86_cpu_to_logical_apicid[cpu];
}

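/*
 * Record this CPU's logical APIC ID (read from the LDR) and link the
 * CPU into its cluster's cluster_mask. If no online CPU belongs to the
 * cluster yet, the preallocated hotplug spare mask is taken over, so no
 * memory needs to be allocated on this path.
 */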
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
	u32 cluster, apicid = apic_read(APIC_LDR);
	unsigned int cpu;

	x86_cpu_to_logical_apicid[smp_processor_id()] = apicid;

	if (cmsk)
		goto update;

	cluster = apicid >> 16;
	for_each_online_cpu(cpu) {
		cmsk = per_cpu(cluster_masks, cpu);
		/* Matching cluster found. Link and update it. */
		if (cmsk && cmsk->clusterid == cluster)
			goto update;
	}
	cmsk = cluster_hotplug_mask;
	cmsk->clusterid = cluster;
	cluster_hotplug_mask = NULL;
update:
	this_cpu_write(cluster_masks, cmsk);
	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}

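/*
 * Keep one spare cluster_mask allocated on @cpu's node so that
 * init_x2apic_ldr() can take it over instead of allocating in the
 * bringup path.
 */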
static int alloc_clustermask(unsigned int cpu, int node)
{
	if (per_cpu(cluster_masks, cpu))
		return 0;
	/*
	 * If a hotplug spare mask exists, check whether it's on the right
	 * node. If not, free it and allocate a new one.
	 */
	if (cluster_hotplug_mask) {
		if (cluster_hotplug_mask->node == node)
			return 0;
		kfree(cluster_hotplug_mask);
	}

	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
					    GFP_KERNEL, node);
	if (!cluster_hotplug_mask)
		return -ENOMEM;
	cluster_hotplug_mask->node = node;
	return 0;
}

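/* CPU hotplug "prepare" callback: allocate per-CPU state before @cpu comes online. */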
static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

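/* CPU hotplug "dead" callback: unlink @dead_cpu from its cluster and free its IPI scratch mask. */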
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}

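/*
 * Detect cluster mode: size the logical APIC ID array to at least one
 * cache line worth of entries, register the hotplug callbacks and set
 * up the boot CPU. Returns 1 when this driver is usable, 0 otherwise.
 */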
static int x2apic_cluster_probe(void)
{
	u32 slots;

	if (!x2apic_mode)
		return 0;

	slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
	x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
	if (!x86_cpu_to_logical_apicid)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		kfree(x86_cpu_to_logical_apicid);
		x86_cpu_to_logical_apicid = NULL;
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}

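/* APIC driver operations for x2APIC in logical (cluster) destination mode. */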
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.delivery_mode			= APIC_DELIVERY_MODE_FIXED,
	.dest_mode_logical		= true,

	.disable_esr			= 0,

	.check_apicid_used		= NULL,
	.init_apic_ldr			= init_x2apic_ldr,
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);