cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

spapr_numa.c (24127B)


/*
 * QEMU PowerPC pSeries Logical Partition NUMA associativity handling
 *
 * Copyright IBM Corp. 2020
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/ppc/spapr_numa.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/fdt.h"

/* Moved from hw/ppc/spapr_pci_nvlink2.c */
#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))

/*
 * Retrieves max_dist_ref_points of the current NUMA affinity.
 */
static int get_max_dist_ref_points(SpaprMachineState *spapr)
{
    if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) {
        return FORM2_DIST_REF_POINTS;
    }

    return FORM1_DIST_REF_POINTS;
}

/*
 * Retrieves numa_assoc_size of the current NUMA affinity.
 */
static int get_numa_assoc_size(SpaprMachineState *spapr)
{
    if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) {
        return FORM2_NUMA_ASSOC_SIZE;
    }

    return FORM1_NUMA_ASSOC_SIZE;
}

/*
 * Retrieves vcpu_assoc_size of the current NUMA affinity.
 *
 * vcpu_assoc_size is the size of the ibm,associativity array
 * for CPUs, which has an extra element (vcpu_id) at the end.
 */
static int get_vcpu_assoc_size(SpaprMachineState *spapr)
{
    return get_numa_assoc_size(spapr) + 1;
}

/*
 * Retrieves the ibm,associativity array of NUMA node 'node_id'
 * for the current NUMA affinity.
 */
static const uint32_t *get_associativity(SpaprMachineState *spapr, int node_id)
{
    if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) {
        return spapr->FORM2_assoc_array[node_id];
    }
    return spapr->FORM1_assoc_array[node_id];
}

static bool spapr_numa_is_symmetrical(MachineState *ms)
{
    int src, dst;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    NodeInfo *numa_info = ms->numa_state->nodes;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] !=
                numa_info[dst].distance[src]) {
                return false;
            }
        }
    }

    return true;
}

/*
 * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
 * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu(), which is
 * called from the vPHB reset handler, so we initialize the counter here.
 * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
 * must be equally distant from any other node.
 * The final value of spapr->gpu_numa_id is going to be written to
 * max-associativity-domains in spapr_build_fdt().
 */
unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine)
{
    return MAX(1, machine->numa_state->num_nodes);
}

/*
 * This function translates the user-provided distances into
 * the values the kernel understands as possible: 10
 * (local distance), 20, 40, 80 and 160, and returns the equivalent
 * NUMA level for each. The current heuristic is:
 *  - local distance (10) returns numa_level = 0x4, meaning there is
 *    no rounding for local distance
 *  - distances between 11 and 30 inclusive -> rounded to 20,
 *    numa_level = 0x3
 *  - distances between 31 and 60 inclusive -> rounded to 40,
 *    numa_level = 0x2
 *  - distances between 61 and 120 inclusive -> rounded to 80,
 *    numa_level = 0x1
 *  - everything above 120 returns numa_level = 0 to indicate that
 *    there is no match. This will be calculated as distance = 160
 *    by the kernel (as of v5.9)
 */
static uint8_t spapr_numa_get_numa_level(uint8_t distance)
{
    if (distance == 10) {
        return 0x4;
    } else if (distance >= 11 && distance <= 30) {
        return 0x3;
    } else if (distance >= 31 && distance <= 60) {
        return 0x2;
    } else if (distance >= 61 && distance <= 120) {
        return 0x1;
    }

    return 0;
}
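
/*
 * Worked example of the rounding above (illustrative, with made-up
 * user distances): a distance of 12 maps to numa_level 0x3 and is
 * reported to the guest as 20; 40 maps to 0x2; 100 maps to 0x1 and
 * is rounded to 80; 150 maps to numa_level 0, which the kernel
 * resolves to 160.
 */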

static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    NodeInfo *numa_info = ms->numa_state->nodes;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    int src, dst, i, j;

    /*
     * Fill all associativity domains of non-zero NUMA nodes with
     * node_id. This is required because the default value (0) is
     * considered a match with associativity domains of node 0.
     */
    for (i = 1; i < nb_numa_nodes; i++) {
        for (j = 1; j < FORM1_DIST_REF_POINTS; j++) {
            spapr->FORM1_assoc_array[i][j] = cpu_to_be32(i);
        }
    }

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            /*
             * This is how the associativity domain between A and B
             * is calculated:
             *
             * - get the distance D between them
             * - get the corresponding NUMA level 'n_level' for D
             * - all associativity arrays were initialized with their own
             * numa_ids, and we're calculating the distance in node_id
             * ascending order, starting from node id 0 (the first node
             * retrieved by numa_state). This will have a cascade effect in
             * the algorithm because the associativity domains that node 0
             * defines will be carried over to other nodes, and node 1
             * associativities will be carried over after taking node 0
             * associativities into account, and so on. This happens because
             * we'll assign assoc_src as the associativity domain of dst
             * as well, for all NUMA levels beyond and including n_level.
             *
             * The PPC kernel expects the associativity domains of node 0 to
             * always be 0, and this algorithm guarantees that by default.
             */
            uint8_t distance = numa_info[src].distance[dst];
            uint8_t n_level = spapr_numa_get_numa_level(distance);
            uint32_t assoc_src;

            /*
             * n_level = 0 means that the distance is greater than our last
             * rounded value (120). In this case there is no NUMA level match
             * between src and dst and we can skip the rest of the loop.
             *
             * The Linux kernel will assume that the distance between src and
             * dst, in this case of no match, is 10 (local distance) doubled
             * for each NUMA level it didn't match. We have
             * FORM1_DIST_REF_POINTS levels (4), so this gives us
             * 10*2*2*2*2 = 160.
             *
             * This logic can be seen in the Linux kernel source code, as of
             * v5.9, in arch/powerpc/mm/numa.c, function __node_distance().
             */
            if (n_level == 0) {
                continue;
            }

            /*
             * We must assign all assoc_src to dst, starting from n_level
             * and going up to 0x1.
             */
            for (i = n_level; i > 0; i--) {
                assoc_src = spapr->FORM1_assoc_array[src][i];
                spapr->FORM1_assoc_array[dst][i] = assoc_src;
            }
        }
    }
}
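
/*
 * Worked example (hypothetical topology, not from this file): with two
 * NUMA nodes and a user-provided distance of 40 between them, node 1
 * starts as {4, 1, 1, 1, 1}. Distance 40 yields n_level = 0x2, so
 * positions 1 and 2 of node 0's array (both 0) are copied over, giving:
 *
 *   node 0 ibm,associativity: {4, 0, 0, 0, 0}
 *   node 1 ibm,associativity: {4, 0, 0, 1, 1}
 *
 * The arrays differ in two of the four reference-point levels, so the
 * kernel derives a distance of 10*2*2 = 40 between the nodes.
 */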

static void spapr_numa_FORM1_affinity_check(MachineState *machine)
{
    int i;

    /*
     * Check that we don't have a memory-less/cpu-less NUMA node.
     * Firmware relies on the existing memory/cpu topology to provide the
     * NUMA topology to the kernel.
     * The Linux kernel also needs to know the NUMA topology at start
     * to be able to hotplug CPUs later.
     */
    if (machine->numa_state->num_nodes) {
        for (i = 0; i < machine->numa_state->num_nodes; ++i) {
            /* check for memory-less node */
            if (machine->numa_state->nodes[i].node_mem == 0) {
                CPUState *cs;
                int found = 0;
                /* check for cpu-less node */
                CPU_FOREACH(cs) {
                    PowerPCCPU *cpu = POWERPC_CPU(cs);
                    if (cpu->node_id == i) {
                        found = 1;
                        break;
                    }
                }
                /* memory-less and cpu-less node */
                if (!found) {
                    error_report(
"Memory-less/cpu-less nodes are not supported with FORM1 NUMA (node %d)", i);
                    exit(EXIT_FAILURE);
                }
            }
        }
    }

    if (!spapr_numa_is_symmetrical(machine)) {
        error_report(
"Asymmetrical NUMA topologies aren't supported in the pSeries machine using FORM1 NUMA");
        exit(EXIT_FAILURE);
    }
}
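
/*
 * For instance, a command line along these lines (hypothetical values,
 * assuming the usual -numa dist syntax) would be rejected by the check
 * above when the guest negotiates FORM1, because the distance matrix
 * is asymmetrical:
 *
 *   -numa node,nodeid=0 -numa node,nodeid=1 \
 *   -numa dist,src=0,dst=1,val=40 -numa dist,src=1,dst=0,val=80
 */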

/*
 * Set NUMA machine state data based on FORM1 affinity semantics.
 */
static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr,
                                           MachineState *machine)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int i, j, max_nodes_with_gpus;

    /*
     * For all associativity arrays: first position is the size,
     * position FORM1_DIST_REF_POINTS is always the numa_id,
     * represented by the index 'i'.
     *
     * This will break on sparse NUMA setups, when/if QEMU starts
     * to support them, because there will be no more guarantee that
     * 'i' will be a valid node_id set by the user.
     */
    for (i = 0; i < nb_numa_nodes; i++) {
        spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);
        spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
    }

    /*
     * Initialize NVLink GPU associativity arrays. We know that
     * the first GPU will take the first available NUMA id, and
     * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine.
     * At this point we're not sure if there are GPUs or not, but
     * let's initialize the associativity arrays and allow NVLink
     * GPUs to be handled like regular NUMA nodes later on.
     */
    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;

    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
        spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);

        for (j = 1; j < FORM1_DIST_REF_POINTS; j++) {
            uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
                                 SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
            spapr->FORM1_assoc_array[i][j] = gpu_assoc;
        }

        spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
    }

    /*
     * Guests pseries-5.1 and older use zeroed associativity domains,
     * i.e. no domain definition based on NUMA distance input.
     *
     * The same applies to guests that have only one NUMA node.
     */
    if (smc->pre_5_2_numa_associativity ||
        machine->numa_state->num_nodes <= 1) {
        return;
    }

    spapr_numa_define_FORM1_domains(spapr);
}

/*
 * Init NUMA FORM2 machine state data
 */
static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr)
{
    int i;

    /*
     * For all resources but CPUs, FORM2 associativity arrays will
     * be a size 2 array with the following format:
     *
     * ibm,associativity = {1, numa_id}
     *
     * CPUs will write an additional 'vcpu_id' on top of the arrays
     * being initialized here. 'numa_id' is represented by the
     * index 'i' of the loop.
     *
     * Given that this initialization is also valid for GPU associativity
     * arrays, handle everything in one single step by populating the
     * arrays up to NUMA_NODES_MAX_NUM.
     */
    for (i = 0; i < NUMA_NODES_MAX_NUM; i++) {
        spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1);
        spapr->FORM2_assoc_array[i][1] = cpu_to_be32(i);
    }
}
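
/*
 * As an illustration, after this loop runs, a hypothetical NUMA node 3
 * ends up with ibm,associativity = {1, 3}: a single associativity
 * domain whose value is the numa_id itself.
 */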

void spapr_numa_associativity_init(SpaprMachineState *spapr,
                                   MachineState *machine)
{
    spapr_numa_FORM1_affinity_init(spapr, machine);
    spapr_numa_FORM2_affinity_init(spapr);
}

void spapr_numa_associativity_check(SpaprMachineState *spapr)
{
    /*
     * FORM2 does not have any restrictions we need to handle
     * at CAS time, for now.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) {
        return;
    }

    spapr_numa_FORM1_affinity_check(MACHINE(spapr));
}

void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
                                       int offset, int nodeid)
{
    const uint32_t *associativity = get_associativity(spapr, nodeid);

    _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
                      associativity,
                      get_numa_assoc_size(spapr) * sizeof(uint32_t))));
}

static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu)
{
    const uint32_t *associativity = get_associativity(spapr, cpu->node_id);
    int max_distance_ref_points = get_max_dist_ref_points(spapr);
    int vcpu_assoc_size = get_vcpu_assoc_size(spapr);
    uint32_t *vcpu_assoc = g_new(uint32_t, vcpu_assoc_size);
    int index = spapr_get_vcpu_id(cpu);

    /*
     * VCPUs have an extra 'cpu_id' value in ibm,associativity
     * compared to other resources. Increment the size at index
     * 0, put cpu_id last, then copy the remaining associativity
     * domains.
     */
    vcpu_assoc[0] = cpu_to_be32(max_distance_ref_points + 1);
    vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
    memcpy(vcpu_assoc + 1, associativity + 1,
           (vcpu_assoc_size - 2) * sizeof(uint32_t));

    return vcpu_assoc;
}
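
/*
 * Illustrative layout (hypothetical values): under FORM1, a vcpu with
 * vcpu_id 8 on node 1 of the earlier two-node example would get
 *
 *   vcpu_assoc = {5, 0, 0, 1, 1, 8}
 *
 * i.e. the node's four associativity domains, with the size bumped
 * from 4 to 5 and the vcpu_id appended as the last element.
 */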

int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
                            int offset, PowerPCCPU *cpu)
{
    g_autofree uint32_t *vcpu_assoc = NULL;
    int vcpu_assoc_size = get_vcpu_assoc_size(spapr);

    vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
                       vcpu_assoc_size * sizeof(uint32_t));
}

int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
                                         int offset)
{
    MachineState *machine = MACHINE(spapr);
    int max_distance_ref_points = get_max_dist_ref_points(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
    uint32_t *int_buf, *cur_index, buf_len;
    int ret, i;

    /* ibm,associativity-lookup-arrays */
    buf_len = (nr_nodes * max_distance_ref_points + 2) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_nodes);
    /* Number of entries per associativity list */
    int_buf[1] = cpu_to_be32(max_distance_ref_points);
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        /*
         * For the lookup-array we use the ibm,associativity array of the
         * current NUMA affinity, without the first element (size).
         */
        const uint32_t *associativity = get_associativity(spapr, i);
        memcpy(cur_index, ++associativity,
               sizeof(uint32_t) * max_distance_ref_points);
        cur_index += max_distance_ref_points;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
    g_free(int_buf);

    return ret;
}
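
/*
 * Sketch of the resulting property for the hypothetical two-node FORM1
 * example used above (nr_nodes = 2, 4 reference points):
 *
 *   ibm,associativity-lookup-arrays = {2, 4,  0, 0, 0, 0,  0, 0, 1, 1}
 *
 * i.e. node count, entries per list, then each node's associativity
 * array with its leading size element dropped.
 */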

static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr,
                                           void *fdt, int rtas)
{
    MachineState *ms = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    uint32_t number_nvgpus_nodes = spapr->gpu_numa_id -
                                   spapr_numa_initial_nvgpu_numa_id(ms);
    uint32_t refpoints[] = {
        cpu_to_be32(0x4),
        cpu_to_be32(0x3),
        cpu_to_be32(0x2),
        cpu_to_be32(0x1),
    };
    uint32_t nr_refpoints = ARRAY_SIZE(refpoints);
    uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes;
    uint32_t maxdomains[] = {
        cpu_to_be32(4),
        cpu_to_be32(maxdomain),
        cpu_to_be32(maxdomain),
        cpu_to_be32(maxdomain),
        cpu_to_be32(maxdomain)
    };

    if (smc->pre_5_2_numa_associativity ||
        ms->numa_state->num_nodes <= 1) {
        uint32_t legacy_refpoints[] = {
            cpu_to_be32(0x4),
            cpu_to_be32(0x4),
            cpu_to_be32(0x2),
        };
        uint32_t legacy_maxdomain = spapr->gpu_numa_id > 1 ? 1 : 0;
        uint32_t legacy_maxdomains[] = {
            cpu_to_be32(4),
            cpu_to_be32(legacy_maxdomain),
            cpu_to_be32(legacy_maxdomain),
            cpu_to_be32(legacy_maxdomain),
            cpu_to_be32(spapr->gpu_numa_id),
        };

        G_STATIC_ASSERT(sizeof(legacy_refpoints) <= sizeof(refpoints));
        G_STATIC_ASSERT(sizeof(legacy_maxdomains) <= sizeof(maxdomains));

        nr_refpoints = 3;

        memcpy(refpoints, legacy_refpoints, sizeof(legacy_refpoints));
        memcpy(maxdomains, legacy_maxdomains, sizeof(legacy_maxdomains));

        /* pseries-5.0 and older reference-points array is {0x4, 0x4} */
        if (smc->pre_5_1_assoc_refpoints) {
            nr_refpoints = 2;
        }
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, nr_refpoints * sizeof(refpoints[0])));

    _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
                     maxdomains, sizeof(maxdomains)));
}
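
/*
 * For a modern machine type with two NUMA nodes and no NVLink2 GPUs
 * (hypothetical setup), the properties written above come out as:
 *
 *   ibm,associativity-reference-points = {0x4, 0x3, 0x2, 0x1}
 *   ibm,max-associativity-domains = {4, 2, 2, 2, 2}
 */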

static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr,
                                               void *fdt, int rtas)
{
    MachineState *ms = MACHINE(spapr);
    NodeInfo *numa_info = ms->numa_state->nodes;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    int distance_table_entries = nb_numa_nodes * nb_numa_nodes;
    g_autofree uint32_t *lookup_index_table = NULL;
    g_autofree uint8_t *distance_table = NULL;
    int src, dst, i, distance_table_size;

    /*
     * ibm,numa-lookup-index-table: array with length and a
     * list of NUMA ids present in the guest.
     */
    lookup_index_table = g_new0(uint32_t, nb_numa_nodes + 1);
    lookup_index_table[0] = cpu_to_be32(nb_numa_nodes);

    for (i = 0; i < nb_numa_nodes; i++) {
        lookup_index_table[i + 1] = cpu_to_be32(i);
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,numa-lookup-index-table",
                     lookup_index_table,
                     (nb_numa_nodes + 1) * sizeof(uint32_t)));

    /*
     * ibm,numa-distance-table: contains all node distances. The first
     * element is the size of the table as uint32, followed by all the
     * uint8 distances from the first NUMA node, then all distances
     * from the second NUMA node, and so on.
     *
     * ibm,numa-lookup-index-table is used by the guest to navigate this
     * array because NUMA ids can be sparse (node 0 is the first,
     * node 8 is the second ...).
     */
    distance_table_size = distance_table_entries * sizeof(uint8_t) +
                          sizeof(uint32_t);
    distance_table = g_new0(uint8_t, distance_table_size);
    stl_be_p(distance_table, distance_table_entries);

    /* Skip the uint32_t array length at the start */
    i = sizeof(uint32_t);

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = 0; dst < nb_numa_nodes; dst++) {
            /*
             * We need to be explicit with the local distance
             * value to cover the case where the user didn't add any
             * NUMA nodes, but QEMU adds the default NUMA node without
             * adding the numa_info to retrieve distance info from.
             */
            if (src == dst) {
                distance_table[i++] = NUMA_DISTANCE_MIN;
                continue;
            }

            distance_table[i++] = numa_info[src].distance[dst];
        }
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,numa-distance-table",
                     distance_table, distance_table_size));
}
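
/*
 * Worked example (hypothetical two-node guest, distance 40 between the
 * nodes): the two tables written above would be
 *
 *   ibm,numa-lookup-index-table = {2, 0, 1}
 *   ibm,numa-distance-table = {4, 10, 40, 40, 10}
 *
 * where the leading 4 is the uint32 entry count and the remaining
 * bytes are the row-major distance matrix.
 */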

/*
 * This helper could be merged into a single function with the
 * FORM1 logic since we're setting the same DT values, the
 * difference being a call to spapr_numa_FORM2_write_rtas_tables()
 * at the end. The separation was made to avoid clogging FORM1 code,
 * which already has to deal with compat modes from previous
 * QEMU machine types.
 */
static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr,
                                           void *fdt, int rtas)
{
    MachineState *ms = MACHINE(spapr);
    uint32_t number_nvgpus_nodes = spapr->gpu_numa_id -
                                   spapr_numa_initial_nvgpu_numa_id(ms);

    /*
     * In FORM2, ibm,associativity-reference-points will point to
     * the element in the ibm,associativity array that contains the
     * primary domain index (for FORM2, the first element).
     *
     * This value (in our case, the numa-id) is then used as an index
     * to retrieve all other attributes of the node (distance,
     * bandwidth, latency) via ibm,numa-lookup-index-table and other
     * ibm,numa-*-table properties.
     */
    uint32_t refpoints[] = { cpu_to_be32(1) };

    uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes;
    uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) };

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
                     maxdomains, sizeof(maxdomains)));

    spapr_numa_FORM2_write_rtas_tables(spapr, fdt, rtas);
}

/*
 * Helper that writes ibm,associativity-reference-points and
 * max-associativity-domains in the RTAS pointed by @rtas
 * in the DT @fdt.
 */
void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
{
    if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) {
        spapr_numa_FORM2_write_rtas_dt(spapr, fdt, rtas);
        return;
    }

    spapr_numa_FORM1_write_rtas_dt(spapr, fdt, rtas);
}

static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    g_autofree uint32_t *vcpu_assoc = NULL;
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    PowerPCCPU *tcpu;
    int idx, assoc_idx;
    int vcpu_assoc_size = get_vcpu_assoc_size(spapr);

    /* only support procno from H_REGISTER_VPA */
    if (flags != 0x1) {
        return H_FUNCTION;
    }

    tcpu = spapr_find_cpu(procno);
    if (tcpu == NULL) {
        return H_P2;
    }

    /*
     * Given that we want to be flexible with the sizes and indexes,
     * we must consider that there is a hard limit on how many
     * associativity domains we can fit in R4 up to R9: 12 domains
     * for vcpus. Assert and bail if that's not the case.
     */
    g_assert((vcpu_assoc_size - 1) <= 12);

    vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu);
    /* assoc_idx starts at 1 to skip the associativity size */
    assoc_idx = 1;

#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))

    for (idx = 0; idx < 6; idx++) {
        int32_t a, b;

        /*
         * vcpu_assoc[] will contain the associativity domains for tcpu,
         * including tcpu->node_id and procno, meaning that we don't
         * need to use these variables here.
         *
         * We'll read 2 values at a time to fill up the ASSOCIATIVITY()
         * macro. The ternary will fill the remaining registers with -1
         * once we have gone through vcpu_assoc[].
         */
        a = assoc_idx < vcpu_assoc_size ?
            be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;
        b = assoc_idx < vcpu_assoc_size ?
            be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;

        args[idx] = ASSOCIATIVITY(a, b);
    }
#undef ASSOCIATIVITY

    return H_SUCCESS;
}
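
/*
 * Packing sketch (hypothetical values): with the FORM1 vcpu_assoc
 * {5, 0, 0, 1, 1, 8} from the earlier example, the loop returns
 *
 *   args[0] (R4) = 0x0000000000000000   (domains 0, 0)
 *   args[1] (R5) = 0x0000000100000001   (domains 1, 1)
 *   args[2] (R6) = 0x00000008ffffffff   (vcpu_id 8, then padding)
 *   args[3..5] (R7-R9) = 0xffffffffffffffff
 */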

static void spapr_numa_register_types(void)
{
    /* Virtual Processor Home Node */
    spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
                             h_home_node_associativity);
}

type_init(spapr_numa_register_types)