cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-mq-cpumap.c (2226B)


// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

static int queue_index(struct blk_mq_queue_map *qmap,
		       unsigned int nr_queues, const int q)
{
	return qmap->queue_offset + (q % nr_queues);
}
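/*
 * Illustrative worked example (not in the original source): with
 * queue_offset == 0 and nr_queues == 4, successive q values
 * 0, 1, 2, 3, 4, 5 yield indices 0, 1, 2, 3, 0, 1: a plain
 * round-robin, shifted by the map's queue_offset.
 */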

/*
 * Return the first CPU in @cpu's thread sibling mask, or @cpu itself
 * when the mask is empty.
 */
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}
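/*
 * Illustrative example (hypothetical topology): on an SMT-2 machine
 * where CPUs 0 and 4 are thread siblings of the same core,
 * get_first_sibling(4) returns 0, which lets blk_mq_map_queues()
 * below give CPU 4 the same queue as CPU 0.
 */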

int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	unsigned int *map = qmap->mq_map;
	unsigned int nr_queues = qmap->nr_queues;
	unsigned int cpu, first_sibling, q = 0;

	for_each_possible_cpu(cpu)
		map[cpu] = -1;

	/*
	 * Spread queues among present CPUs first, to minimize the number
	 * of dead queues, i.e. queues mapped only by CPUs that are not
	 * present.
	 */
	for_each_present_cpu(cpu) {
		if (q >= nr_queues)
			break;
		map[cpu] = queue_index(qmap, nr_queues, q++);
	}

	for_each_possible_cpu(cpu) {
		if (map[cpu] != -1)
			continue;
		/*
		 * First do sequential mapping between CPUs and queues.
		 * If we still have CPUs to map and the cores run multiple
		 * threads, map sibling threads to the same queue as a
		 * performance optimization.
		 */
		if (q < nr_queues) {
			map[cpu] = queue_index(qmap, nr_queues, q++);
		} else {
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = queue_index(qmap, nr_queues, q++);
			else
				map[cpu] = map[first_sibling];
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
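/*
 * Hedged usage sketch, not part of the original file: a driver without
 * device-specific IRQ affinity information typically falls back to the
 * generic spreading above from its ->map_queues callback. The name
 * example_map_queues and the use of only the default map group are
 * assumptions for illustration.
 */
static int example_map_queues(struct blk_mq_tag_set *set)
{
	/* Spread the default hctx group across all possible CPUs. */
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}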

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
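/*
 * Hedged usage sketch, not part of the original file: the node returned
 * by blk_mq_hw_queue_to_node() is typically passed to a node-aware
 * allocator so per-queue data lands on memory local to the CPUs that
 * service the queue. example_alloc_hctx_data and its size parameter are
 * assumptions for illustration, and a real caller would include
 * <linux/slab.h> for kzalloc_node().
 */
static void *example_alloc_hctx_data(struct blk_mq_queue_map *qmap,
				     unsigned int index, size_t size)
{
	int node = blk_mq_hw_queue_to_node(qmap, index);

	/* kzalloc_node() accepts NUMA_NO_NODE and falls back to any node. */
	return kzalloc_node(size, GFP_KERNEL, node);
}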