// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

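/*
 * Translate the q'th logical queue into an index in this map's range,
 * [queue_offset, queue_offset + nr_queues): q wraps around nr_queues,
 * so several CPUs may share one hardware queue.
 */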
static int queue_index(struct blk_mq_queue_map *qmap,
		       unsigned int nr_queues, const int q)
{
	return qmap->queue_offset + (q % nr_queues);
}

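/*
 * Return the lowest-numbered CPU in @cpu's hyper-thread sibling mask,
 * or @cpu itself if the topology reports no valid sibling.
 */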
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

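/**
 * blk_mq_map_queues - build a default CPU to hardware queue mapping
 * @qmap: CPU to hardware queue map to populate.
 *
 * Present CPUs are mapped to queues first, so that as few queues as
 * possible are left to un-present CPUs only. Remaining possible CPUs
 * are then assigned sequentially while unused queue indices remain;
 * after that they share the queue of their first hyper-thread sibling,
 * with CPUs that are first in their sibling mask getting the next
 * queue index (wrapping around).
 */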
int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	unsigned int *map = qmap->mq_map;
	unsigned int nr_queues = qmap->nr_queues;
	unsigned int cpu, first_sibling, q = 0;

	for_each_possible_cpu(cpu)
		map[cpu] = -1;

	/*
	 * Spread queues among present CPUs first, to minimize the number
	 * of dead queues that are mapped only by un-present CPUs.
	 */
	for_each_present_cpu(cpu) {
		if (q >= nr_queues)
			break;
		map[cpu] = queue_index(qmap, nr_queues, q++);
	}

	for_each_possible_cpu(cpu) {
		if (map[cpu] != -1)
			continue;
		/*
		 * First do sequential mapping between CPUs and queues.
		 * If there are still CPUs to map once the queues run out,
		 * and the topology has multiple threads per core, map
		 * sibling threads to the same queue for better performance.
		 */
		if (q < nr_queues) {
			map[cpu] = queue_index(qmap, nr_queues, q++);
		} else {
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = queue_index(qmap, nr_queues, q++);
			else
				map[cpu] = map[first_sibling];
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}
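
/*
 * Usage sketch (illustrative, not part of this file): a blk-mq driver
 * without special queue topology typically builds the default mapping
 * from its ->map_queues() callback. "foo_map_queues" and its tag set
 * are hypothetical names:
 *
 *	static int foo_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 */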