0001
0002
0003
0004
0005
0006
0007 #include <linux/kernel.h>
0008 #include <linux/threads.h>
0009 #include <linux/module.h>
0010 #include <linux/mm.h>
0011 #include <linux/smp.h>
0012 #include <linux/cpu.h>
0013
0014 #include <linux/blk-mq.h>
0015 #include "blk.h"
0016 #include "blk-mq.h"
0017
0018 static int queue_index(struct blk_mq_queue_map *qmap,
0019 unsigned int nr_queues, const int q)
0020 {
0021 return qmap->queue_offset + (q % nr_queues);
0022 }
0023
0024 static int get_first_sibling(unsigned int cpu)
0025 {
0026 unsigned int ret;
0027
0028 ret = cpumask_first(topology_sibling_cpumask(cpu));
0029 if (ret < nr_cpu_ids)
0030 return ret;
0031
0032 return cpu;
0033 }
0034
/*
 * Build a default CPU -> hardware-queue mapping in qmap->mq_map.
 *
 * Strategy: hand out queue indices round-robin to present CPUs first,
 * then fill in the remaining possible (e.g. not-yet-onlined/hotplug)
 * CPUs, preferring to share a queue with a CPU's first thread sibling
 * once all queues have been handed out at least once.
 *
 * Always returns 0.
 */
int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	unsigned int *map = qmap->mq_map;
	unsigned int nr_queues = qmap->nr_queues;
	unsigned int cpu, first_sibling, q = 0;

	/*
	 * Mark every possible CPU as unmapped.  map[] is unsigned, so -1
	 * stores as UINT_MAX; the "!= -1" test below relies on the same
	 * implicit conversion and so matches this sentinel.
	 */
	for_each_possible_cpu(cpu)
		map[cpu] = -1;




	/* First pass: present CPUs get queues round-robin, one each,
	 * until we run out of queues. */
	for_each_present_cpu(cpu) {
		if (q >= nr_queues)
			break;
		map[cpu] = queue_index(qmap, nr_queues, q++);
	}

	/* Second pass: cover the remaining possible CPUs. */
	for_each_possible_cpu(cpu) {
		if (map[cpu] != -1)
			continue;  /* already mapped in the first pass */





		if (q < nr_queues) {
			/* Spare queues left: keep handing them out. */
			map[cpu] = queue_index(qmap, nr_queues, q++);
		} else {
			/*
			 * All queues assigned at least once: share the
			 * mapping of this CPU's first thread sibling so
			 * hyperthread siblings land on the same queue.
			 * A CPU that is its own first sibling (or has no
			 * valid sibling) just continues the round-robin,
			 * with queue_index() wrapping q modulo nr_queues.
			 */
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = queue_index(qmap, nr_queues, q++);
			else
				map[cpu] = map[first_sibling];
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
0076 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086 int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
0087 {
0088 int i;
0089
0090 for_each_possible_cpu(i) {
0091 if (index == qmap->mq_map[i])
0092 return cpu_to_node(i);
0093 }
0094
0095 return NUMA_NO_NODE;
0096 }