Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2020 Oracle Corporation
0004  *
0005  * Module Author: Mike Christie
0006  */
0007 #include "dm-path-selector.h"
0008 
0009 #include <linux/device-mapper.h>
0010 #include <linux/module.h>
0011 
0012 #define DM_MSG_PREFIX "multipath io-affinity"
0013 
/*
 * Per-path state, one instance per path added via ioa_add_path().
 * A single path_info may be shared by many per-CPU slots in
 * selector->path_map; sharing is tracked with the refcount.
 */
struct path_info {
    struct dm_path *path;       /* dm core's handle for this path */
    cpumask_var_t cpumask;      /* CPUs whose I/O should prefer this path */
    refcount_t refcount;        /* one ref per path_map slot pointing here */
    bool failed;                /* set by ioa_fail_path(), cleared on reinstate */
};
0020 
/*
 * Per-selector state, allocated in ioa_create() and stored in ps->context.
 */
struct selector {
    struct path_info **path_map;    /* nr_cpu_ids entries, indexed by CPU id */
    cpumask_var_t path_mask;        /* CPUs that have a non-NULL path_map slot */
    atomic_t map_misses;            /* I/O submissions from CPUs with no mapping */
};
0026 
0027 static void ioa_free_path(struct selector *s, unsigned int cpu)
0028 {
0029     struct path_info *pi = s->path_map[cpu];
0030 
0031     if (!pi)
0032         return;
0033 
0034     if (refcount_dec_and_test(&pi->refcount)) {
0035         cpumask_clear_cpu(cpu, s->path_mask);
0036         free_cpumask_var(pi->cpumask);
0037         kfree(pi);
0038 
0039         s->path_map[cpu] = NULL;
0040     }
0041 }
0042 
/*
 * .add_path callback: parse the single per-path table argument (a cpumask)
 * and map every CPU in that mask to the new path.
 *
 * Returns 0 on success, -EINVAL for bad arguments or when the mask yields
 * no new CPU mapping, -ENOMEM on allocation failure.  On failure pi is
 * freed and no CPU slots are left pointing at it.
 */
static int ioa_add_path(struct path_selector *ps, struct dm_path *path,
            int argc, char **argv, char **error)
{
    struct selector *s = ps->context;
    struct path_info *pi = NULL;
    unsigned int cpu;
    int ret;

    if (argc != 1) {
        *error = "io-affinity ps: invalid number of arguments";
        return -EINVAL;
    }

    pi = kzalloc(sizeof(*pi), GFP_KERNEL);
    if (!pi) {
        *error = "io-affinity ps: Error allocating path context";
        return -ENOMEM;
    }

    pi->path = path;
    path->pscontext = pi;
    /* Initial reference; dropped below once CPU slots hold their own. */
    refcount_set(&pi->refcount, 1);

    if (!zalloc_cpumask_var(&pi->cpumask, GFP_KERNEL)) {
        *error = "io-affinity ps: Error allocating cpumask context";
        ret = -ENOMEM;
        goto free_pi;
    }

    ret = cpumask_parse(argv[0], pi->cpumask);
    if (ret) {
        *error = "io-affinity ps: invalid cpumask";
        ret = -EINVAL;
        goto free_mask;
    }

    for_each_cpu(cpu, pi->cpumask) {
        /* Defensive: ignore mask bits beyond the possible-CPU range. */
        if (cpu >= nr_cpu_ids) {
            DMWARN_LIMIT("Ignoring mapping for CPU %u. Max CPU is %u",
                     cpu, nr_cpu_ids);
            break;
        }

        /* First path to claim a CPU wins; later overlaps are skipped. */
        if (s->path_map[cpu]) {
            DMWARN("CPU mapping for %u exists. Ignoring.", cpu);
            continue;
        }

        cpumask_set_cpu(cpu, s->path_mask);
        s->path_map[cpu] = pi;
        refcount_inc(&pi->refcount);    /* one ref per mapped CPU slot */
    }

    /*
     * Drop the initial reference.  If it was the last one, no CPU slot
     * took a reference above, i.e. this path added no usable mapping.
     */
    if (refcount_dec_and_test(&pi->refcount)) {
        *error = "io-affinity ps: No new/valid CPU mapping found";
        ret = -EINVAL;
        goto free_mask;
    }

    return 0;

free_mask:
    free_cpumask_var(pi->cpumask);
free_pi:
    kfree(pi);
    return ret;
}
0110 
0111 static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
0112 {
0113     struct selector *s;
0114 
0115     s = kmalloc(sizeof(*s), GFP_KERNEL);
0116     if (!s)
0117         return -ENOMEM;
0118 
0119     s->path_map = kzalloc(nr_cpu_ids * sizeof(struct path_info *),
0120                   GFP_KERNEL);
0121     if (!s->path_map)
0122         goto free_selector;
0123 
0124     if (!zalloc_cpumask_var(&s->path_mask, GFP_KERNEL))
0125         goto free_map;
0126 
0127     atomic_set(&s->map_misses, 0);
0128     ps->context = s;
0129     return 0;
0130 
0131 free_map:
0132     kfree(s->path_map);
0133 free_selector:
0134     kfree(s);
0135     return -ENOMEM;
0136 }
0137 
0138 static void ioa_destroy(struct path_selector *ps)
0139 {
0140     struct selector *s = ps->context;
0141     unsigned cpu;
0142 
0143     for_each_cpu(cpu, s->path_mask)
0144         ioa_free_path(s, cpu);
0145 
0146     free_cpumask_var(s->path_mask);
0147     kfree(s->path_map);
0148     kfree(s);
0149 
0150     ps->context = NULL;
0151 }
0152 
0153 static int ioa_status(struct path_selector *ps, struct dm_path *path,
0154               status_type_t type, char *result, unsigned int maxlen)
0155 {
0156     struct selector *s = ps->context;
0157     struct path_info *pi;
0158     int sz = 0;
0159 
0160     if (!path) {
0161         DMEMIT("0 ");
0162         return sz;
0163     }
0164 
0165     switch(type) {
0166     case STATUSTYPE_INFO:
0167         DMEMIT("%d ", atomic_read(&s->map_misses));
0168         break;
0169     case STATUSTYPE_TABLE:
0170         pi = path->pscontext;
0171         DMEMIT("%*pb ", cpumask_pr_args(pi->cpumask));
0172         break;
0173     case STATUSTYPE_IMA:
0174         *result = '\0';
0175         break;
0176     }
0177 
0178     return sz;
0179 }
0180 
0181 static void ioa_fail_path(struct path_selector *ps, struct dm_path *p)
0182 {
0183     struct path_info *pi = p->pscontext;
0184 
0185     pi->failed = true;
0186 }
0187 
0188 static int ioa_reinstate_path(struct path_selector *ps, struct dm_path *p)
0189 {
0190     struct path_info *pi = p->pscontext;
0191 
0192     pi->failed = false;
0193     return 0;
0194 }
0195 
0196 static struct dm_path *ioa_select_path(struct path_selector *ps,
0197                        size_t nr_bytes)
0198 {
0199     unsigned int cpu, node;
0200     struct selector *s = ps->context;
0201     const struct cpumask *cpumask;
0202     struct path_info *pi;
0203     int i;
0204 
0205     cpu = get_cpu();
0206 
0207     pi = s->path_map[cpu];
0208     if (pi && !pi->failed)
0209         goto done;
0210 
0211     /*
0212      * Perf is not optimal, but we at least try the local node then just
0213      * try not to fail.
0214      */
0215     if (!pi)
0216         atomic_inc(&s->map_misses);
0217 
0218     node = cpu_to_node(cpu);
0219     cpumask = cpumask_of_node(node);
0220     for_each_cpu(i, cpumask) {
0221         pi = s->path_map[i];
0222         if (pi && !pi->failed)
0223             goto done;
0224     }
0225 
0226     for_each_cpu(i, s->path_mask) {
0227         pi = s->path_map[i];
0228         if (pi && !pi->failed)
0229             goto done;
0230     }
0231     pi = NULL;
0232 
0233 done:
0234     put_cpu();
0235     return pi ? pi->path : NULL;
0236 }
0237 
/*
 * Path selector type registered with the dm-mpath core.  Takes one
 * per-path table argument (the cpumask) and reports one info field
 * (the map-miss count).
 */
static struct path_selector_type ioa_ps = {
    .name       = "io-affinity",
    .module     = THIS_MODULE,
    .table_args = 1,
    .info_args  = 1,
    .create     = ioa_create,
    .destroy    = ioa_destroy,
    .status     = ioa_status,
    .add_path   = ioa_add_path,
    .fail_path  = ioa_fail_path,
    .reinstate_path = ioa_reinstate_path,
    .select_path    = ioa_select_path,
};
0251 
0252 static int __init dm_ioa_init(void)
0253 {
0254     int ret = dm_register_path_selector(&ioa_ps);
0255 
0256     if (ret < 0)
0257         DMERR("register failed %d", ret);
0258     return ret;
0259 }
0260 
0261 static void __exit dm_ioa_exit(void)
0262 {
0263     int ret = dm_unregister_path_selector(&ioa_ps);
0264 
0265     if (ret < 0)
0266         DMERR("unregister failed %d", ret);
0267 }
0268 
/* Standard module entry/exit hooks and metadata. */
module_init(dm_ioa_init);
module_exit(dm_ioa_exit);

MODULE_DESCRIPTION(DM_NAME " multipath path selector that selects paths based on the CPU IO is being executed on");
MODULE_AUTHOR("Mike Christie <michael.christie@oracle.com>");
MODULE_LICENSE("GPL");