0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/export.h>
0003 #include <linux/preempt.h>
0004 #include <linux/smp.h>
0005 #include <linux/completion.h>
0006 #include <asm/msr.h>
0007 
0008 static void __rdmsr_on_cpu(void *info)
0009 {
0010     struct msr_info *rv = info;
0011     struct msr *reg;
0012     int this_cpu = raw_smp_processor_id();
0013 
0014     if (rv->msrs)
0015         reg = per_cpu_ptr(rv->msrs, this_cpu);
0016     else
0017         reg = &rv->reg;
0018 
0019     rdmsr(rv->msr_no, reg->l, reg->h);
0020 }
0021 
0022 static void __wrmsr_on_cpu(void *info)
0023 {
0024     struct msr_info *rv = info;
0025     struct msr *reg;
0026     int this_cpu = raw_smp_processor_id();
0027 
0028     if (rv->msrs)
0029         reg = per_cpu_ptr(rv->msrs, this_cpu);
0030     else
0031         reg = &rv->reg;
0032 
0033     wrmsr(rv->msr_no, reg->l, reg->h);
0034 }
0035 
0036 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
0037 {
0038     int err;
0039     struct msr_info rv;
0040 
0041     memset(&rv, 0, sizeof(rv));
0042 
0043     rv.msr_no = msr_no;
0044     err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
0045     *l = rv.reg.l;
0046     *h = rv.reg.h;
0047 
0048     return err;
0049 }
0050 EXPORT_SYMBOL(rdmsr_on_cpu);
0051 
0052 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
0053 {
0054     int err;
0055     struct msr_info rv;
0056 
0057     memset(&rv, 0, sizeof(rv));
0058 
0059     rv.msr_no = msr_no;
0060     err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
0061     *q = rv.reg.q;
0062 
0063     return err;
0064 }
0065 EXPORT_SYMBOL(rdmsrl_on_cpu);
0066 
0067 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
0068 {
0069     int err;
0070     struct msr_info rv;
0071 
0072     memset(&rv, 0, sizeof(rv));
0073 
0074     rv.msr_no = msr_no;
0075     rv.reg.l = l;
0076     rv.reg.h = h;
0077     err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
0078 
0079     return err;
0080 }
0081 EXPORT_SYMBOL(wrmsr_on_cpu);
0082 
0083 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
0084 {
0085     int err;
0086     struct msr_info rv;
0087 
0088     memset(&rv, 0, sizeof(rv));
0089 
0090     rv.msr_no = msr_no;
0091     rv.reg.q = q;
0092 
0093     err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
0094 
0095     return err;
0096 }
0097 EXPORT_SYMBOL(wrmsrl_on_cpu);
0098 
/*
 * Run @msr_func on every CPU in @mask.  A non-NULL @msrs makes the
 * callback use a per-CPU slot for its value; see __rdmsr_on_cpu().
 * The local CPU (if in @mask) is handled with a direct call, remote
 * CPUs via IPI.
 */
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
                struct msr *msrs,
                void (*msr_func) (void *info))
{
    struct msr_info rv;
    int this_cpu;

    memset(&rv, 0, sizeof(rv));

    rv.msrs   = msrs;
    rv.msr_no = msr_no;

    /* Disable preemption so this_cpu stays valid for the direct call. */
    this_cpu = get_cpu();

    if (cpumask_test_cpu(this_cpu, mask))
        msr_func(&rv);

    /* smp_call_function_many() skips the calling CPU — handled above. */
    smp_call_function_many(mask, msr_func, &rv, 1);
    put_cpu();
}
0119 
/**
 * rdmsr_on_cpus - read an MSR on all CPUs in a mask
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       array of MSR values, one slot per CPU
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
    __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
0132 
/**
 * wrmsr_on_cpus - write an MSR on all CPUs in a mask
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       array of MSR values, one slot per CPU
 *
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
    __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
0146 
/*
 * Pairs an msr_info with a completion so an asynchronously-dispatched
 * callback (see __rdmsr_safe_on_cpu()) can signal the waiting caller.
 */
struct msr_info_completion {
    struct msr_info     msr;
    struct completion   done;
};
0151 
/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
    struct msr_info_completion *rv = info;

    /* rdmsr_safe() reports (instead of faulting on) a missing MSR. */
    rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
    /* Wake the waiter blocked in rdmsr_safe_on_cpu(). */
    complete(&rv->done);
}
0161 
/* Cross-call callback: write the MSR, recording rather than faulting on error. */
static void __wrmsr_safe_on_cpu(void *info)
{
    struct msr_info *rv = info;

    rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
0168 
/*
 * Safely read MSR @msr_no on @cpu into @l:@h.  Uses an async cross-call
 * plus a completion rather than smp_call_function_single().  Returns the
 * dispatch error if the IPI failed, otherwise the rdmsr_safe() result.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
    struct msr_info_completion rv;
    call_single_data_t csd;
    int err;

    /* csd only records a pointer to rv; rv itself is set up just below. */
    INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);

    memset(&rv, 0, sizeof(rv));
    init_completion(&rv.done);
    rv.msr.msr_no = msr_no;

    err = smp_call_function_single_async(cpu, &csd);
    if (!err) {
        /* The callback signals rv.done once it has filled rv.msr. */
        wait_for_completion(&rv.done);
        err = rv.msr.err;
    }
    /* On failure these are the zeroes from the memset above. */
    *l = rv.msr.reg.l;
    *h = rv.msr.reg.h;

    return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
0192 
0193 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
0194 {
0195     int err;
0196     struct msr_info rv;
0197 
0198     memset(&rv, 0, sizeof(rv));
0199 
0200     rv.msr_no = msr_no;
0201     rv.reg.l = l;
0202     rv.reg.h = h;
0203     err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
0204 
0205     return err ? err : rv.err;
0206 }
0207 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
0208 
0209 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
0210 {
0211     int err;
0212     struct msr_info rv;
0213 
0214     memset(&rv, 0, sizeof(rv));
0215 
0216     rv.msr_no = msr_no;
0217     rv.reg.q = q;
0218 
0219     err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
0220 
0221     return err ? err : rv.err;
0222 }
0223 EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
0224 
0225 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
0226 {
0227     u32 low, high;
0228     int err;
0229 
0230     err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
0231     *q = (u64)high << 32 | low;
0232 
0233     return err;
0234 }
0235 EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
0236 
/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
    struct msr_regs_info *rv = info;

    rv->err = rdmsr_safe_regs(rv->regs);
}
0247 
/* Cross-call callback: full-GPR MSR write, recording the fault status. */
static void __wrmsr_safe_regs_on_cpu(void *info)
{
    struct msr_regs_info *rv = info;

    rv->err = wrmsr_safe_regs(rv->regs);
}
0254 
0255 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
0256 {
0257     int err;
0258     struct msr_regs_info rv;
0259 
0260     rv.regs   = regs;
0261     rv.err    = -EIO;
0262     err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
0263 
0264     return err ? err : rv.err;
0265 }
0266 EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
0267 
0268 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
0269 {
0270     int err;
0271     struct msr_regs_info rv;
0272 
0273     rv.regs = regs;
0274     rv.err  = -EIO;
0275     err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
0276 
0277     return err ? err : rv.err;
0278 }
0279 EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);