0001
0002 #ifndef _ASM_X86_MSR_H
0003 #define _ASM_X86_MSR_H
0004
0005 #include "msr-index.h"
0006
0007 #ifndef __ASSEMBLY__
0008
0009 #include <asm/asm.h>
0010 #include <asm/errno.h>
0011 #include <asm/cpumask.h>
0012 #include <uapi/asm/msr.h>
0013 #include <asm/shared/msr.h>
0014
/*
 * Argument block for MSR accessor work.  NOTE(review): appears to be the
 * payload handed to the cross-CPU rd/wrmsr helpers declared later in this
 * header — confirm against their implementation.
 */
struct msr_info {
	u32 msr_no;		/* MSR register number */
	struct msr reg;		/* value read or to be written */
	struct msr *msrs;	/* per-CPU values for the *_on_cpus() helpers */
	int err;		/* result of the access (0 on success) */
};
0021
/*
 * Argument block for the full-register-set safe accessors
 * (rdmsr_safe_regs()/wrmsr_safe_regs() below take a u32 regs[8]).
 */
struct msr_regs_info {
	u32 *regs;		/* array of 8 GPR values (see u32 regs[8] users) */
	int err;		/* result of the access (0 on success) */
};
0026
/*
 * One saved MSR value plus a validity flag.  NOTE(review): presumably used
 * for MSR save/restore (e.g. across suspend) — confirm with the users of
 * struct saved_msrs.
 */
struct saved_msr {
	bool valid;		/* true when 'info' holds a saved value */
	struct msr_info info;	/* MSR number and saved contents */
};
0031
/* A counted array of saved MSRs. */
struct saved_msrs {
	unsigned int num;	/* number of entries in 'array' */
	struct saved_msr *array;
};
0036
0037
0038
0039
0040
0041
0042
/*
 * Both i386 and x86_64 return the 64-bit result of RDMSR/RDTSC/RDPMC in
 * the EDX:EAX register pair.  These macros hide the difference in how
 * that pair is bound to C variables:
 *
 *  - 64-bit: two separate unsigned longs for EAX ("=a") and EDX ("=d"),
 *    recombined as low | high << 32.
 *  - 32-bit: a single unsigned long long bound to EDX:EAX via the "=A"
 *    constraint (which only names that pair on 32-bit x86).
 */
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
0053
0054
0055
0056
0057 #include <asm/atomic.h>
0058 #include <linux/tracepoint-defs.h>
0059
#ifdef CONFIG_TRACEPOINTS
/*
 * Tracing hooks for MSR/PMC accesses.  The accessors below only call the
 * out-of-line do_trace_*() functions when the corresponding tracepoint is
 * enabled (see the tracepoint_enabled() checks at the call sites), keeping
 * the fast path cheap.
 */
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
/* Tracepoints compiled out: empty stubs keep the call sites unconditional. */
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif
0072
0073
0074
0075
0076
0077
0078
0079
/*
 * __rdmsr(): lowest-level MSR read, no tracing.
 *
 * The exception table entry (EX_TYPE_RDMSR) covers the RDMSR instruction
 * at label 1 so a faulting access (e.g. a non-existent MSR raising #GP)
 * resumes at label 2 instead of oopsing.
 */
static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}
0091
/*
 * __wrmsr(): lowest-level MSR write, no tracing.
 *
 * Inputs follow the WRMSR convention: MSR number in ECX, value in EDX:EAX.
 * The exception table entry lets a faulting write be skipped; the "memory"
 * clobber keeps the compiler from reordering memory accesses around a
 * write that may have system-wide side effects.
 */
static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}
0099
/*
 * Untraced two-u32 accessor macros over __rdmsr()/__wrmsr().
 * native_rdmsr() splits the 64-bit value into two u32 lvalues; the (void)
 * casts swallow the assignment results so the macro has no usable value.
 */
#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

/* Write a 64-bit value, splitting it into the low/high halves WRMSR takes. */
#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))
0113
/*
 * native_read_msr(): non-"safe" MSR read with tracing.
 * Returns the 64-bit MSR contents; a non-existent MSR is handled by the
 * extable fixup inside __rdmsr() rather than reported to the caller.
 */
static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}
0125
/*
 * native_read_msr_safe(): MSR read that reports faults via *err.
 *
 * On success the inline "xor %[err],%[err]" zeroes the error register.
 * On a fault, the EX_TYPE_RDMSR_SAFE fixup resumes at label 2 with the
 * register bound to %[err] holding the error code (the xor is skipped).
 */
static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}
0140
0141
/*
 * native_write_msr(): non-"safe" MSR write with tracing.
 * notrace: excluded from function tracing — NOTE(review): presumably to
 * avoid recursion, since the tracing machinery itself may write MSRs.
 */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
0150
0151
/*
 * native_write_msr_safe(): MSR write that returns an error instead of
 * faulting.  Returns 0 on success, non-zero error code on fault.
 *
 * Note the constraint trick: err is bound to EAX ("=a") and low is tied
 * to the same register via "0", so EAX carries the low half into WRMSR
 * and the error/zero result out.  On success the xor zeroes it; on a
 * fault the EX_TYPE_WRMSR_SAFE fixup writes the error code into %[err].
 */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high)
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}
0167
/*
 * Fault-safe MSR access through a full 8-entry GPR array (implemented in
 * assembly elsewhere).  Return 0 on success, non-zero on fault.
 */
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * The CPU may execute RDTSC out of order with respect to surrounding
 * instructions; use rdtsc_ordered() when ordering matters.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
0188
0189
0190
0191
0192
0193
0194
0195
0196
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * A plain RDTSC can be executed speculatively, ahead of earlier
 * instructions, so the timestamp could "happen" before the code being
 * timed.  Depending on CPU capability, pick via alternatives:
 *
 *  - plain RDTSC (fallback, no ordering),
 *  - LFENCE; RDTSC when X86_FEATURE_LFENCE_RDTSC is set, or
 *  - RDTSCP when X86_FEATURE_RDTSCP is set, which waits for prior
 *    instructions to retire before reading the counter.
 *
 * RDTSCP also writes the TSC_AUX MSR contents into ECX, hence the
 * "ecx" clobber even though the RDTSC variants don't touch it.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
0224
/*
 * native_read_pmc(): read performance-monitoring counter 'counter'
 * (selected via ECX, per the RDPMC convention), with tracing.
 * No extable entry: an invalid counter index will fault.
 */
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}
0234
0235 #ifdef CONFIG_PARAVIRT_XXL
0236 #include <asm/paravirt.h>
0237 #else
0238 #include <linux/errno.h>
0239
0240
0241
0242
0243
0244
/*
 * Native (non-paravirt) front-end accessors.  With CONFIG_PARAVIRT_XXL
 * these names come from asm/paravirt.h instead (see #ifdef above).
 *
 * rdmsr(): read MSR into two u32 lvalues (low and high are written, not
 * pointers — contrast with rdmsr_safe() below).
 */
#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)
0251
/* Write an MSR given as two 32-bit halves; faults are fixed up, not reported. */
static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}
0256
/* Read an MSR into a single 64-bit lvalue. */
#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))
0259
0260 static inline void wrmsrl(unsigned int msr, u64 val)
0261 {
0262 native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
0263 }
0264
0265
/* As wrmsr(), but return 0 on success or a non-zero error code on fault. */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}
0270
0271
/*
 * Fault-reporting MSR read.  Unlike rdmsr(), low and high are POINTERS
 * here (note the dereferences).  Evaluates to 0 on success, non-zero on
 * fault; the output halves are written either way.
 */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})
0280
/*
 * Fault-reporting 64-bit MSR read: stores the value through @p and
 * returns 0 on success or a non-zero error code on fault.
 */
static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
0288
/* Read performance counter 'counter' into two u32 lvalues... */
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

/* ...or into one 64-bit lvalue. */
#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
0297
0298 #endif
0299
0300
0301
0302
0303 static inline int wrmsrl_safe(u32 msr, u64 val)
0304 {
0305 return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
0306 }
0307
/* Helpers implemented out of line (allocation and read-modify-write). */
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);	/* set bit; NOTE(review): confirm return semantics at definition */
int msr_clear_bit(u32 msr, u8 bit);	/* clear bit; see above */
0312
#ifdef CONFIG_SMP
/*
 * Cross-CPU MSR accessors: run the corresponding rd/wrmsr variant on the
 * given CPU (implemented out of line).  All return 0 on success or a
 * negative error code.
 */
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
0326 #else
/*
 * UP fallbacks: there is only one CPU, so the 'cpu' argument is ignored
 * and each helper just performs the access locally.  The non-safe
 * variants always return 0.
 */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
/* On UP only msrs[0] is meaningful; the cpumask is ignored. */
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
0382 #endif
0383 #endif
0384 #endif