// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Emulation of the guest's Memory Type Range Registers (MTRRs) and of the
 * IA32_PAT MSR for KVM on x86.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* variable range MTRR base MSR: bits 11:8 are reserved. */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* variable range MTRR mask MSR: bits 10:0 are reserved. */
		mask |= 0x7ff;

	return (data & mask) == 0;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC memory type
	 * is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with a CPUID that reports
	 * no MTRRs at all.  In that case the firmware will never enable
	 * MTRRs, and it is clearly undesirable to run the guest entirely
	 * with UC memory, so use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 *  - segment: one of the address regions covered by the fixed MTRRs.
 *  - unit: the portion of a segment covered by a single MTRR MSR.
 *  - range: a sub-unit that is mapped with exactly one memory type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed MTRR. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed MTRR.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed MTRR.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};

/*
 * The size of the address range covered by one fixed MTRR MSR: each MSR
 * holds 8 ranges, so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit holds 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/*
	 * This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}

static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

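/* Bit 11 of the variable MTRR mask MSR is the V (valid/enable) flag. */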
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/*
	 * Set all illegal GPA bits in the mask, since those bits must
	 * implicitly be 0.  The bits are then cleared when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* variable MTRRs. */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	}

	return 0;
}

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

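/*
 * Iterator that walks all fixed and variable MTRR ranges overlapping
 * [start, end) and reports the memory type of each matching range.
 */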
struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered by MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* max address that has been covered so far. */
			u64 start_max;
		};
	};

	bool fixed;
};

static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * The list is sorted by base address, so this is the
		 * lowest range overlapping [iter->start_max, iter->end);
		 * a gap below it means the lookup is only a partial map.
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the highest address covered so far. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup if we reach the end of [start, end). */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* have looked up all fixed MTRRs, move on to the variable ones. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to the next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))

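/*
 * Return the effective guest memory type for the page at @gfn, combining
 * overlapping MTRR ranges according to the precedence rules described in
 * the Intel SDM.
 */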
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		    ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not covered by any MTRR, fall back to the default type. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * Only a single page was looked up, so it cannot be partially
	 * covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);

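/*
 * Return true if the gfn range [gfn, gfn + page_num) is mapped with a
 * single, consistent memory type, e.g. so that a large page spanning the
 * range does not mix memory types.
 */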
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}