// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

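/*
 * The host maps the magic page at the very top of the guest's effective
 * address space, so the constant -4096 addresses its first byte.
 * magic_var() yields the effective address of one field of the shared
 * struct kvm_vcpu_arch_shared that lives there.
 */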
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

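/* Opcode templates and field masks for the instructions we rewrite. */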
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

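/*
 * kvm_tmp[] is a scratch area reserved in the kernel image (see
 * arch/powerpc/kernel/kvm_emul.S) into which the per-site emulation
 * trampolines are copied and fixed up.
 */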
static bool kvm_patching_worked = true;
extern char kvm_tmp[];
extern char kvm_tmp_end[];
static int kvm_tmp_index;

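/* Overwrite one instruction word and make the change visible to the icache. */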
static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	/* 32-bit big endian: the low word of the 64-bit field is at +4 */
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	/* 32-bit big endian: store to the low word of the 64-bit field */
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void __init kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupts handlers and our code
	   can be in different regions, so we don't patch them */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

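/* Carve @len bytes out of kvm_tmp[] for the next trampoline. */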
static u32 * __init kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

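/*
 * The kvm_emulate_* code templates live in arch/powerpc/kernel/kvm_emul.S.
 * The *_offs symbols give the word offsets, within each template, of the
 * instructions that must be fixed up for every patch site.
 */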
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

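/*
 * Replace an mtmsrd instruction with a branch to a fixed-up copy of the
 * kvm_emulate_mtmsrd template, which updates the MSR copy in the magic
 * page instead of trapping.
 */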
static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

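/* Same idea as above, for the 32-bit mtmsr and for mtmsrd with L=0. */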
static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

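/*
 * wrtee/wrteei touch only MSR[EE].  @imm_one is set when patching a
 * "wrteei 1", whose EE value is an immediate in the instruction rather
 * than a register.
 */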
static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

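/* "wrteei 0" needs no register fixups, only the return branch. */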
static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

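/* mtsrin rewrites a segment register; rb supplies the SR index. */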
static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif /* CONFIG_PPC_BOOK3S_32 */

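/*
 * Runs on every CPU: ask the host, via the KVM_HC_PPC_MAP_MAGIC_PAGE
 * ePAPR hypercall, to map the magic page, and collect the feature bits
 * the host returns.
 */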
static void __init kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

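/*
 * Inspect one instruction word and, if it is a privileged operation we
 * know how to paravirtualize, patch it: reads and writes of emulated
 * SPRs become loads/stores on the magic page, tlbsync becomes a nop,
 * and MSR updates branch into generated trampolines.
 */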
static void __init kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

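/*
 * Map the magic page, then walk every instruction in the kernel text
 * and patch the ones we can handle, skipping the templates themselves.
 */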
static void __init kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
			      sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return 0;

	if (!epapr_paravirt_enabled)
		return 0;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

	return 0;
}

postcore_initcall(kvm_guest_init);