// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 */
#include <linux/anon_inodes.h>

#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16

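/*
 * Compute the guest-real address of the PTEG containing pte_index.
 * SDR1 holds the real base address of the hashed page table in its
 * upper bits and the table size (HTABSIZE) in its low five bits; the
 * byte offset of the entry (pte_index * 16) is masked to the table
 * size (plus the 0x70 slot-within-PTEG bits) and OR'd onto the base.
 */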
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	unsigned long pteg_addr;

	pte_index <<= 4;
	pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg_addr |= pte_index;

	return pteg_addr;
}

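/*
 * H_ENTER: insert an HPTE into the guest's hashed page table.  The PTEG
 * lives in guest (userspace) memory, so it is read and written back with
 * copy_from_user()/copy_to_user() under the hpt_mutex.  Unless H_EXACT
 * is set, any invalid slot in the PTEG may be used; the chosen slot is
 * returned to the guest in r4.
 */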
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	__be64 pteg[2 * 8];
	__be64 *hpte;
	unsigned long pteg_addr, i;
	long int ret;

	i = pte_index & 7;
	pte_index &= ~7UL;
	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
		goto done;
	hpte = pteg;

	ret = H_PTEG_FULL;
	if (likely((flags & H_EXACT) == 0)) {
		for (i = 0; ; ++i) {
			if (i == 8)
				goto done;
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		hpte += i * 2;
		if (be64_to_cpu(*hpte) & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
	pteg_addr += i * HPTE_SIZE;
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
		goto done;
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

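/*
 * H_REMOVE: invalidate an HPTE.  The entry must be valid and, depending
 * on the flags, match the AVPN in r6 (H_AVPN) or have none of the r6
 * bits set (H_ANDCOND).  On success the old V and R dwords are returned
 * in r4/r5 and the stale translation is flushed with tlbie.
 */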
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;
	pte[0] = be64_to_cpu((__force __be64)pte[0]);
	pte[1] = be64_to_cpu((__force __be64)pte[1]);

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
		goto done;

	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

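/*
 * H_BULK_REMOVE parameter encoding: each request occupies two registers,
 * a "translation specifier" high word (type, return code, flags and PTE
 * index) and a low word carrying the AVPN or AND-condition mask.
 */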
#define H_BULK_REMOVE_TYPE		0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST		0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE	0x8000000000000000ULL
#define   H_BULK_REMOVE_END		0xc000000000000000ULL
#define H_BULK_REMOVE_CODE		0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS		0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND	0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM		0x2000000000000000ULL
#define   H_BULK_REMOVE_HW		0x3000000000000000ULL
#define H_BULK_REMOVE_RC		0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS		0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE	0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND		0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN		0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX		0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH		4

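/*
 * H_BULK_REMOVE: process up to four remove requests passed in r4-r11.
 * Each tsh/tsl pair is validated and handled like H_REMOVE; the response
 * code (plus the referenced/changed bits on success) is written back into
 * the request's high register, and r3 reports the overall status.
 */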
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
			ret = H_FUNCTION;
			break;
		}
		pte[0] = be64_to_cpu((__force __be64)pte[0]);
		pte[1] = be64_to_cpu((__force __be64)pte[1]);

		/* tsl holds the AVPN or the AND mask, depending on flags */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Clear the valid dword in the userspace-held HPT */
			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
				ret = H_FUNCTION;
				break;
			}

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

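/*
 * H_PROTECT: update the protection bits (pp0/pp/N and the storage keys)
 * in the R dword of an existing HPTE from the flags in r4, then flush
 * the old translation with tlbie and write the entry back.
 */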
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;
	pte[0] = be64_to_cpu((__force __be64)pte[0]);
	pte[1] = be64_to_cpu((__force __be64)pte[1]);

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];
	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
	       HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	pte[1] = r;

	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	pte[0] = (__force u64)cpu_to_be64(pte[0]);
	pte[1] = (__force u64)cpu_to_be64(pte[1]);
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
		goto done;
	ret = H_SUCCESS;

done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

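/*
 * H_LOGICAL_CI_LOAD/STORE are handled by the common helpers; H_TOO_HARD
 * means the access could not be emulated in the kernel, so EMULATE_FAIL
 * lets the caller pass the hypercall on to userspace.
 */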
static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
	long rc;

	rc = kvmppc_h_logical_ci_load(vcpu);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
	long rc;

	rc = kvmppc_h_logical_ci_store(vcpu);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

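/*
 * H_SET_MODE: PR KVM only implements the address-translation-mode
 * resource, and only mflags == 0 (interrupts vector to their real-mode
 * offsets); any other mode is reported as an unsupported flag.
 */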
static int kvmppc_h_pr_set_mode(struct kvm_vcpu *vcpu)
{
	unsigned long mflags = kvmppc_get_gpr(vcpu, 4);
	unsigned long resource = kvmppc_get_gpr(vcpu, 5);

	if (resource == H_SET_MODE_RESOURCE_ADDR_TRANS_MODE) {
		/* KVM PR does not provide AIL != 0 to guests */
		if (mflags == 0)
			kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
		else
			kvmppc_set_gpr(vcpu, 3, H_UNSUPPORTED_FLAG_START - 63);
		return EMULATE_DONE;
	}
	return EMULATE_FAIL;
}

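/*
 * TCE (DMA window) hypercalls are only emulated when SPAPR TCE IOMMU
 * support is built in; otherwise they fail emulation and are left to
 * userspace.
 */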
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	long rc;

	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
	long rc;

	rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
				       tce, npages);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
	long rc;

	rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

#else
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}
#endif

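/* Hand an XICS interrupt-controller hcall to the XICS emulation. */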
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
	long rc = kvmppc_xics_hcall(vcpu, cmd);

	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

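/*
 * Top-level PAPR hypercall dispatcher for PR KVM.  Hcalls that are
 * disabled or not handled here return EMULATE_FAIL so the caller can
 * forward them to userspace.
 */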
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	int rc, idx;

	if (cmd <= MAX_HCALL_OPCODE &&
	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
		return EMULATE_FAIL;

	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		return kvmppc_h_pr_bulk_remove(vcpu);
	case H_PUT_TCE:
		return kvmppc_h_pr_put_tce(vcpu);
	case H_PUT_TCE_INDIRECT:
		return kvmppc_h_pr_put_tce_indirect(vcpu);
	case H_STUFF_TCE:
		return kvmppc_h_pr_stuff_tce(vcpu);
	case H_CEDE:
		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
		kvm_vcpu_halt(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		vcpu->stat.generic.halt_wakeup++;
		return EMULATE_DONE;
	case H_LOGICAL_CI_LOAD:
		return kvmppc_h_pr_logical_ci_load(vcpu);
	case H_LOGICAL_CI_STORE:
		return kvmppc_h_pr_logical_ci_store(vcpu);
	case H_SET_MODE:
		return kvmppc_h_pr_set_mode(vcpu);
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu))
			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			break;
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (rc)
			break;
		kvmppc_set_gpr(vcpu, 3, 0);
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}

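/* Report whether PR KVM implements the given hcall. */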
int kvmppc_hcall_impl_pr(unsigned long cmd)
{
	switch (cmd) {
	case H_ENTER:
	case H_REMOVE:
	case H_PROTECT:
	case H_BULK_REMOVE:
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case H_GET_TCE:
	case H_PUT_TCE:
	case H_PUT_TCE_INDIRECT:
	case H_STUFF_TCE:
#endif
	case H_CEDE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
	case H_SET_MODE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}
	return 0;
}

/*
 * List of hcall numbers to enable by default.  For compatibility with
 * old userspace, we enable by default all hcalls that were implemented
 * before the hcall-enabling facility was added.  Note that this list
 * should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
	H_ENTER,
	H_REMOVE,
	H_PROTECT,
	H_BULK_REMOVE,
#ifdef CONFIG_SPAPR_TCE_IOMMU
	H_GET_TCE,
	H_PUT_TCE,
#endif
	H_CEDE,
	H_SET_MODE,
#ifdef CONFIG_KVM_XICS
	H_XIRR,
	H_CPPR,
	H_EOI,
	H_IPI,
	H_IPOLL,
	H_XIRR_X,
#endif
	0
};

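/* Seed a new VM's enabled-hcall bitmap with the defaults listed above. */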
void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
{
	int i;
	unsigned int hcall;

	for (i = 0; default_hcall_list[i]; ++i) {
		hcall = default_hcall_list[i];
		WARN_ON(!kvmppc_hcall_impl_pr(hcall));
		__set_bit(hcall / 4, kvm->arch.enabled_hcalls);
	}
}