// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

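/*
 * Write @data to @buf as a @len-byte value, going through a union member
 * of the matching size so the buffer ends up in host byte order.
 */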
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8 byte;
		u16 hword;
		u32 word;
		u64 dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte = data;
		datap = &tmp.byte;
		break;
	case 2:
		tmp.hword = data;
		datap = &tmp.hword;
		break;
	case 4:
		tmp.word = data;
		datap = &tmp.word;
		break;
	case 8:
		tmp.dword = data;
		datap = &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}

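/*
 * Read a @len-byte value from @buf (in host byte order) and return it
 * zero-extended to an unsigned long.
 */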
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16 hword;
		u32 word;
		u64 dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}
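/*
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 */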
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
	unsigned long data;
	unsigned int len;
	int mask;
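	/* Detect an already handled MMIO return */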
	if (unlikely(!vcpu->mmio_needed))
		return 0;

	vcpu->mmio_needed = 0;

	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
		struct kvm_run *run = vcpu->run;

		len = kvm_vcpu_dabt_get_as(vcpu);
		data = kvm_mmio_read_buf(run->mmio.data, len);

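		/*
		 * For a signed load narrower than a full register,
		 * sign-extend the value up from its most significant bit.
		 */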
		if (kvm_vcpu_dabt_issext(vcpu) &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

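		/* A 32-bit destination register only keeps the low 32 bits. */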
		if (!kvm_vcpu_dabt_issf(vcpu))
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
	}
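	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */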
	kvm_incr_pc(vcpu);

	return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	struct kvm_run *run = vcpu->run;
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];

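	/*
	 * No valid syndrome? Ask userspace for help if it has
	 * volunteered to do so, and return an error if not.
	 */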
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			     &vcpu->kvm->arch.flags)) {
			run->exit_reason = KVM_EXIT_ARM_NISV;
			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}

		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
		return -ENOSYS;
	}
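	/*
	 * Prepare MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then try if some in-kernel emulation feels
	 * responsible, otherwise let user space do its magic.
	 */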
	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	len = kvm_vcpu_dabt_get_as(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}
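	/* Now prepare kvm_run for the potential return to userland. */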
	run->mmio.is_write = is_write;
	run->mmio.phys_addr = fault_ipa;
	run->mmio.len = len;
	vcpu->mmio_needed = 1;

	if (!ret) {
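		/* We handled the access successfully in the kernel. */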
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu);
		return 1;
	}

	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;
	return 0;
}