Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Hyper-V Isolation VM interface with paravisor and hypervisor
0004  *
0005  * Author:
0006  *  Tianyu Lan <Tianyu.Lan@microsoft.com>
0007  */
0008 
0009 #include <linux/bitfield.h>
0010 #include <linux/hyperv.h>
0011 #include <linux/types.h>
0012 #include <linux/slab.h>
0013 #include <asm/svm.h>
0014 #include <asm/sev.h>
0015 #include <asm/io.h>
0016 #include <asm/mshyperv.h>
0017 #include <asm/hypervisor.h>
0018 
0019 #ifdef CONFIG_AMD_MEM_ENCRYPT
0020 
#define GHCB_USAGE_HYPERV_CALL  1

/*
 * Per-cpu GHCB page, viewed two ways:
 *  - as a standard AMD GHCB (struct ghcb) for the GHCB_DEFAULT_USAGE
 *    MSR-intercept protocol, or
 *  - for GHCB_USAGE_HYPERV_CALL, as a Hyper-V hypercall area: input data
 *    filling most of the page, followed by the output GPA and the
 *    hypercall input/output control words at fixed offsets.
 * The layout is an ABI shared with the paravisor/hypervisor; field order
 * and sizes must not change.  BUILD_BUG_ON in hv_ghcb_msr_read() pins the
 * union to exactly HV_HYP_PAGE_SIZE.
 */
union hv_ghcb {
    struct ghcb ghcb;
    struct {
        u64 hypercalldata[509];	/* hypercall input payload */
        u64 outputgpa;		/* guest physical address of output buffer */
        union {
            union {
                /* Hypercall input control word (Hyper-V TLFS layout). */
                struct {
                    u32 callcode        : 16;
                    u32 isfast          : 1;
                    u32 reserved1       : 14;
                    u32 isnested        : 1;
                    u32 countofelements : 12;
                    u32 reserved2       : 4;
                    u32 repstartindex   : 12;
                    u32 reserved3       : 4;
                };
                u64 asuint64;
            } hypercallinput;
            union {
                /* Hypercall output status word. */
                struct {
                    u16 callstatus;
                    u16 reserved1;
                    u32 elementsprocessed : 12;
                    u32 reserved2         : 20;
                };
                u64 asunit64;	/* NOTE: historical typo for "asuint64"; kept for compatibility */
            } hypercalloutput;
        };
        u64 reserved2;
    } hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* GHCB protocol version negotiated in hv_ghcb_negotiate_protocol(). */
static u16 hv_ghcb_version __ro_after_init;
0057 
0058 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
0059 {
0060     union hv_ghcb *hv_ghcb;
0061     void **ghcb_base;
0062     unsigned long flags;
0063     u64 status;
0064 
0065     if (!hv_ghcb_pg)
0066         return -EFAULT;
0067 
0068     WARN_ON(in_nmi());
0069 
0070     local_irq_save(flags);
0071     ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
0072     hv_ghcb = (union hv_ghcb *)*ghcb_base;
0073     if (!hv_ghcb) {
0074         local_irq_restore(flags);
0075         return -EFAULT;
0076     }
0077 
0078     hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
0079     hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;
0080 
0081     hv_ghcb->hypercall.outputgpa = (u64)output;
0082     hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
0083     hv_ghcb->hypercall.hypercallinput.callcode = control;
0084 
0085     if (input_size)
0086         memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);
0087 
0088     VMGEXIT();
0089 
0090     hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
0091     memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
0092            sizeof(hv_ghcb->ghcb.save.valid_bitmap));
0093 
0094     status = hv_ghcb->hypercall.hypercalloutput.callstatus;
0095 
0096     local_irq_restore(flags);
0097 
0098     return status;
0099 }
0100 
/* Read the GHCB MSR (holds the GHCB GPA / MSR-protocol response), untraced. */
static inline u64 rd_ghcb_msr(void)
{
    return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}
0105 
/* Write the GHCB MSR (GHCB GPA or an MSR-protocol request), untraced. */
static inline void wr_ghcb_msr(u64 val)
{
    native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}
0110 
/*
 * Issue one GHCB-mediated VMGEXIT with the given exit code and info
 * fields, then report whether the hypervisor flagged an error in the
 * low 32 bits of sw_exit_info_1.
 */
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
                   u64 exit_info_1, u64 exit_info_2)
{
    /* Fill in protocol and format specifiers */
    ghcb->protocol_version = hv_ghcb_version;
    ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

    ghcb_set_sw_exit_code(ghcb, exit_code);
    ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
    ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

    VMGEXIT();

    /* Non-zero low 32 bits of sw_exit_info_1 indicates VMM failure. */
    if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
        return ES_VMM_ERROR;
    else
        return ES_OK;
}
0129 
/*
 * Request guest termination from the hypervisor, reporting the reason
 * set/code, then halt forever in case termination is not immediate.
 */
void hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
    u64 val = GHCB_MSR_TERM_REQ;

    /* Tell the hypervisor what went wrong. */
    val |= GHCB_SEV_TERM_REASON(set, reason);

    /* Request Guest Termination from Hypervisor */
    wr_ghcb_msr(val);
    VMGEXIT();

    /* Should not return; spin in halt if the request is ignored. */
    while (true)
        asm volatile("hlt\n" : : : "memory");
}
0144 
/*
 * Negotiate the GHCB protocol version with the hypervisor via the GHCB
 * MSR protocol.  On success, stores the highest mutually supported
 * version in hv_ghcb_version, restores the saved GHCB GPA, and returns
 * true; returns false when negotiation fails.
 */
bool hv_ghcb_negotiate_protocol(void)
{
    u64 ghcb_gpa;
    u64 val;

    /* Save ghcb page gpa. */
    ghcb_gpa = rd_ghcb_msr();

    /* Do the GHCB protocol version negotiation */
    wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
    VMGEXIT();
    val = rd_ghcb_msr();

    if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
        return false;

    /* Fail when the hypervisor's and our supported ranges don't overlap. */
    if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
        GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
        return false;

    hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
                 GHCB_PROTOCOL_MAX);

    /* Write ghcb page back after negotiating protocol. */
    wr_ghcb_msr(ghcb_gpa);
    VMGEXIT();

    return true;
}
0174 
0175 void hv_ghcb_msr_write(u64 msr, u64 value)
0176 {
0177     union hv_ghcb *hv_ghcb;
0178     void **ghcb_base;
0179     unsigned long flags;
0180 
0181     if (!hv_ghcb_pg)
0182         return;
0183 
0184     WARN_ON(in_nmi());
0185 
0186     local_irq_save(flags);
0187     ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
0188     hv_ghcb = (union hv_ghcb *)*ghcb_base;
0189     if (!hv_ghcb) {
0190         local_irq_restore(flags);
0191         return;
0192     }
0193 
0194     ghcb_set_rcx(&hv_ghcb->ghcb, msr);
0195     ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
0196     ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
0197 
0198     if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
0199         pr_warn("Fail to write msr via ghcb %llx.\n", msr);
0200 
0201     local_irq_restore(flags);
0202 }
0203 EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
0204 
0205 void hv_ghcb_msr_read(u64 msr, u64 *value)
0206 {
0207     union hv_ghcb *hv_ghcb;
0208     void **ghcb_base;
0209     unsigned long flags;
0210 
0211     /* Check size of union hv_ghcb here. */
0212     BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
0213 
0214     if (!hv_ghcb_pg)
0215         return;
0216 
0217     WARN_ON(in_nmi());
0218 
0219     local_irq_save(flags);
0220     ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
0221     hv_ghcb = (union hv_ghcb *)*ghcb_base;
0222     if (!hv_ghcb) {
0223         local_irq_restore(flags);
0224         return;
0225     }
0226 
0227     ghcb_set_rcx(&hv_ghcb->ghcb, msr);
0228     if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
0229         pr_warn("Fail to read msr via ghcb %llx.\n", msr);
0230     else
0231         *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
0232             | ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
0233     local_irq_restore(flags);
0234 }
0235 EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
0236 #endif
0237 
0238 enum hv_isolation_type hv_get_isolation_type(void)
0239 {
0240     if (!(ms_hyperv.priv_high & HV_ISOLATION))
0241         return HV_ISOLATION_TYPE_NONE;
0242     return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
0243 }
0244 EXPORT_SYMBOL_GPL(hv_get_isolation_type);
0245 
0246 /*
0247  * hv_is_isolation_supported - Check system runs in the Hyper-V
0248  * isolation VM.
0249  */
0250 bool hv_is_isolation_supported(void)
0251 {
0252     if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
0253         return false;
0254 
0255     if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
0256         return false;
0257 
0258     return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
0259 }
0260 
/* Static key: enabled (elsewhere) when the VM uses SEV-SNP isolation. */
DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check system runs in the AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
    return static_branch_unlikely(&isolation_type_snp);
}
0271 
0272 /*
0273  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
0274  *
0275  * In Isolation VM, all guest memory is encrypted from host and guest
0276  * needs to set memory visible to host via hvcall before sharing memory
0277  * with host.
0278  */
0279 static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
0280                enum hv_mem_host_visibility visibility)
0281 {
0282     struct hv_gpa_range_for_visibility **input_pcpu, *input;
0283     u16 pages_processed;
0284     u64 hv_status;
0285     unsigned long flags;
0286 
0287     /* no-op if partition isolation is not enabled */
0288     if (!hv_is_isolation_supported())
0289         return 0;
0290 
0291     if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
0292         pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
0293             HV_MAX_MODIFY_GPA_REP_COUNT);
0294         return -EINVAL;
0295     }
0296 
0297     local_irq_save(flags);
0298     input_pcpu = (struct hv_gpa_range_for_visibility **)
0299             this_cpu_ptr(hyperv_pcpu_input_arg);
0300     input = *input_pcpu;
0301     if (unlikely(!input)) {
0302         local_irq_restore(flags);
0303         return -EINVAL;
0304     }
0305 
0306     input->partition_id = HV_PARTITION_ID_SELF;
0307     input->host_visibility = visibility;
0308     input->reserved0 = 0;
0309     input->reserved1 = 0;
0310     memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
0311     hv_status = hv_do_rep_hypercall(
0312             HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
0313             0, input, &pages_processed);
0314     local_irq_restore(flags);
0315 
0316     if (hv_result_success(hv_status))
0317         return 0;
0318     else
0319         return -EFAULT;
0320 }
0321 
0322 /*
0323  * hv_set_mem_host_visibility - Set specified memory visible to host.
0324  *
0325  * In Isolation VM, all guest memory is encrypted from host and guest
0326  * needs to set memory visible to host via hvcall before sharing memory
0327  * with host. This function works as wrap of hv_mark_gpa_visibility()
0328  * with memory base and size.
0329  */
0330 int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visible)
0331 {
0332     enum hv_mem_host_visibility visibility = visible ?
0333             VMBUS_PAGE_VISIBLE_READ_WRITE : VMBUS_PAGE_NOT_VISIBLE;
0334     u64 *pfn_array;
0335     int ret = 0;
0336     int i, pfn;
0337 
0338     if (!hv_is_isolation_supported() || !hv_hypercall_pg)
0339         return 0;
0340 
0341     pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
0342     if (!pfn_array)
0343         return -ENOMEM;
0344 
0345     for (i = 0, pfn = 0; i < pagecount; i++) {
0346         pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
0347         pfn++;
0348 
0349         if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
0350             ret = hv_mark_gpa_visibility(pfn, pfn_array,
0351                              visibility);
0352             if (ret)
0353                 goto err_free_pfn_array;
0354             pfn = 0;
0355         }
0356     }
0357 
0358  err_free_pfn_array:
0359     kfree(pfn_array);
0360     return ret;
0361 }
0362 
0363 /*
0364  * hv_map_memory - map memory to extra space in the AMD SEV-SNP Isolation VM.
0365  */
0366 void *hv_map_memory(void *addr, unsigned long size)
0367 {
0368     unsigned long *pfns = kcalloc(size / PAGE_SIZE,
0369                       sizeof(unsigned long), GFP_KERNEL);
0370     void *vaddr;
0371     int i;
0372 
0373     if (!pfns)
0374         return NULL;
0375 
0376     for (i = 0; i < size / PAGE_SIZE; i++)
0377         pfns[i] = vmalloc_to_pfn(addr + i * PAGE_SIZE) +
0378             (ms_hyperv.shared_gpa_boundary >> PAGE_SHIFT);
0379 
0380     vaddr = vmap_pfn(pfns, size / PAGE_SIZE, PAGE_KERNEL_IO);
0381     kfree(pfns);
0382 
0383     return vaddr;
0384 }
0385 
/* Undo hv_map_memory(): release the vmap alias created for @addr. */
void hv_unmap_memory(void *addr)
{
    vunmap(addr);
}