/*
 * Hyper-V nested virtualization code.
 */
#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>

#include <asm/trace/hyperv.h>

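/**
 * hyperv_flush_guest_mapping() - Flush a whole guest address space
 * @as: Address space ID (e.g. an EPT pointer) identifying the guest
 *      physical address space to flush.
 *
 * Issue the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall to
 * invalidate all cached mappings of the given guest physical address
 * space.
 *
 * Return: 0 on success, -ENOTSUPP if the hypercall page is unavailable
 * or the hypercall fails.
 */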
int hyperv_flush_guest_mapping(u64 as)
{
	struct hv_guest_mapping_flush **flush_pcpu;
	struct hv_guest_mapping_flush *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;

	if (!hv_hypercall_pg)
		goto fault;

	local_irq_save(flags);

	flush_pcpu = (struct hv_guest_mapping_flush **)
		this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
				 flush, NULL);
	local_irq_restore(flags);

	if (hv_result_success(status))
		ret = 0;

fault:
	trace_hyperv_nested_flush_guest_mapping(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);

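/**
 * hyperv_fill_flush_guest_mapping_list() - Fill a flush list with a GFN range
 * @flush: Flush list (hypercall input page) to fill.
 * @start_gfn: First guest frame number of the range to flush.
 * @pages: Number of pages in the range.
 *
 * Encode the range [@start_gfn, @start_gfn + @pages) as gpa_list entries,
 * packing up to HV_MAX_FLUSH_PAGES pages into each entry.
 *
 * Return: the number of gpa_list entries filled, or -ENOSPC if the range
 * would need more than HV_MAX_FLUSH_REP_COUNT entries.
 */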
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 pages)
{
	u64 cur = start_gfn;
	u64 additional_pages;
	int gpa_n = 0;

	do {
		/*
		 * If the flush request needs more than
		 * HV_MAX_FLUSH_REP_COUNT entries, bail out so the caller
		 * can fall back to flushing the whole address space.
		 */
		if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
			return -ENOSPC;

		additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;

		flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
		flush->gpa_list[gpa_n].page.largepage = false;
		flush->gpa_list[gpa_n].page.basepfn = cur;

		pages -= additional_pages + 1;
		cur += additional_pages + 1;
		gpa_n++;
	} while (pages > 0);

	return gpa_n;
}
EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);

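/**
 * hyperv_flush_guest_mapping_range() - Flush part of a guest address space
 * @as: Address space ID (e.g. an EPT pointer) identifying the guest
 *      physical address space to flush.
 * @fill_flush_list_func: Callback that fills the hypercall input page with
 *      gpa_list entries and returns the entry count (or a negative error).
 * @data: Opaque pointer passed through to @fill_flush_list_func.
 *
 * Issue the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST rep hypercall to
 * invalidate the mappings described by the filled gpa_list.
 *
 * Return: 0 on success, -ENOTSUPP if the hypercall page is unavailable or
 * the callback fails, or the non-zero Hyper-V status code if the hypercall
 * itself fails.
 */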
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_flush_list_func, void *data)
{
	struct hv_guest_mapping_flush_list **flush_pcpu;
	struct hv_guest_mapping_flush_list *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;
	int gpa_n = 0;

	if (!hv_hypercall_pg || !fill_flush_list_func)
		goto fault;

	local_irq_save(flags);

	flush_pcpu = (struct hv_guest_mapping_flush_list **)
		this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	gpa_n = fill_flush_list_func(flush, data);
	if (gpa_n < 0) {
		local_irq_restore(flags);
		goto fault;
	}

	status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
				     gpa_n, 0, flush, NULL);

	local_irq_restore(flags);

	if (hv_result_success(status))
		ret = 0;
	else
		ret = hv_result(status);
fault:
	trace_hyperv_nested_flush_guest_mapping_range(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
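
/*
 * Illustrative sketch only, not part of the original file: one way a caller
 * might combine the helpers above. The names example_flush_range,
 * example_fill_flush_list and example_flush_guest_range, and the
 * fall-back-to-full-flush policy, are hypothetical; only the hyperv_*
 * helpers and the Hyper-V types they take are defined by this file.
 */
struct example_flush_range {
	u64 start_gfn;	/* first guest frame number to flush */
	u64 pages;	/* number of pages starting at start_gfn */
};

/* Callback for hyperv_flush_guest_mapping_range(): encode one GFN range. */
static int example_fill_flush_list(struct hv_guest_mapping_flush_list *flush,
				   void *data)
{
	struct example_flush_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
						    range->pages);
}

/* Flush a GFN range; fall back to flushing the whole address space. */
static int __maybe_unused example_flush_guest_range(u64 as, u64 start_gfn,
						    u64 pages)
{
	struct example_flush_range range = {
		.start_gfn	= start_gfn,
		.pages		= pages,
	};
	int ret;

	ret = hyperv_flush_guest_mapping_range(as, example_fill_flush_list,
					       &range);
	if (ret)
		ret = hyperv_flush_guest_mapping(as);

	return ret;
}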