0001
0002 #ifndef INCLUDE_XEN_OPS_H
0003 #define INCLUDE_XEN_OPS_H
0004
0005 #include <linux/percpu.h>
0006 #include <linux/notifier.h>
0007 #include <linux/efi.h>
0008 #include <linux/virtio_anchor.h>
0009 #include <xen/features.h>
0010 #include <asm/xen/interface.h>
0011 #include <xen/interface/vcpu.h>
0012
/* Per-CPU pointer to this CPU's shared vcpu_info structure. */
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* Per-CPU Xen vcpu ID for each Linux CPU. */
DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
/* Return the Xen vcpu ID corresponding to Linux CPU number @cpu. */
static inline uint32_t xen_vcpu_nr(int cpu)
{
	return per_cpu(xen_vcpu_id, cpu);
}

/* Sentinel for a CPU with no valid Xen vcpu ID. */
#define XEN_VCPU_ID_INVALID U32_MAX
0022
/* Arch-specific hooks invoked before and after a suspend cycle. */
void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);

void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);

/* Reboot the domain; @reason selects the shutdown reason code. */
void xen_reboot(int reason);

/* Register/unregister a notifier to be run on resume. */
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);

/* Runstate / steal-time accounting helpers. */
bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
u64 xen_steal_clock(int cpu);

int xen_setup_shutdown_event(void);

extern unsigned long *xen_contiguous_bitmap;
0045
0046 #if defined(CONFIG_XEN_PV)
0047 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
0048 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
0049 unsigned int domid, bool no_translate);
0050 #else
0051 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
0052 xen_pfn_t *pfn, int nr, int *err_ptr,
0053 pgprot_t prot, unsigned int domid,
0054 bool no_translate)
0055 {
0056 BUG();
0057 return 0;
0058 }
0059 #endif
0060
0061 struct vm_area_struct;
0062
#ifdef CONFIG_XEN_AUTO_XLATE
/* Map a foreign GFN array into @vma for auto-translated guests. */
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned int domid,
			      struct page **pages);
/* Undo a mapping established by xen_xlate_remap_gfn_array(). */
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages);
#else
/*
 * Without CONFIG_XEN_AUTO_XLATE the translated-physmap paths do not
 * exist; report the operation as unsupported.
 */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
					    unsigned long addr,
					    xen_pfn_t *gfn, int nr,
					    int *err_ptr, pgprot_t prot,
					    unsigned int domid,
					    struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
					    int nr, struct page **pages)
{
	return -EOPNOTSUPP;
}
#endif
0093
int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
			unsigned long len);

/*
 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
 * @vma:     VMA to map the pages into
 * @addr:    Address at which to map the pages
 * @gfn:     Array of GFNs to map
 * @nr:      Number of entries in the GFN array
 * @err_ptr: Per-GFN error status; must not be NULL on the
 *           non-translated path (BUG_ON below)
 * @prot:    page protection mask
 * @domid:   Domain owning the pages
 * @pages:   Page array, forwarded to the auto-translated path only
 *
 * Auto-translated guests are dispatched to xen_xlate_remap_gfn_array();
 * everyone else goes through xen_remap_pfn() with no_translate=false.
 */
static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t *gfn, int nr,
					     int *err_ptr, pgprot_t prot,
					     unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * Passing a NULL err_ptr is a programmer error: the
	 * non-translated path reports per-frame errors through it.
	 */
	BUG_ON(err_ptr == NULL);
	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			     false);
}
0133
/*
 * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
 * @vma:     VMA to map the pages into
 * @addr:    Address at which to map the pages
 * @mfn:     Array of MFNs to map
 * @nr:      Number of entries in the MFN array
 * @err_ptr: Per-MFN error status, passed through to xen_remap_pfn()
 * @prot:    page protection mask
 * @domid:   Domain owning the pages
 *
 * Not supported for auto-translated guests (returns -EOPNOTSUPP).
 * Otherwise maps via xen_remap_pfn() with no_translate=true, i.e. the
 * MFNs are used as-is.
 */
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
					     unsigned long addr, xen_pfn_t *mfn,
					     int nr, int *err_ptr,
					     pgprot_t prot, unsigned int domid)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			     true);
}
0161
/*
 * xen_remap_domain_gfn_range() - map a contiguous range of foreign frames
 * @vma:   VMA to map the pages into
 * @addr:  Address at which to map the pages
 * @gfn:   First GFN of the contiguous range to map
 * @nr:    Number of frames to map
 * @prot:  page protection mask
 * @domid: Domain owning the pages
 * @pages: not used by this body — presumably kept for interface
 *         symmetry with the array variants; TODO confirm
 *
 * Not supported for auto-translated guests (returns -EOPNOTSUPP).
 * Maps via xen_remap_pfn() with a NULL err_ptr: a single error code
 * covers the whole range.
 */
static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t gfn, int nr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}
0185
/* Unmap @numpgs pages previously mapped into @vma. */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages);

/* Map ballooned pages, returning their PFNs and a kernel mapping. */
int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
				  unsigned long nr_grant_frames);

/* True if the running Xen is version @major.@minor or newer. */
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);

void xen_efi_runtime_setup(void);
0196
#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)

/* Set while the current CPU is inside a preemptible hypercall. */
DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

/* Mark entry into a preemptible hypercall on this CPU. */
static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

/* Mark exit from a preemptible hypercall on this CPU. */
static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}

#else

/* No tracking needed: !PV, or the kernel is already preemptible. */
static inline void xen_preemptible_hcall_begin(void) { }
static inline void xen_preemptible_hcall_end(void) { }

#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
0217
/*
 * Forward declarations hoisted above the #ifdef: previously
 * "struct virtio_device;" was declared only in the #else branch, so
 * with CONFIG_XEN_GRANT_DMA_OPS set the prototype below declared the
 * struct with prototype scope (compiler warning, type-mismatch risk).
 */
struct device;
struct virtio_device;

#ifdef CONFIG_XEN_GRANT_DMA_OPS
/* Install grant-table based DMA ops on @dev. */
void xen_grant_setup_dma_ops(struct device *dev);
/* True if @dev uses grant-table based DMA. */
bool xen_is_grant_dma_device(struct device *dev);
bool xen_virtio_mem_acc(struct virtio_device *dev);
#else
/* Stubs when CONFIG_XEN_GRANT_DMA_OPS is disabled. */
static inline void xen_grant_setup_dma_ops(struct device *dev)
{
}
static inline bool xen_is_grant_dma_device(struct device *dev)
{
	return false;
}
static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
{
	return false;
}
#endif /* CONFIG_XEN_GRANT_DMA_OPS */
0238
0239 #endif