#ifndef _GVT_H_
#define _GVT_H_

#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "intel_gvt.h"

#include "debug.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

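/* Per-platform device parameters and limits used by GVT */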
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

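/* Graphics memory (GM) resources owned by a vGPU */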
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

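/* Fence registers owned by a vGPU */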
struct intel_vgpu_fence {
	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
	u32 pmcsr_off;
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
	enum port port_num;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct intel_context *shadow[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	union {
		u64 i915_context_pml4;
		u64 i915_context_pdps[GEN8_3LVL_PDPES];
	};
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
	struct {
		u32 lrca;
		bool valid;
		u64 ring_context_gpa;
	} last_ctx[I915_NUM_ENGINES];
};

#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"

struct intel_vgpu {
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	int id;
	bool active;
	bool attached;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;

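	/* Both sched_data and sched_ctl can be seen as part of the global GVT
	 * scheduler structure, so they are protected by gvt->sched_lock
	 * rather than vgpu_lock.
	 */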
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];

	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
	bool d3_entered;

	struct dentry *debugfs;

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;
	struct intel_vgpu_vblank_timer vblank_timer;

	u32 scan_nonprivbb;

	struct vfio_device vfio_device;
	struct vfio_region *region;
	int num_regions;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;

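	/*
	 * Two caches are used to avoid mapping duplicated pages (eg.
	 * scratch pages). This helps to reduce dma setup overhead.
	 */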
	struct rb_root gfn_cache;
	struct rb_root dma_addr_cache;
	unsigned long nr_cache_entries;
	struct mutex cache_lock;

	atomic_t released;

	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

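/* Return values that indicate the guest VM is in an unhealthy state */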
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

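/* Special MMIO ranges handled by dedicated block read/write handlers */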
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg requires save & restore during host PM suspend/resume */
#define F_PM_SAVE	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is in GVT's mmio save-restore list and in hardware
 * logical context image
 */
#define F_SR_IN_CTX	(1 << 7)
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH	(1 << 8)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
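	/* GVT scope lock, protect GVT itself, and all resource currently
	 * not yet protected by special locks (vgpu and scheduler lock).
	 */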
	struct mutex lock;
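	/* scheduler scope lock, protect gvt and vgpu schedule related data */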
	struct mutex sched_lock;

	struct intel_gt *gt;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

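	/* service_request is only accessed with atomic bit operations
	 * (see intel_gvt_request_service() below), so it does not need
	 * to be protected by the gvt lock.
	 */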
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
		u32 *tlb_mmio_offset_list;
		u32 tlb_mmio_offset_list_cnt;
		u32 *mocs_mmio_offset_list;
		u32 mocs_mmio_offset_list_cnt;
	} engine_mmio_list;
	bool is_reg_whitelist_updated;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
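	/* Scheduling trigger by timer */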
	INTEL_GVT_REQUEST_SCHED = 0,

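	/* Scheduling trigger by event */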
	INTEL_GVT_REQUEST_EVENT_SCHED = 1,

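	/* Per-vGPU vblank emulation request */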
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 2,
	INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK
		+ GVT_MAX_VGPU,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

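/* Size-conversion helpers and GM/fence space reserved for the host */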
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)

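/* Aperture/GM space definitions for the GVT device */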
#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)

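/* Aperture/GM space definitions for a vGPU */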
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

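/* ring context size, i.e. the first 0x50 dwords */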
#define RING_CTX_SIZE 320

struct intel_vgpu_creation_params {
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

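/* Macros for easily accessing vGPU virtual/shadow registers.
 * Use the _t variants with a typed i915_reg_t and the plain variants with a
 * raw offset, e.g. vgpu_vreg_t(vgpu, PIPECONF(pipe)) vs. vgpu_vreg(vgpu, 0x70008).
 */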
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

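	/* BAR offset should be 32-bit aligned */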
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
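		/*
		 * only update bits 31:4,
		 * leave bits 3:0 unchanged.
		 */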
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);

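/* Helpers for validating guest graphics memory (GM) addresses */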
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
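	/* 64-bit BAR: read the full 64-bit base address from config space */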
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
	intel_runtime_pm_get(gt->uncore->rpm);
}

static inline void mmio_hw_access_post(struct intel_gt *gt)
{
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}

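/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as having been accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */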
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

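/**
 * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by
 * GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO is able to be accessed by GPU commands
 */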
static inline bool intel_gvt_mmio_is_cmd_accessible(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

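/**
 * intel_gvt_mmio_set_cmd_accessible - mark an MMIO as accessible by
 * GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */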
static inline void intel_gvt_mmio_set_cmd_accessible(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}

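/**
 * intel_gvt_mmio_is_unalign - check if an MMIO allows unaligned access
 * @gvt: a GVT device
 * @offset: register offset
 *
 */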
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

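/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its higher 16 bits, false if it doesn't.
 */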
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

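/**
 * intel_gvt_mmio_is_sr_in_ctx - check if an MMIO has the F_SR_IN_CTX attribute
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO is in GVT's mmio save-restore list and in the hardware
 * logical context image, false if it isn't.
 */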
static inline bool intel_gvt_mmio_is_sr_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}

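/**
 * intel_gvt_mmio_set_sr_in_ctx - mark an MMIO as being in GVT's mmio
 * save-restore list and in the hardware logical context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 */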
static inline void intel_gvt_mmio_set_sr_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}

void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
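/**
 * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose command write
 * needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 */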
static inline void intel_gvt_mmio_set_cmd_write_patch(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}

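/**
 * intel_gvt_mmio_is_cmd_write_patch - check if a command write to an MMIO
 * needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if a GPU command write to this MMIO should be patched
 */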
static inline bool intel_gvt_mmio_is_cmd_write_patch(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}

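/**
 * intel_gvt_read_gpa - copy data from GPA to a host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */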
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
		void *buf, unsigned long len)
{
	if (!vgpu->attached)
		return -ESRCH;
	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}

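/**
 * intel_gvt_write_gpa - copy data from a host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */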
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	if (!vgpu->attached)
		return -ESRCH;
	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}

void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
				 unsigned long size, dma_addr_t *dma_addr);
void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
				    dma_addr_t dma_addr);

#include "trace.h"

#endif