/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_SVM_H_
#define KFD_SVM_H_

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"

#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
#define SVM_ADEV_PGMAP_OWNER(adev)\
		((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
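
/*
 * Note on the pgmap owner: HMM uses it to decide which device-private
 * pages a caller may handle without migrating them back to system
 * memory.  GPUs in the same XGMI hive share one owner so hive members
 * can access each other's VRAM in place.  A minimal sketch of how the
 * owner would be passed to HMM (illustrative only, not the exact call
 * site in kfd_svm.c):
 *
 *	struct hmm_range range = {
 *		.dev_private_owner = SVM_ADEV_PGMAP_OWNER(adev),
 *	};
 *
 *	hmm_range_fault(&range);
 */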

struct svm_range_bo {
	struct amdgpu_bo		*bo;
	struct kref			kref;
	struct list_head		range_list; /* all svm ranges shared this bo */
	spinlock_t			list_lock;
	struct amdgpu_amdkfd_fence	*eviction_fence;
	struct work_struct		eviction_work;
	uint32_t			evicting;
	struct work_struct		release_work;
};

enum svm_work_list_ops {
	SVM_OP_NULL,
	SVM_OP_UNMAP_RANGE,
	SVM_OP_UPDATE_RANGE_NOTIFIER,
	SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
	SVM_OP_ADD_RANGE,
	SVM_OP_ADD_RANGE_AND_MAP
};

struct svm_work_list_item {
	enum svm_work_list_ops op;
	struct mm_struct *mm;
};
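
/*
 * Sketch of how a deferred operation is queued (illustrative; the real
 * call sites are in kfd_svm.c): svm_range_add_list_work() below records
 * the op and mm in prange->work_item and adds prange to the deferred
 * list, which schedule_deferred_list_work() then drains:
 *
 *	svm_range_add_list_work(svms, prange, mm, SVM_OP_ADD_RANGE_AND_MAP);
 *	schedule_deferred_list_work(svms);
 */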

/**
 * struct svm_range - shared virtual memory range
 *
 * @svms:       list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: protect svm range updates
 * @start:      range start address in pages
 * @last:       range last address in pages
 * @it_node:    node [start, last] stored in interval tree, start, last are
 *              page aligned, page size is one in the interval tree
 * @list:       link list node, used to scan all ranges of svms
 * @update_list: link list node used to add to update_list
 * @npages:     number of pages
 * @dma_addr:   dma mapping address on each GPU for system memory physical page
 * @ttm_res:    vram ttm resource
 * @offset:     range start offset within mm_nodes
 * @svm_bo:     struct to manage split amdgpu_bo
 * @svm_bo_list: link list node, to scan all ranges which share same svm_bo
 * @lock:       protect prange start, last, child_list, svm_bo_list
 * @saved_flags: save/restore current PF_MEMALLOC flags
 * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: the actual location, 0 for CPU, or GPU id
 * @granularity: migration granularity, log2 num pages
 * @invalid:    not 0 means cpu page table is invalidated
 * @validate_timestamp: system timestamp when range is validated
 * @notifier:   register mmu interval notifier
 * @work_item:  deferred work item information
 * @deferred_list: list header used to add range to deferred list
 * @child_list: list header for split ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
 * @validated_once: set once the range has been validated the first time
 * @mapped_to_gpu: true if the whole range is mapped to GPUs
 *
 * Data structure containing information about shared virtual memory ranges
 * used for HMM-based unified memory management.
 */
struct svm_range {
	struct svm_range_list		*svms;
	struct mutex			migrate_mutex;
	unsigned long			start;
	unsigned long			last;
	struct interval_tree_node	it_node;
	struct list_head		list;
	struct list_head		update_list;
	uint64_t			npages;
	dma_addr_t			*dma_addr[MAX_GPU_INSTANCE];
	struct ttm_resource		*ttm_res;
	uint64_t			offset;
	struct svm_range_bo		*svm_bo;
	struct list_head		svm_bo_list;
	struct mutex			lock;
	unsigned int			saved_flags;
	uint32_t			flags;
	uint32_t			preferred_loc;
	uint32_t			prefetch_loc;
	uint32_t			actual_loc;
	uint8_t				granularity;
	atomic_t			invalid;
	ktime_t				validate_timestamp;
	struct mmu_interval_notifier	notifier;
	struct svm_work_list_item	work_item;
	struct list_head		deferred_list;
	struct list_head		child_list;
	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
	bool				validated_once;
	bool				mapped_to_gpu;
};
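
/*
 * Note: start, last and it_node are in units of pages, not bytes.  A
 * lookup for a faulting CPU virtual address would shift first, e.g.
 * (illustrative sketch, assuming svms->objects is the interval tree
 * root declared in kfd_priv.h):
 *
 *	unsigned long page = addr >> PAGE_SHIFT;
 *	struct interval_tree_node *node =
 *		interval_tree_iter_first(&svms->objects, page, page);
 */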

static inline void svm_range_lock(struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
	memalloc_noreclaim_restore(prange->saved_flags);
	mutex_unlock(&prange->lock);
}
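
/*
 * svm_range_lock() enters a PF_MEMALLOC (no-reclaim) section so that
 * allocations made while prange->lock is held cannot recurse into
 * memory reclaim, which could fire MMU notifiers that take the same
 * lock and deadlock.  Usage sketch (do_validate_and_map() is a
 * hypothetical helper, not part of this API):
 *
 *	svm_range_lock(prange);
 *	r = do_validate_and_map(prange);
 *	svm_range_unlock(prange);
 */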

static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_get(&svm_bo->kref);

	return svm_bo;
}
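
/*
 * Usage sketch: take a reference while linking a range to a shared
 * svm_bo (illustrative; the real call sites are in kfd_svm.c):
 *
 *	prange->svm_bo = svm_range_bo_ref(svm_bo);
 *	spin_lock(&svm_bo->list_lock);
 *	list_add(&prange->svm_bo_list, &svm_bo->range_list);
 *	spin_unlock(&svm_bo->list_lock);
 */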

int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	      uint64_t size, uint32_t nattrs,
	      struct kfd_ioctl_svm_attribute *attrs);
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
				      unsigned long addr,
				      struct svm_range **parent);
struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange,
					       uint32_t id);
int svm_range_vram_node_new(struct amdgpu_device *adev,
			    struct svm_range *prange, bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
				   unsigned long addr, struct svm_range *parent,
				   struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev,
			    unsigned int pasid, uint64_t addr, bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
			     struct svm_range *prange, struct mm_struct *mm,
			     enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
			void *owner);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_offset);
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);

/*
 * SVM API and HMM page migration work together; the device pgmap type
 * is set to a non-zero value when page migration registers device
 * memory, so a non-zero type means SVM is usable on this device.
 */
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

void svm_range_set_max_pages(struct amdgpu_device *adev);

#else

struct kfd_process;

static inline int svm_range_list_init(struct kfd_process *p)
{
	return 0;
}
static inline void svm_range_list_fini(struct kfd_process *p)
{
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
					  unsigned int pasid, uint64_t addr,
					  bool write_fault)
{
	return -EFAULT;
}

static inline int svm_range_schedule_evict_svm_bo(
		struct amdgpu_amdkfd_fence *fence)
{
	WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
	return -EINVAL;
}

static inline int svm_range_get_info(struct kfd_process *p,
				     uint32_t *num_svm_ranges,
				     uint64_t *svm_priv_data_size)
{
	*num_svm_ranges = 0;
	*svm_priv_data_size = 0;
	return 0;
}

static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
					  uint8_t __user *user_priv_data,
					  uint64_t *priv_offset)
{
	return 0;
}

static inline int kfd_criu_restore_svm(struct kfd_process *p,
				       uint8_t __user *user_priv_ptr,
				       uint64_t *priv_data_offset,
				       uint64_t max_priv_data_size)
{
	return -EINVAL;
}

static inline int kfd_criu_resume_svm(struct kfd_process *p)
{
	return 0;
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */