/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_SVM_H_
#define KFD_SVM_H_

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"

#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
#define SVM_ADEV_PGMAP_OWNER(adev)\
            ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))

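/*
 * Rough usage sketch (hypothetical, for illustration only): the pgmap owner
 * returned by SVM_ADEV_PGMAP_OWNER() identifies the whole XGMI hive when one
 * exists, so device-private pages of all GPUs in the hive are treated as
 * belonging to the same owner during HMM range faulting, e.g.:
 *
 *    struct hmm_range range = {
 *        .dev_private_owner = SVM_ADEV_PGMAP_OWNER(adev),
 *    };
 */
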
struct svm_range_bo {
    struct amdgpu_bo        *bo;
    struct kref         kref;
    struct list_head        range_list; /* all svm ranges sharing this bo */
    spinlock_t          list_lock;
    struct amdgpu_amdkfd_fence  *eviction_fence;
    struct work_struct      eviction_work;
    uint32_t            evicting;
    struct work_struct      release_work;
};

enum svm_work_list_ops {
    SVM_OP_NULL,
    SVM_OP_UNMAP_RANGE,
    SVM_OP_UPDATE_RANGE_NOTIFIER,
    SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
    SVM_OP_ADD_RANGE,
    SVM_OP_ADD_RANGE_AND_MAP
};

struct svm_work_list_item {
    enum svm_work_list_ops op;
    struct mm_struct *mm;
};

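/*
 * Rough usage sketch (hypothetical, for illustration only): a work list op is
 * typically attached to a range and then flushed by the deferred-list worker
 * using the helpers declared later in this header, e.g.:
 *
 *    svm_range_add_list_work(svms, prange, mm, SVM_OP_ADD_RANGE);
 *    schedule_deferred_list_work(svms);
 */
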
/**
 * struct svm_range - shared virtual memory range
 *
 * @svms:       list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: to serialize range migration, validation and mapping update
 * @start:      range start address in pages
 * @last:       range last address in pages
 * @it_node:    node [start, last] stored in interval tree; start and last are
 *              page aligned, size in pages is (last - start + 1)
 * @list:       linked list node, used to scan all ranges of svms
 * @update_list: linked list node used to add to update_list
 * @npages:     number of pages
 * @dma_addr:   dma mapping address on each GPU for system memory physical page
 * @ttm_res:    vram ttm resource map
 * @offset:     range start offset within mm_nodes
 * @svm_bo:     struct to manage split amdgpu_bo
 * @svm_bo_list: linked list node, to scan all ranges which share the same svm_bo
 * @lock:       protect prange start, last, child_list, svm_bo_list
 * @saved_flags: save/restore current PF_MEMALLOC flags
 * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: the actual location, 0 for CPU, or GPU id
 * @granularity: migration granularity, log2 num pages
 * @invalid:    non-zero means the CPU page table is invalidated
 * @validate_timestamp: system timestamp when range is validated
 * @notifier:   registered mmu interval notifier
 * @work_item:  deferred work item information
 * @deferred_list: list header used to add range to deferred list
 * @child_list: list header for split ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
 * @validated_once: true if the range has been validated at least once
 * @mapped_to_gpu: true if the range is currently mapped to GPU page tables
 *
 * Data structure for a virtual memory range shared by the CPU and GPUs. It can
 * be allocated from system memory (RAM) or device memory (VRAM), and migrated
 * from RAM to VRAM or from VRAM to RAM.
 */
struct svm_range {
    struct svm_range_list       *svms;
    struct mutex            migrate_mutex;
    unsigned long           start;
    unsigned long           last;
    struct interval_tree_node   it_node;
    struct list_head        list;
    struct list_head        update_list;
    uint64_t            npages;
    dma_addr_t          *dma_addr[MAX_GPU_INSTANCE];
    struct ttm_resource     *ttm_res;
    uint64_t            offset;
    struct svm_range_bo     *svm_bo;
    struct list_head        svm_bo_list;
    struct mutex            lock;
    unsigned int            saved_flags;
    uint32_t            flags;
    uint32_t            preferred_loc;
    uint32_t            prefetch_loc;
    uint32_t            actual_loc;
    uint8_t             granularity;
    atomic_t            invalid;
    ktime_t             validate_timestamp;
    struct mmu_interval_notifier    notifier;
    struct svm_work_list_item   work_item;
    struct list_head        deferred_list;
    struct list_head        child_list;
    DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
    DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
    bool                validated_once;
    bool                mapped_to_gpu;
};

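/*
 * Note: start and last are page numbers, not byte addresses, and the interval
 * is inclusive, so the range size in pages is (last - start + 1). A
 * hypothetical conversion from a CPU virtual address range [addr, addr + size)
 * could look like:
 *
 *    start = addr >> PAGE_SHIFT;
 *    last  = (addr + size - 1) >> PAGE_SHIFT;
 */
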
static inline void svm_range_lock(struct svm_range *prange)
{
    mutex_lock(&prange->lock);
    prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
    memalloc_noreclaim_restore(prange->saved_flags);
    mutex_unlock(&prange->lock);
}

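/*
 * Rough usage sketch (hypothetical, for illustration only): svm_range_lock()
 * also enters memalloc_noreclaim context, so allocations made while the range
 * lock is held do not recurse into direct reclaim, e.g.:
 *
 *    svm_range_lock(prange);
 *    ... update prange->start, prange->last, child_list, svm_bo_list ...
 *    svm_range_unlock(prange);
 */
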
static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
    if (svm_bo)
        kref_get(&svm_bo->kref);

    return svm_bo;
}

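/*
 * Rough usage sketch (hypothetical, for illustration only): svm_range_bo_ref()
 * follows the usual kref pattern; a range that starts sharing the BO takes a
 * reference and drops it again later via svm_range_bo_unref_async(), declared
 * below, e.g.:
 *
 *    prange->svm_bo = svm_range_bo_ref(svm_bo);
 *    ...
 *    svm_range_bo_unref_async(prange->svm_bo);
 */
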
int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
          uint64_t size, uint32_t nattrs,
          struct kfd_ioctl_svm_attribute *attrs);
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
                      unsigned long addr,
                      struct svm_range **parent);
struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange,
                           uint32_t id);
int svm_range_vram_node_new(struct amdgpu_device *adev,
                struct svm_range *prange, bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
                   unsigned long addr, struct svm_range *parent,
                   struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev,
                unsigned int pasid, uint64_t addr, bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
                 struct svm_range *prange, struct mm_struct *mm,
                 enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
             unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
            void *owner);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
               uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
                uint8_t __user *user_priv_data,
                uint64_t *priv_offset);
int kfd_criu_restore_svm(struct kfd_process *p,
             uint8_t __user *user_priv_ptr,
             uint64_t *priv_data_offset,
             uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);

/* SVM API and HMM page migration work together; the device memory type is
 * initialized to a non-zero value when page migration registers device memory.
 */
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)

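/*
 * Rough usage sketch (hypothetical, for illustration only): the macro can act
 * as an early-out guard before attempting any SVM operation, assuming "dev"
 * is the KFD device that owns the pgmap, e.g.:
 *
 *    if (!KFD_IS_SVM_API_SUPPORTED(dev))
 *        return -EOPNOTSUPP;
 */
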
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

void svm_range_set_max_pages(struct amdgpu_device *adev);

#else

struct kfd_process;

static inline int svm_range_list_init(struct kfd_process *p)
{
    return 0;
}

static inline void svm_range_list_fini(struct kfd_process *p)
{
    /* empty */
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
                      unsigned int pasid, uint64_t addr,
                      bool write_fault)
{
    return -EFAULT;
}

static inline int svm_range_schedule_evict_svm_bo(
        struct amdgpu_amdkfd_fence *fence)
{
    WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
    return -EINVAL;
}

static inline int svm_range_get_info(struct kfd_process *p,
                     uint32_t *num_svm_ranges,
                     uint64_t *svm_priv_data_size)
{
    *num_svm_ranges = 0;
    *svm_priv_data_size = 0;
    return 0;
}

static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
                      uint8_t __user *user_priv_data,
                      uint64_t *priv_offset)
{
    return 0;
}

static inline int kfd_criu_restore_svm(struct kfd_process *p,
                       uint8_t __user *user_priv_ptr,
                       uint64_t *priv_data_offset,
                       uint64_t max_priv_data_size)
{
    return -EINVAL;
}

static inline int kfd_criu_resume_svm(struct kfd_process *p)
{
    return 0;
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */