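/**
 * DOC: MMU Notifier
 *
 * For coherent userptr handling the driver registers MMU interval
 * notifiers to learn about updates on the page tables of a process.
 *
 * When the pages backing a userptr BO are invalidated, work on those
 * pages is blocked (GFX) or the BO is evicted (KFD) before the
 * invalidation may proceed. The next command submission then
 * revalidates the backing store.
 */
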
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

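/**
 * amdgpu_mn_invalidate_gfx - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Block for operations on the BO to finish and mark the pages as accessed
 * and potentially dirty.
 */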
static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				  false, MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&adev->notifier_lock);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
	.invalidate = amdgpu_mn_invalidate_gfx,
};

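/**
 * amdgpu_mn_invalidate_hsa - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Temporarily evict the userptr BO attached to this range, which requires
 * evicting the user-mode queues of the process.
 */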
static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
	mutex_unlock(&adev->notifier_lock);

	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
	.invalidate = amdgpu_mn_invalidate_hsa,
};

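/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr address to monitor
 *
 * Registers a mmu_interval_notifier for the given BO at the specified
 * address. Returns 0 on success, -errno if anything goes wrong.
 */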
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	if (bo->kfd_bo)
		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
						    addr, amdgpu_bo_size(bo),
						    &amdgpu_mn_hsa_ops);
	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
					    amdgpu_bo_size(bo),
					    &amdgpu_mn_gfx_ops);
}

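/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of mmu_interval_notifier updates from the buffer
 * object.
 */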
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	if (!bo->notifier.mm)
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}

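/**
 * amdgpu_hmm_range_get_pages - snapshot the CPU page tables for a userptr
 *
 * @notifier: notifier covering the address range
 * @mm: process address space to fault in
 * @pages: optional array that receives a struct page per pfn
 * @start: start address of the range
 * @npages: number of pages in the range
 * @phmm_range: returns the hmm_range used for the snapshot
 * @readonly: true if the pages are only read, otherwise write access is
 *	requested as well
 * @mmap_locked: true if the caller already holds the mmap read lock
 * @owner: dev_private_owner passed to hmm_range_fault()
 *
 * Faults in the pages backing the range with hmm_range_fault() and retries
 * as long as the range is concurrently invalidated (-EBUSY), up to a
 * timeout scaled with the range size. The snapshot is only valid once
 * amdgpu_hmm_range_get_pages_done() confirms the notifier sequence did not
 * change in the meantime.
 */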
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
			       struct mm_struct *mm, struct page **pages,
			       uint64_t start, uint64_t npages,
			       struct hmm_range **phmm_range, bool readonly,
			       bool mmap_locked, void *owner)
{
	struct hmm_range *hmm_range;
	unsigned long timeout;
	unsigned long i;
	unsigned long *pfns;
	int r = 0;

	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
	if (unlikely(!hmm_range))
		return -ENOMEM;

	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns)) {
		r = -ENOMEM;
		goto out_free_range;
	}

	hmm_range->notifier = notifier;
	hmm_range->default_flags = HMM_PFN_REQ_FAULT;
	if (!readonly)
		hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
	hmm_range->hmm_pfns = pfns;
	hmm_range->start = start;
	hmm_range->end = start + npages * PAGE_SIZE;
	hmm_range->dev_private_owner = owner;

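	/* Assuming 512MB takes maximum 1 second to fault page address */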
	timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT;
	timeout = jiffies + msecs_to_jiffies(timeout);

retry:
	hmm_range->notifier_seq = mmu_interval_read_begin(notifier);

	if (likely(!mmap_locked))
		mmap_read_lock(mm);

	r = hmm_range_fault(hmm_range);

	if (likely(!mmap_locked))
		mmap_read_unlock(mm);
	if (unlikely(r)) {
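		/*
		 * hmm_range_fault() returns -EBUSY when the CPU page tables
		 * changed while faulting; start over with a fresh notifier
		 * sequence until the overall timeout expires.
		 */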
		if (r == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_free_pfns;
	}

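	/*
	 * Due to default_flags, hmm_range_fault() either fails or returns
	 * only valid pfns here, so hmm_pfn_to_page() is safe. The caller
	 * must not touch the pages before amdgpu_hmm_range_get_pages_done()
	 * confirms the snapshot is still valid.
	 */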
	for (i = 0; pages && i < npages; i++)
		pages[i] = hmm_pfn_to_page(pfns[i]);

	*phmm_range = hmm_range;

	return 0;

out_free_pfns:
	kvfree(pfns);
out_free_range:
	kfree(hmm_range);

	return r;
}

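/**
 * amdgpu_hmm_range_get_pages_done - check and release a page table snapshot
 *
 * @hmm_range: range returned by amdgpu_hmm_range_get_pages()
 *
 * Frees the snapshot and returns the result of mmu_interval_read_retry():
 * nonzero if the range was invalidated in the meantime and the caller has
 * to start over.
 */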
int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
{
	int r;

	r = mmu_interval_read_retry(hmm_range->notifier,
				    hmm_range->notifier_seq);
	kvfree(hmm_range->hmm_pfns);
	kfree(hmm_range);

	return r;
}