0001 /*
0002  * Copyright 2008 Advanced Micro Devices, Inc.
0003  * Copyright 2008 Red Hat Inc.
0004  * Copyright 2009 Jerome Glisse.
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the "Software"),
0008  * to deal in the Software without restriction, including without limitation
0009  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0010  * and/or sell copies of the Software, and to permit persons to whom the
0011  * Software is furnished to do so, subject to the following conditions:
0012  *
0013  * The above copyright notice and this permission notice shall be included in
0014  * all copies or substantial portions of the Software.
0015  *
0016  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0017  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0018  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0019  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0020  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0021  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0022  * OTHER DEALINGS IN THE SOFTWARE.
0023  *
0024  * Authors: Dave Airlie
0025  *          Alex Deucher
0026  *          Jerome Glisse
0027  */
0028 
0029 #include <linux/dma-fence-array.h>
0030 #include <linux/interval_tree_generic.h>
0031 #include <linux/idr.h>
0032 #include <linux/dma-buf.h>
0033 
0034 #include <drm/amdgpu_drm.h>
0035 #include <drm/drm_drv.h>
0036 #include "amdgpu.h"
0037 #include "amdgpu_trace.h"
0038 #include "amdgpu_amdkfd.h"
0039 #include "amdgpu_gmc.h"
0040 #include "amdgpu_xgmi.h"
0041 #include "amdgpu_dma_buf.h"
0042 #include "amdgpu_res_cursor.h"
0043 #include "kfd_svm.h"
0044 
0045 /**
0046  * DOC: GPUVM
0047  *
0048  * GPUVM is similar to the legacy GART on older ASICs; however,
0049  * rather than there being a single global GART table
0050  * for the entire GPU, there are multiple VM page tables active
0051  * at any given time.  The VM page tables can contain a mix of
0052  * VRAM pages and system memory pages, and system memory pages
0053  * can be mapped as snooped (cached system pages) or unsnooped
0054  * (uncached system pages).
0055  * Each VM has an ID associated with it and there is a page table
0056  * associated with each VMID.  When executing a command buffer,
0057  * the kernel tells the ring what VMID to use for that command
0058  * buffer.  VMIDs are allocated dynamically as commands are submitted.
0059  * The userspace drivers maintain their own address space and the kernel
0060  * sets up their page tables accordingly when they submit their
0061  * command buffers and a VMID is assigned.
0062  * Cayman/Trinity support up to 8 active VMs at any given time;
0063  * SI supports 16.
0064  */
0065 
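/*
 * Illustrative sketch (not part of this file): a kernel-side caller,
 * holding the required BO reservations, would typically combine the
 * helpers defined below roughly like this; error handling and locking
 * are elided, and "gpu_va" is a hypothetical address chosen by the
 * caller.
 *
 *   struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *   r = amdgpu_vm_bo_map(adev, bo_va, gpu_va, 0, amdgpu_bo_size(bo),
 *                        AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *   r = amdgpu_vm_bo_update(adev, bo_va, false);
 */
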
0066 #define START(node) ((node)->start)
0067 #define LAST(node) ((node)->last)
0068 
0069 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
0070              START, LAST, static, amdgpu_vm_it)
0071 
0072 #undef START
0073 #undef LAST
0074 
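/*
 * A hedged sketch of the interval tree API that INTERVAL_TREE_DEFINE()
 * generates above (amdgpu_vm_it_insert/remove/iter_first/iter_next),
 * keyed on the inclusive [start, last] range of each mapping; this is
 * the same pattern amdgpu_vm_bo_trace_cs() uses below. visit() stands
 * in for the caller's per-mapping work:
 *
 *   struct amdgpu_bo_va_mapping *m;
 *
 *   for (m = amdgpu_vm_it_iter_first(&vm->va, start, last); m;
 *        m = amdgpu_vm_it_iter_next(m, start, last))
 *       visit(m); // each returned mapping overlaps [start, last]
 */
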
0075 /**
0076  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
0077  */
0078 struct amdgpu_prt_cb {
0079 
0080     /**
0081      * @adev: amdgpu device
0082      */
0083     struct amdgpu_device *adev;
0084 
0085     /**
0086      * @cb: callback
0087      */
0088     struct dma_fence_cb cb;
0089 };
0090 
0091 /**
0092  * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence
0093  */
0094 struct amdgpu_vm_tlb_seq_cb {
0095     /**
0096      * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
0097      */
0098     struct amdgpu_vm *vm;
0099 
0100     /**
0101      * @cb: callback
0102      */
0103     struct dma_fence_cb cb;
0104 };
0105 
0106 /**
0107  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
0108  *
0109  * @adev: amdgpu_device pointer
0110  * @vm: amdgpu_vm pointer
0111  * @pasid: the pasid the VM is using on this GPU
0112  *
0113  * Set the pasid this VM is using on this GPU; this can also be used to
0114  * remove the pasid by passing in zero.
0115  *
0116  */
0117 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
0118             u32 pasid)
0119 {
0120     int r;
0121 
0122     if (vm->pasid == pasid)
0123         return 0;
0124 
0125     if (vm->pasid) {
0126         r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
0127         if (r < 0)
0128             return r;
0129 
0130         vm->pasid = 0;
0131     }
0132 
0133     if (pasid) {
0134         r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
0135                     GFP_KERNEL));
0136         if (r < 0)
0137             return r;
0138 
0139         vm->pasid = pasid;
0140     }
0141 
0142 
0143     return 0;
0144 }
0145 
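/*
 * Usage sketch (illustrative): install a pasid mapping for the VM and
 * later remove it again by passing zero, as described above.
 *
 *   r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *   ...
 *   r = amdgpu_vm_set_pasid(adev, vm, 0);
 */
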
0146 /*
0147  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
0148  * happens while holding this lock anywhere to prevent deadlocks when
0149  * an MMU notifier runs in reclaim-FS context.
0150  */
0151 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
0152 {
0153     mutex_lock(&vm->eviction_lock);
0154     vm->saved_flags = memalloc_noreclaim_save();
0155 }
0156 
0157 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
0158 {
0159     if (mutex_trylock(&vm->eviction_lock)) {
0160         vm->saved_flags = memalloc_noreclaim_save();
0161         return 1;
0162     }
0163     return 0;
0164 }
0165 
0166 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
0167 {
0168     memalloc_noreclaim_restore(vm->saved_flags);
0169     mutex_unlock(&vm->eviction_lock);
0170 }
0171 
0172 /**
0173  * amdgpu_vm_bo_evicted - vm_bo is evicted
0174  *
0175  * @vm_bo: vm_bo which is evicted
0176  *
0177  * State for PDs/PTs and per VM BOs which are not at the location they should
0178  * be.
0179  */
0180 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
0181 {
0182     struct amdgpu_vm *vm = vm_bo->vm;
0183     struct amdgpu_bo *bo = vm_bo->bo;
0184 
0185     vm_bo->moved = true;
0186     if (bo->tbo.type == ttm_bo_type_kernel)
0187         list_move(&vm_bo->vm_status, &vm->evicted);
0188     else
0189         list_move_tail(&vm_bo->vm_status, &vm->evicted);
0190 }
0191 /**
0192  * amdgpu_vm_bo_moved - vm_bo is moved
0193  *
0194  * @vm_bo: vm_bo which is moved
0195  *
0196  * State for per VM BOs which are moved, but that change is not yet reflected
0197  * in the page tables.
0198  */
0199 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
0200 {
0201     list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
0202 }
0203 
0204 /**
0205  * amdgpu_vm_bo_idle - vm_bo is idle
0206  *
0207  * @vm_bo: vm_bo which is now idle
0208  *
0209  * State for PDs/PTs and per VM BOs which have gone through the state machine
0210  * and are now idle.
0211  */
0212 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
0213 {
0214     list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
0215     vm_bo->moved = false;
0216 }
0217 
0218 /**
0219  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
0220  *
0221  * @vm_bo: vm_bo which is now invalidated
0222  *
0223  * State for normal BOs which are invalidated and that change is not yet reflected
0224  * in the PTs.
0225  */
0226 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
0227 {
0228     spin_lock(&vm_bo->vm->invalidated_lock);
0229     list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
0230     spin_unlock(&vm_bo->vm->invalidated_lock);
0231 }
0232 
0233 /**
0234  * amdgpu_vm_bo_relocated - vm_bo is relocated
0235  *
0236  * @vm_bo: vm_bo which is relocated
0237  *
0238  * State for PDs/PTs which need to update their parent PD.
0239  * For the root PD, just move to idle state.
0240  */
0241 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
0242 {
0243     if (vm_bo->bo->parent)
0244         list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
0245     else
0246         amdgpu_vm_bo_idle(vm_bo);
0247 }
0248 
0249 /**
0250  * amdgpu_vm_bo_done - vm_bo is done
0251  *
0252  * @vm_bo: vm_bo which is now done
0253  *
0254  * State for normal BOs which are invalidated and that change has been updated
0255  * in the PTs.
0256  */
0257 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
0258 {
0259     spin_lock(&vm_bo->vm->invalidated_lock);
0260     list_move(&vm_bo->vm_status, &vm_bo->vm->done);
0261     spin_unlock(&vm_bo->vm->invalidated_lock);
0262 }
0263 
0264 /**
0265  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
0266  *
0267  * @base: base structure for tracking BO usage in a VM
0268  * @vm: vm to which bo is to be added
0269  * @bo: amdgpu buffer object
0270  *
0271  * Initialize a bo_va_base structure and add it to the appropriate lists
0272  *
0273  */
0274 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
0275                 struct amdgpu_vm *vm, struct amdgpu_bo *bo)
0276 {
0277     base->vm = vm;
0278     base->bo = bo;
0279     base->next = NULL;
0280     INIT_LIST_HEAD(&base->vm_status);
0281 
0282     if (!bo)
0283         return;
0284     base->next = bo->vm_bo;
0285     bo->vm_bo = base;
0286 
0287     if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
0288         return;
0289 
0290     dma_resv_assert_held(vm->root.bo->tbo.base.resv);
0291 
0292     ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
0293     if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
0294         amdgpu_vm_bo_relocated(base);
0295     else
0296         amdgpu_vm_bo_idle(base);
0297 
0298     if (bo->preferred_domains &
0299         amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
0300         return;
0301 
0302     /*
0303      * We checked all the prerequisites, but it looks like this per VM BO
0304      * is currently evicted. Add the BO to the evicted list to make sure it
0305      * is validated on next VM use to avoid faults.
0306      */
0307     amdgpu_vm_bo_evicted(base);
0308 }
0309 
0310 /**
0311  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
0312  *
0313  * @vm: vm providing the BOs
0314  * @validated: head of validation list
0315  * @entry: entry to add
0316  *
0317  * Add the page directory to the list of BOs to
0318  * validate for command submission.
0319  */
0320 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
0321              struct list_head *validated,
0322              struct amdgpu_bo_list_entry *entry)
0323 {
0324     entry->priority = 0;
0325     entry->tv.bo = &vm->root.bo->tbo;
0326     /* Two for VM updates, one for TTM and one for the CS job */
0327     entry->tv.num_shared = 4;
0328     entry->user_pages = NULL;
0329     list_add(&entry->tv.head, validated);
0330 }
0331 
0332 /**
0333  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
0334  *
0335  * @adev: amdgpu device pointer
0336  * @vm: vm providing the BOs
0337  *
0338  * Move all BOs to the end of LRU and remember their positions to put them
0339  * together.
0340  */
0341 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
0342                 struct amdgpu_vm *vm)
0343 {
0344     spin_lock(&adev->mman.bdev.lru_lock);
0345     ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
0346     spin_unlock(&adev->mman.bdev.lru_lock);
0347 }
0348 
0349 /**
0350  * amdgpu_vm_validate_pt_bos - validate the page table BOs
0351  *
0352  * @adev: amdgpu device pointer
0353  * @vm: vm providing the BOs
0354  * @validate: callback to do the validation
0355  * @param: parameter for the validation callback
0356  *
0357  * Validate the page table BOs on command submission if necessary.
0358  *
0359  * Returns:
0360  * Validation result.
0361  */
0362 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
0363                   int (*validate)(void *p, struct amdgpu_bo *bo),
0364                   void *param)
0365 {
0366     struct amdgpu_vm_bo_base *bo_base, *tmp;
0367     int r;
0368 
0369     list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
0370         struct amdgpu_bo *bo = bo_base->bo;
0371         struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
0372 
0373         r = validate(param, bo);
0374         if (r)
0375             return r;
0376         if (shadow) {
0377             r = validate(param, shadow);
0378             if (r)
0379                 return r;
0380         }
0381 
0382         if (bo->tbo.type != ttm_bo_type_kernel) {
0383             amdgpu_vm_bo_moved(bo_base);
0384         } else {
0385             vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
0386             amdgpu_vm_bo_relocated(bo_base);
0387         }
0388     }
0389 
0390     amdgpu_vm_eviction_lock(vm);
0391     vm->evicting = false;
0392     amdgpu_vm_eviction_unlock(vm);
0393 
0394     return 0;
0395 }
0396 
0397 /**
0398  * amdgpu_vm_ready - check VM is ready for updates
0399  *
0400  * @vm: VM to check
0401  *
0402  * Check if all VM PDs/PTs are ready for updates
0403  *
0404  * Returns:
0405  * True if VM is not evicting.
0406  */
0407 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
0408 {
0409     bool ret;
0410 
0411     amdgpu_vm_eviction_lock(vm);
0412     ret = !vm->evicting;
0413     amdgpu_vm_eviction_unlock(vm);
0414 
0415     return ret && list_empty(&vm->evicted);
0416 }
0417 
0418 /**
0419  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
0420  *
0421  * @adev: amdgpu_device pointer
0422  */
0423 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
0424 {
0425     const struct amdgpu_ip_block *ip_block;
0426     bool has_compute_vm_bug;
0427     struct amdgpu_ring *ring;
0428     int i;
0429 
0430     has_compute_vm_bug = false;
0431 
0432     ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
0433     if (ip_block) {
0434         /* Compute has a VM bug for GFX version < 7.
0435          * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
0436         if (ip_block->version->major <= 7)
0437             has_compute_vm_bug = true;
0438         else if (ip_block->version->major == 8)
0439             if (adev->gfx.mec_fw_version < 673)
0440                 has_compute_vm_bug = true;
0441     }
0442 
0443     for (i = 0; i < adev->num_rings; i++) {
0444         ring = adev->rings[i];
0445         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
0446             /* only compute rings */
0447             ring->has_compute_vm_bug = has_compute_vm_bug;
0448         else
0449             ring->has_compute_vm_bug = false;
0450     }
0451 }
0452 
0453 /**
0454  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
0455  *
0456  * @ring: ring on which the job will be submitted
0457  * @job: job to submit
0458  *
0459  * Returns:
0460  * True if sync is needed.
0461  */
0462 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
0463                   struct amdgpu_job *job)
0464 {
0465     struct amdgpu_device *adev = ring->adev;
0466     unsigned vmhub = ring->funcs->vmhub;
0467     struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
0468     struct amdgpu_vmid *id;
0469     bool gds_switch_needed;
0470     bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
0471 
0472     if (job->vmid == 0)
0473         return false;
0474     id = &id_mgr->ids[job->vmid];
0475     gds_switch_needed = ring->funcs->emit_gds_switch && (
0476         id->gds_base != job->gds_base ||
0477         id->gds_size != job->gds_size ||
0478         id->gws_base != job->gws_base ||
0479         id->gws_size != job->gws_size ||
0480         id->oa_base != job->oa_base ||
0481         id->oa_size != job->oa_size);
0482 
0483     if (amdgpu_vmid_had_gpu_reset(adev, id))
0484         return true;
0485 
0486     return vm_flush_needed || gds_switch_needed;
0487 }
0488 
0489 /**
0490  * amdgpu_vm_flush - hardware flush the vm
0491  *
0492  * @ring: ring to use for flush
0493  * @job:  related job
0494  * @need_pipe_sync: is pipe sync needed
0495  *
0496  * Emit a VM flush when it is necessary.
0497  *
0498  * Returns:
0499  * 0 on success, errno otherwise.
0500  */
0501 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
0502             bool need_pipe_sync)
0503 {
0504     struct amdgpu_device *adev = ring->adev;
0505     unsigned vmhub = ring->funcs->vmhub;
0506     struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
0507     struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
0508     bool gds_switch_needed = ring->funcs->emit_gds_switch && (
0509         id->gds_base != job->gds_base ||
0510         id->gds_size != job->gds_size ||
0511         id->gws_base != job->gws_base ||
0512         id->gws_size != job->gws_size ||
0513         id->oa_base != job->oa_base ||
0514         id->oa_size != job->oa_size);
0515     bool vm_flush_needed = job->vm_needs_flush;
0516     struct dma_fence *fence = NULL;
0517     bool pasid_mapping_needed = false;
0518     unsigned patch_offset = 0;
0519     bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
0520     int r;
0521 
0522     if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
0523         adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
0524 
0525     if (amdgpu_vmid_had_gpu_reset(adev, id)) {
0526         gds_switch_needed = true;
0527         vm_flush_needed = true;
0528         pasid_mapping_needed = true;
0529     }
0530 
0531     mutex_lock(&id_mgr->lock);
0532     if (id->pasid != job->pasid || !id->pasid_mapping ||
0533         !dma_fence_is_signaled(id->pasid_mapping))
0534         pasid_mapping_needed = true;
0535     mutex_unlock(&id_mgr->lock);
0536 
0537     gds_switch_needed &= !!ring->funcs->emit_gds_switch;
0538     vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
0539             job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
0540     pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
0541         ring->funcs->emit_wreg;
0542 
0543     if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
0544         return 0;
0545 
0546     if (ring->funcs->init_cond_exec)
0547         patch_offset = amdgpu_ring_init_cond_exec(ring);
0548 
0549     if (need_pipe_sync)
0550         amdgpu_ring_emit_pipeline_sync(ring);
0551 
0552     if (vm_flush_needed) {
0553         trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
0554         amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
0555     }
0556 
0557     if (pasid_mapping_needed)
0558         amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
0559 
0560     if (vm_flush_needed || pasid_mapping_needed) {
0561         r = amdgpu_fence_emit(ring, &fence, NULL, 0);
0562         if (r)
0563             return r;
0564     }
0565 
0566     if (vm_flush_needed) {
0567         mutex_lock(&id_mgr->lock);
0568         dma_fence_put(id->last_flush);
0569         id->last_flush = dma_fence_get(fence);
0570         id->current_gpu_reset_count =
0571             atomic_read(&adev->gpu_reset_counter);
0572         mutex_unlock(&id_mgr->lock);
0573     }
0574 
0575     if (pasid_mapping_needed) {
0576         mutex_lock(&id_mgr->lock);
0577         id->pasid = job->pasid;
0578         dma_fence_put(id->pasid_mapping);
0579         id->pasid_mapping = dma_fence_get(fence);
0580         mutex_unlock(&id_mgr->lock);
0581     }
0582     dma_fence_put(fence);
0583 
0584     if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
0585         gds_switch_needed) {
0586         id->gds_base = job->gds_base;
0587         id->gds_size = job->gds_size;
0588         id->gws_base = job->gws_base;
0589         id->gws_size = job->gws_size;
0590         id->oa_base = job->oa_base;
0591         id->oa_size = job->oa_size;
0592         amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
0593                         job->gds_size, job->gws_base,
0594                         job->gws_size, job->oa_base,
0595                         job->oa_size);
0596     }
0597 
0598     if (ring->funcs->patch_cond_exec)
0599         amdgpu_ring_patch_cond_exec(ring, patch_offset);
0600 
0601     /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
0602     if (ring->funcs->emit_switch_buffer) {
0603         amdgpu_ring_emit_switch_buffer(ring);
0604         amdgpu_ring_emit_switch_buffer(ring);
0605     }
0606     return 0;
0607 }
0608 
0609 /**
0610  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
0611  *
0612  * @vm: requested vm
0613  * @bo: requested buffer object
0614  *
0615  * Find @bo inside the requested vm.
0616  * Search inside the @bo's vm list for the requested vm
0617  * Returns the found bo_va or NULL if none is found
0618  *
0619  * Object has to be reserved!
0620  *
0621  * Returns:
0622  * Found bo_va or NULL.
0623  */
0624 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
0625                        struct amdgpu_bo *bo)
0626 {
0627     struct amdgpu_vm_bo_base *base;
0628 
0629     for (base = bo->vm_bo; base; base = base->next) {
0630         if (base->vm != vm)
0631             continue;
0632 
0633         return container_of(base, struct amdgpu_bo_va, base);
0634     }
0635     return NULL;
0636 }
0637 
0638 /**
0639  * amdgpu_vm_map_gart - Resolve gart mapping of addr
0640  *
0641  * @pages_addr: optional DMA address to use for lookup
0642  * @addr: the unmapped addr
0643  *
0644  * Look up the physical address of the page that the pte resolves
0645  * to.
0646  *
0647  * Returns:
0648  * The pointer for the page table entry.
0649  */
0650 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
0651 {
0652     uint64_t result;
0653 
0654     /* page table offset */
0655     result = pages_addr[addr >> PAGE_SHIFT];
0656 
0657     /* in case cpu page size != gpu page size */
0658     result |= addr & (~PAGE_MASK);
0659 
0660     result &= 0xFFFFFFFFFFFFF000ULL;
0661 
0662     return result;
0663 }
0664 
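/*
 * Worked example (assuming 64K CPU pages, i.e. PAGE_SHIFT == 16, and 4K
 * GPU pages): for addr == 0x12345, pages_addr[0x1] supplies the DMA
 * address of the backing CPU page, "addr & ~PAGE_MASK" re-adds the
 * 0x2345 offset inside that CPU page, and the final mask aligns the
 * result down to the 4K GPU page, i.e. pages_addr[0x1] + 0x2000.
 */
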
0665 /**
0666  * amdgpu_vm_update_pdes - make sure that all directories are valid
0667  *
0668  * @adev: amdgpu_device pointer
0669  * @vm: requested vm
0670  * @immediate: submit immediately to the paging queue
0671  *
0672  * Makes sure all directories are up to date.
0673  *
0674  * Returns:
0675  * 0 for success, error for failure.
0676  */
0677 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
0678               struct amdgpu_vm *vm, bool immediate)
0679 {
0680     struct amdgpu_vm_update_params params;
0681     struct amdgpu_vm_bo_base *entry;
0682     bool flush_tlb_needed = false;
0683     int r, idx;
0684 
0685     if (list_empty(&vm->relocated))
0686         return 0;
0687 
0688     if (!drm_dev_enter(adev_to_drm(adev), &idx))
0689         return -ENODEV;
0690 
0691     memset(&params, 0, sizeof(params));
0692     params.adev = adev;
0693     params.vm = vm;
0694     params.immediate = immediate;
0695 
0696     r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
0697     if (r)
0698         goto error;
0699 
0700     list_for_each_entry(entry, &vm->relocated, vm_status) {
0701         /* vm_flush_needed after updating moved PDEs */
0702         flush_tlb_needed |= entry->moved;
0703 
0704         r = amdgpu_vm_pde_update(&params, entry);
0705         if (r)
0706             goto error;
0707     }
0708 
0709     r = vm->update_funcs->commit(&params, &vm->last_update);
0710     if (r)
0711         goto error;
0712 
0713     if (flush_tlb_needed)
0714         atomic64_inc(&vm->tlb_seq);
0715 
0716     while (!list_empty(&vm->relocated)) {
0717         entry = list_first_entry(&vm->relocated,
0718                      struct amdgpu_vm_bo_base,
0719                      vm_status);
0720         amdgpu_vm_bo_idle(entry);
0721     }
0722 
0723 error:
0724     drm_dev_exit(idx);
0725     return r;
0726 }
0727 
0728 /**
0729  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
0730  * @fence: unused
0731  * @cb: the callback structure
0732  *
0733  * Increments the tlb sequence to make sure that future CS execute a VM flush.
0734  */
0735 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
0736                  struct dma_fence_cb *cb)
0737 {
0738     struct amdgpu_vm_tlb_seq_cb *tlb_cb;
0739 
0740     tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
0741     atomic64_inc(&tlb_cb->vm->tlb_seq);
0742     kfree(tlb_cb);
0743 }
0744 
0745 /**
0746  * amdgpu_vm_update_range - update a range in the vm page table
0747  *
0748  * @adev: amdgpu_device pointer to use for commands
0749  * @vm: the VM to update the range
0750  * @immediate: immediate submission in a page fault
0751  * @unlocked: unlocked invalidation during MM callback
0752  * @flush_tlb: trigger tlb invalidation after update completed
0753  * @resv: fences we need to sync to
0754  * @start: start of mapped range
0755  * @last: last mapped entry
0756  * @flags: flags for the entries
0757  * @offset: offset into nodes and pages_addr
0758  * @vram_base: base for vram mappings
0759  * @res: ttm_resource to map
0760  * @pages_addr: DMA addresses to use for mapping
0761  * @fence: optional resulting fence
0762  *
0763  * Fill in the page table entries between @start and @last.
0764  *
0765  * Returns:
0766  * 0 for success, negative error code for failure.
0767  */
0768 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
0769                bool immediate, bool unlocked, bool flush_tlb,
0770                struct dma_resv *resv, uint64_t start, uint64_t last,
0771                uint64_t flags, uint64_t offset, uint64_t vram_base,
0772                struct ttm_resource *res, dma_addr_t *pages_addr,
0773                struct dma_fence **fence)
0774 {
0775     struct amdgpu_vm_update_params params;
0776     struct amdgpu_vm_tlb_seq_cb *tlb_cb;
0777     struct amdgpu_res_cursor cursor;
0778     enum amdgpu_sync_mode sync_mode;
0779     int r, idx;
0780 
0781     if (!drm_dev_enter(adev_to_drm(adev), &idx))
0782         return -ENODEV;
0783 
0784     tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
0785     if (!tlb_cb) {
0786         r = -ENOMEM;
0787         goto error_unlock;
0788     }
0789 
0790     /* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2
0791      * texture cache, so do a heavy-weight TLB flush unconditionally.
0792      */
0793     flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
0794              adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
0795 
0796     /*
0797      * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
0798      */
0799     flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
0800 
0801     memset(&params, 0, sizeof(params));
0802     params.adev = adev;
0803     params.vm = vm;
0804     params.immediate = immediate;
0805     params.pages_addr = pages_addr;
0806     params.unlocked = unlocked;
0807 
0808     /* Implicitly sync to command submissions in the same VM before
0809      * unmapping. Sync to moving fences before mapping.
0810      */
0811     if (!(flags & AMDGPU_PTE_VALID))
0812         sync_mode = AMDGPU_SYNC_EQ_OWNER;
0813     else
0814         sync_mode = AMDGPU_SYNC_EXPLICIT;
0815 
0816     amdgpu_vm_eviction_lock(vm);
0817     if (vm->evicting) {
0818         r = -EBUSY;
0819         goto error_free;
0820     }
0821 
0822     if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
0823         struct dma_fence *tmp = dma_fence_get_stub();
0824 
0825         amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
0826         swap(vm->last_unlocked, tmp);
0827         dma_fence_put(tmp);
0828     }
0829 
0830     r = vm->update_funcs->prepare(&params, resv, sync_mode);
0831     if (r)
0832         goto error_free;
0833 
0834     amdgpu_res_first(pages_addr ? NULL : res, offset,
0835              (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
0836     while (cursor.remaining) {
0837         uint64_t tmp, num_entries, addr;
0838 
0839         num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
0840         if (pages_addr) {
0841             bool contiguous = true;
0842 
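            /* Scan whether the CPU pages backing this window are
             * DMA-contiguous: if so they can be mapped as one linear
             * run, otherwise fall back to per-page lookups through
             * params.pages_addr (see the !contiguous branch below).
             */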
0843             if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
0844                 uint64_t pfn = cursor.start >> PAGE_SHIFT;
0845                 uint64_t count;
0846 
0847                 contiguous = pages_addr[pfn + 1] ==
0848                     pages_addr[pfn] + PAGE_SIZE;
0849 
0850                 tmp = num_entries /
0851                     AMDGPU_GPU_PAGES_IN_CPU_PAGE;
0852                 for (count = 2; count < tmp; ++count) {
0853                     uint64_t idx = pfn + count;
0854 
0855                     if (contiguous != (pages_addr[idx] ==
0856                         pages_addr[idx - 1] + PAGE_SIZE))
0857                         break;
0858                 }
0859                 num_entries = count *
0860                     AMDGPU_GPU_PAGES_IN_CPU_PAGE;
0861             }
0862 
0863             if (!contiguous) {
0864                 addr = cursor.start;
0865                 params.pages_addr = pages_addr;
0866             } else {
0867                 addr = pages_addr[cursor.start >> PAGE_SHIFT];
0868                 params.pages_addr = NULL;
0869             }
0870 
0871         } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
0872             addr = vram_base + cursor.start;
0873         } else {
0874             addr = 0;
0875         }
0876 
0877         tmp = start + num_entries;
0878         r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
0879         if (r)
0880             goto error_free;
0881 
0882         amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
0883         start = tmp;
0884     }
0885 
0886     r = vm->update_funcs->commit(&params, fence);
0887 
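    /* Bump the TLB flush sequence once the update fence signals so
     * future submissions know a flush is required; if the callback
     * cannot be installed, bump it immediately instead.
     */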
0888     if (flush_tlb || params.table_freed) {
0889         tlb_cb->vm = vm;
0890         if (fence && *fence &&
0891             !dma_fence_add_callback(*fence, &tlb_cb->cb,
0892                        amdgpu_vm_tlb_seq_cb)) {
0893             dma_fence_put(vm->last_tlb_flush);
0894             vm->last_tlb_flush = dma_fence_get(*fence);
0895         } else {
0896             amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
0897         }
0898         tlb_cb = NULL;
0899     }
0900 
0901 error_free:
0902     kfree(tlb_cb);
0903 
0904 error_unlock:
0905     amdgpu_vm_eviction_unlock(vm);
0906     drm_dev_exit(idx);
0907     return r;
0908 }
0909 
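/**
 * amdgpu_vm_get_memory - accumulate the memory usage of all BOs in a VM
 *
 * @vm: the VM to query
 * @vram_mem: accumulated VRAM usage, in bytes
 * @gtt_mem: accumulated GTT usage, in bytes
 * @cpu_mem: accumulated CPU memory usage, in bytes
 *
 * Walk every BO state list of the VM and let amdgpu_bo_get_memory()
 * add each BO's footprint to the given counters.
 */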
0910 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
0911                 uint64_t *gtt_mem, uint64_t *cpu_mem)
0912 {
0913     struct amdgpu_bo_va *bo_va, *tmp;
0914 
0915     list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
0916         if (!bo_va->base.bo)
0917             continue;
0918         amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
0919                 gtt_mem, cpu_mem);
0920     }
0921     list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
0922         if (!bo_va->base.bo)
0923             continue;
0924         amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
0925                 gtt_mem, cpu_mem);
0926     }
0927     list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
0928         if (!bo_va->base.bo)
0929             continue;
0930         amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
0931                 gtt_mem, cpu_mem);
0932     }
0933     list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
0934         if (!bo_va->base.bo)
0935             continue;
0936         amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
0937                 gtt_mem, cpu_mem);
0938     }
0939     spin_lock(&vm->invalidated_lock);
0940     list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
0941         if (!bo_va->base.bo)
0942             continue;
0943         amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
0944                 gtt_mem, cpu_mem);
0945     }
0946     list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
0947         if (!bo_va->base.bo)
0948             continue;
0949         amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
0950                 gtt_mem, cpu_mem);
0951     }
0952     spin_unlock(&vm->invalidated_lock);
0953 }
0954 /**
0955  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
0956  *
0957  * @adev: amdgpu_device pointer
0958  * @bo_va: requested BO and VM object
0959  * @clear: if true clear the entries
0960  *
0961  * Fill in the page table entries for @bo_va.
0962  *
0963  * Returns:
0964  * 0 for success, -EINVAL for failure.
0965  */
0966 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
0967             bool clear)
0968 {
0969     struct amdgpu_bo *bo = bo_va->base.bo;
0970     struct amdgpu_vm *vm = bo_va->base.vm;
0971     struct amdgpu_bo_va_mapping *mapping;
0972     dma_addr_t *pages_addr = NULL;
0973     struct ttm_resource *mem;
0974     struct dma_fence **last_update;
0975     bool flush_tlb = clear;
0976     struct dma_resv *resv;
0977     uint64_t vram_base;
0978     uint64_t flags;
0979     int r;
0980 
0981     if (clear || !bo) {
0982         mem = NULL;
0983         resv = vm->root.bo->tbo.base.resv;
0984     } else {
0985         struct drm_gem_object *obj = &bo->tbo.base;
0986 
0987         resv = bo->tbo.base.resv;
0988         if (obj->import_attach && bo_va->is_xgmi) {
0989             struct dma_buf *dma_buf = obj->import_attach->dmabuf;
0990             struct drm_gem_object *gobj = dma_buf->priv;
0991             struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
0992 
0993             if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
0994                 bo = gem_to_amdgpu_bo(gobj);
0995         }
0996         mem = bo->tbo.resource;
0997         if (mem->mem_type == TTM_PL_TT ||
0998             mem->mem_type == AMDGPU_PL_PREEMPT)
0999             pages_addr = bo->tbo.ttm->dma_address;
1000     }
1001 
1002     if (bo) {
1003         struct amdgpu_device *bo_adev;
1004 
1005         flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1006 
1007         if (amdgpu_bo_encrypted(bo))
1008             flags |= AMDGPU_PTE_TMZ;
1009 
1010         bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1011         vram_base = bo_adev->vm_manager.vram_base_offset;
1012     } else {
1013         flags = 0x0;
1014         vram_base = 0;
1015     }
1016 
1017     if (clear || (bo && bo->tbo.base.resv ==
1018               vm->root.bo->tbo.base.resv))
1019         last_update = &vm->last_update;
1020     else
1021         last_update = &bo_va->last_pt_update;
1022 
1023     if (!clear && bo_va->base.moved) {
1024         flush_tlb = true;
1025         list_splice_init(&bo_va->valids, &bo_va->invalids);
1026 
1027     } else if (bo_va->cleared != clear) {
1028         list_splice_init(&bo_va->valids, &bo_va->invalids);
1029     }
1030 
1031     list_for_each_entry(mapping, &bo_va->invalids, list) {
1032         uint64_t update_flags = flags;
1033 
1034         /* Normally bo_va->flags only contains the READABLE and WRITEABLE
1035          * bits, but filter the flags here anyway just in case.
1036          */
1037         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1038             update_flags &= ~AMDGPU_PTE_READABLE;
1039         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1040             update_flags &= ~AMDGPU_PTE_WRITEABLE;
1041 
1042         /* Apply ASIC specific mapping flags */
1043         amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1044 
1045         trace_amdgpu_vm_bo_update(mapping);
1046 
1047         r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1048                        resv, mapping->start, mapping->last,
1049                        update_flags, mapping->offset,
1050                        vram_base, mem, pages_addr,
1051                        last_update);
1052         if (r)
1053             return r;
1054     }
1055 
1056     /* If the BO is not in its preferred location add it back to
1057      * the evicted list so that it gets validated again on the
1058      * next command submission.
1059      */
1060     if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1061         uint32_t mem_type = bo->tbo.resource->mem_type;
1062 
1063         if (!(bo->preferred_domains &
1064               amdgpu_mem_type_to_domain(mem_type)))
1065             amdgpu_vm_bo_evicted(&bo_va->base);
1066         else
1067             amdgpu_vm_bo_idle(&bo_va->base);
1068     } else {
1069         amdgpu_vm_bo_done(&bo_va->base);
1070     }
1071 
1072     list_splice_init(&bo_va->invalids, &bo_va->valids);
1073     bo_va->cleared = clear;
1074     bo_va->base.moved = false;
1075 
1076     if (trace_amdgpu_vm_bo_mapping_enabled()) {
1077         list_for_each_entry(mapping, &bo_va->valids, list)
1078             trace_amdgpu_vm_bo_mapping(mapping);
1079     }
1080 
1081     return 0;
1082 }
1083 
1084 /**
1085  * amdgpu_vm_update_prt_state - update the global PRT state
1086  *
1087  * @adev: amdgpu_device pointer
1088  */
1089 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1090 {
1091     unsigned long flags;
1092     bool enable;
1093 
1094     spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1095     enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1096     adev->gmc.gmc_funcs->set_prt(adev, enable);
1097     spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1098 }
1099 
1100 /**
1101  * amdgpu_vm_prt_get - add a PRT user
1102  *
1103  * @adev: amdgpu_device pointer
1104  */
1105 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1106 {
1107     if (!adev->gmc.gmc_funcs->set_prt)
1108         return;
1109 
1110     if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1111         amdgpu_vm_update_prt_state(adev);
1112 }
1113 
1114 /**
1115  * amdgpu_vm_prt_put - drop a PRT user
1116  *
1117  * @adev: amdgpu_device pointer
1118  */
1119 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1120 {
1121     if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1122         amdgpu_vm_update_prt_state(adev);
1123 }
1124 
1125 /**
1126  * amdgpu_vm_prt_cb - callback for updating the PRT status
1127  *
1128  * @fence: fence for the callback
1129  * @_cb: the callback function
1130  */
1131 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1132 {
1133     struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1134 
1135     amdgpu_vm_prt_put(cb->adev);
1136     kfree(cb);
1137 }
1138 
1139 /**
1140  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1141  *
1142  * @adev: amdgpu_device pointer
1143  * @fence: fence for the callback
1144  */
1145 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1146                  struct dma_fence *fence)
1147 {
1148     struct amdgpu_prt_cb *cb;
1149 
1150     if (!adev->gmc.gmc_funcs->set_prt)
1151         return;
1152 
1153     cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1154     if (!cb) {
1155         /* Last resort when we are OOM */
1156         if (fence)
1157             dma_fence_wait(fence, false);
1158 
1159         amdgpu_vm_prt_put(adev);
1160     } else {
1161         cb->adev = adev;
1162         if (!fence || dma_fence_add_callback(fence, &cb->cb,
1163                              amdgpu_vm_prt_cb))
1164             amdgpu_vm_prt_cb(fence, &cb->cb);
1165     }
1166 }
1167 
1168 /**
1169  * amdgpu_vm_free_mapping - free a mapping
1170  *
1171  * @adev: amdgpu_device pointer
1172  * @vm: requested vm
1173  * @mapping: mapping to be freed
1174  * @fence: fence of the unmap operation
1175  *
1176  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1177  */
1178 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1179                    struct amdgpu_vm *vm,
1180                    struct amdgpu_bo_va_mapping *mapping,
1181                    struct dma_fence *fence)
1182 {
1183     if (mapping->flags & AMDGPU_PTE_PRT)
1184         amdgpu_vm_add_prt_cb(adev, fence);
1185     kfree(mapping);
1186 }
1187 
1188 /**
1189  * amdgpu_vm_prt_fini - finish all prt mappings
1190  *
1191  * @adev: amdgpu_device pointer
1192  * @vm: requested vm
1193  *
1194  * Register a cleanup callback to disable PRT support after VM dies.
1195  */
1196 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1197 {
1198     struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1199     struct dma_resv_iter cursor;
1200     struct dma_fence *fence;
1201 
1202     dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1203         /* Add a callback for each fence in the reservation object */
1204         amdgpu_vm_prt_get(adev);
1205         amdgpu_vm_add_prt_cb(adev, fence);
1206     }
1207 }
1208 
1209 /**
1210  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1211  *
1212  * @adev: amdgpu_device pointer
1213  * @vm: requested vm
1214  * @fence: optional resulting fence (unchanged if no work needed to be done
1215  * or if an error occurred)
1216  *
1217  * Make sure all freed BOs are cleared in the PT.
1218  * PTs have to be reserved and mutex must be locked!
1219  *
1220  * Returns:
1221  * 0 for success.
1222  *
1223  */
1224 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1225               struct amdgpu_vm *vm,
1226               struct dma_fence **fence)
1227 {
1228     struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1229     struct amdgpu_bo_va_mapping *mapping;
1230     uint64_t init_pte_value = 0;
1231     struct dma_fence *f = NULL;
1232     int r;
1233 
1234     while (!list_empty(&vm->freed)) {
1235         mapping = list_first_entry(&vm->freed,
1236             struct amdgpu_bo_va_mapping, list);
1237         list_del(&mapping->list);
1238 
1239         if (vm->pte_support_ats &&
1240             mapping->start < AMDGPU_GMC_HOLE_START)
1241             init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1242 
1243         r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
1244                        mapping->start, mapping->last,
1245                        init_pte_value, 0, 0, NULL, NULL,
1246                        &f);
1247         amdgpu_vm_free_mapping(adev, vm, mapping, f);
1248         if (r) {
1249             dma_fence_put(f);
1250             return r;
1251         }
1252     }
1253 
1254     if (fence && f) {
1255         dma_fence_put(*fence);
1256         *fence = f;
1257     } else {
1258         dma_fence_put(f);
1259     }
1260 
1261     return 0;
1262 
1263 }
1264 
1265 /**
1266  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1267  *
1268  * @adev: amdgpu_device pointer
1269  * @vm: requested vm
1270  *
1271  * Make sure all BOs which are moved are updated in the PTs.
1272  *
1273  * Returns:
1274  * 0 for success.
1275  *
1276  * PTs have to be reserved!
1277  */
1278 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1279                struct amdgpu_vm *vm)
1280 {
1281     struct amdgpu_bo_va *bo_va, *tmp;
1282     struct dma_resv *resv;
1283     bool clear;
1284     int r;
1285 
1286     list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
1287         /* Per VM BOs never need to be cleared in the page tables */
1288         r = amdgpu_vm_bo_update(adev, bo_va, false);
1289         if (r)
1290             return r;
1291     }
1292 
1293     spin_lock(&vm->invalidated_lock);
1294     while (!list_empty(&vm->invalidated)) {
1295         bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1296                      base.vm_status);
1297         resv = bo_va->base.bo->tbo.base.resv;
1298         spin_unlock(&vm->invalidated_lock);
1299 
1300         /* Try to reserve the BO to avoid clearing its ptes */
1301         if (!amdgpu_vm_debug && dma_resv_trylock(resv))
1302             clear = false;
1303         /* Somebody else is using the BO right now */
1304         else
1305             clear = true;
1306 
1307         r = amdgpu_vm_bo_update(adev, bo_va, clear);
1308         if (r)
1309             return r;
1310 
1311         if (!clear)
1312             dma_resv_unlock(resv);
1313         spin_lock(&vm->invalidated_lock);
1314     }
1315     spin_unlock(&vm->invalidated_lock);
1316 
1317     return 0;
1318 }
1319 
1320 /**
1321  * amdgpu_vm_bo_add - add a bo to a specific vm
1322  *
1323  * @adev: amdgpu_device pointer
1324  * @vm: requested vm
1325  * @bo: amdgpu buffer object
1326  *
1327  * Add @bo into the requested vm.
1328  * Add @bo to the list of bos associated with the vm
1329  *
1330  * Returns:
1331  * Newly added bo_va or NULL for failure
1332  *
1333  * Object has to be reserved!
1334  */
1335 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1336                       struct amdgpu_vm *vm,
1337                       struct amdgpu_bo *bo)
1338 {
1339     struct amdgpu_bo_va *bo_va;
1340 
1341     bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1342     if (bo_va == NULL) {
1343         return NULL;
1344     }
1345     amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1346 
1347     bo_va->ref_count = 1;
1348     INIT_LIST_HEAD(&bo_va->valids);
1349     INIT_LIST_HEAD(&bo_va->invalids);
1350 
1351     if (!bo)
1352         return bo_va;
1353 
1354     dma_resv_assert_held(bo->tbo.base.resv);
1355     if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1356         bo_va->is_xgmi = true;
1357         /* Power up XGMI if it can be potentially used */
1358         amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1359     }
1360 
1361     return bo_va;
1362 }
1363 
1364 
1365 /**
1366  * amdgpu_vm_bo_insert_map - insert a new mapping
1367  *
1368  * @adev: amdgpu_device pointer
1369  * @bo_va: bo_va to store the address
1370  * @mapping: the mapping to insert
1371  *
1372  * Insert a new mapping into all structures.
1373  */
1374 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1375                     struct amdgpu_bo_va *bo_va,
1376                     struct amdgpu_bo_va_mapping *mapping)
1377 {
1378     struct amdgpu_vm *vm = bo_va->base.vm;
1379     struct amdgpu_bo *bo = bo_va->base.bo;
1380 
1381     mapping->bo_va = bo_va;
1382     list_add(&mapping->list, &bo_va->invalids);
1383     amdgpu_vm_it_insert(mapping, &vm->va);
1384 
1385     if (mapping->flags & AMDGPU_PTE_PRT)
1386         amdgpu_vm_prt_get(adev);
1387 
1388     if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1389         !bo_va->base.moved) {
1390         list_move(&bo_va->base.vm_status, &vm->moved);
1391     }
1392     trace_amdgpu_vm_bo_map(bo_va, mapping);
1393 }
1394 
1395 /**
1396  * amdgpu_vm_bo_map - map bo inside a vm
1397  *
1398  * @adev: amdgpu_device pointer
1399  * @bo_va: bo_va to store the address
1400  * @saddr: where to map the BO
1401  * @offset: requested offset in the BO
1402  * @size: BO size in bytes
1403  * @flags: attributes of pages (read/write/valid/etc.)
1404  *
1405  * Add a mapping of the BO at the specified addr into the VM.
1406  *
1407  * Returns:
1408  * 0 for success, error for failure.
1409  *
1410  * Object has to be reserved and unreserved outside!
1411  */
1412 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1413              struct amdgpu_bo_va *bo_va,
1414              uint64_t saddr, uint64_t offset,
1415              uint64_t size, uint64_t flags)
1416 {
1417     struct amdgpu_bo_va_mapping *mapping, *tmp;
1418     struct amdgpu_bo *bo = bo_va->base.bo;
1419     struct amdgpu_vm *vm = bo_va->base.vm;
1420     uint64_t eaddr;
1421 
1422     /* validate the parameters */
1423     if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
1424         size == 0 || size & ~PAGE_MASK)
1425         return -EINVAL;
1426 
1427     /* make sure object fit at this offset */
1428     eaddr = saddr + size - 1;
1429     if (saddr >= eaddr ||
1430         (bo && offset + size > amdgpu_bo_size(bo)) ||
1431         (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
1432         return -EINVAL;
1433 
1434     saddr /= AMDGPU_GPU_PAGE_SIZE;
1435     eaddr /= AMDGPU_GPU_PAGE_SIZE;
1436 
1437     tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1438     if (tmp) {
1439         /* bo and tmp overlap, invalid addr */
1440         dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1441             "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1442             tmp->start, tmp->last + 1);
1443         return -EINVAL;
1444     }
1445 
1446     mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1447     if (!mapping)
1448         return -ENOMEM;
1449 
1450     mapping->start = saddr;
1451     mapping->last = eaddr;
1452     mapping->offset = offset;
1453     mapping->flags = flags;
1454 
1455     amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1456 
1457     return 0;
1458 }
1459 
1460 /**
1461  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1462  *
1463  * @adev: amdgpu_device pointer
1464  * @bo_va: bo_va to store the address
1465  * @saddr: where to map the BO
1466  * @offset: requested offset in the BO
1467  * @size: BO size in bytes
1468  * @flags: attributes of pages (read/write/valid/etc.)
1469  *
1470  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1471  * mappings as we do so.
1472  *
1473  * Returns:
1474  * 0 for success, error for failure.
1475  *
1476  * Object has to be reserved and unreserved outside!
1477  */
1478 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1479                  struct amdgpu_bo_va *bo_va,
1480                  uint64_t saddr, uint64_t offset,
1481                  uint64_t size, uint64_t flags)
1482 {
1483     struct amdgpu_bo_va_mapping *mapping;
1484     struct amdgpu_bo *bo = bo_va->base.bo;
1485     uint64_t eaddr;
1486     int r;
1487 
1488     /* validate the parameters */
1489     if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
1490         size == 0 || size & ~PAGE_MASK)
1491         return -EINVAL;
1492 
1493     /* make sure object fit at this offset */
1494     eaddr = saddr + size - 1;
1495     if (saddr >= eaddr ||
1496         (bo && offset + size > amdgpu_bo_size(bo)) ||
1497         (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
1498         return -EINVAL;
1499 
1500     /* Allocate all the needed memory */
1501     mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1502     if (!mapping)
1503         return -ENOMEM;
1504 
1505     r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1506     if (r) {
1507         kfree(mapping);
1508         return r;
1509     }
1510 
1511     saddr /= AMDGPU_GPU_PAGE_SIZE;
1512     eaddr /= AMDGPU_GPU_PAGE_SIZE;
1513 
1514     mapping->start = saddr;
1515     mapping->last = eaddr;
1516     mapping->offset = offset;
1517     mapping->flags = flags;
1518 
1519     amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1520 
1521     return 0;
1522 }
1523 
1524 /**
1525  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1526  *
1527  * @adev: amdgpu_device pointer
1528  * @bo_va: bo_va to remove the address from
0529  * @saddr: where the BO is mapped
0530  *
0531  * Remove a mapping of the BO at the specified addr from the VM.
1532  *
1533  * Returns:
1534  * 0 for success, error for failure.
1535  *
1536  * Object has to be reserved and unreserved outside!
1537  */
1538 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1539                struct amdgpu_bo_va *bo_va,
1540                uint64_t saddr)
1541 {
1542     struct amdgpu_bo_va_mapping *mapping;
1543     struct amdgpu_vm *vm = bo_va->base.vm;
1544     bool valid = true;
1545 
1546     saddr /= AMDGPU_GPU_PAGE_SIZE;
1547 
1548     list_for_each_entry(mapping, &bo_va->valids, list) {
1549         if (mapping->start == saddr)
1550             break;
1551     }
1552 
1553     if (&mapping->list == &bo_va->valids) {
1554         valid = false;
1555 
1556         list_for_each_entry(mapping, &bo_va->invalids, list) {
1557             if (mapping->start == saddr)
1558                 break;
1559         }
1560 
1561         if (&mapping->list == &bo_va->invalids)
1562             return -ENOENT;
1563     }
1564 
1565     list_del(&mapping->list);
1566     amdgpu_vm_it_remove(mapping, &vm->va);
1567     mapping->bo_va = NULL;
1568     trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1569 
1570     if (valid)
1571         list_add(&mapping->list, &vm->freed);
1572     else
1573         amdgpu_vm_free_mapping(adev, vm, mapping,
1574                        bo_va->last_pt_update);
1575 
1576     return 0;
1577 }
1578 
1579 /**
1580  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1581  *
1582  * @adev: amdgpu_device pointer
1583  * @vm: VM structure to use
1584  * @saddr: start of the range
1585  * @size: size of the range
1586  *
1587  * Remove all mappings in a range, split them as appropriate.
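 *
 * For example (illustrative numbers only): clearing [0x3000..0x5fff]
 * out of an existing mapping covering [0x1000..0x8fff] leaves a
 * "before" remainder [0x1000..0x2fff] and an "after" remainder
 * [0x6000..0x8fff], with the mapping offset adjusted for the latter.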
1588  *
1589  * Returns:
1590  * 0 for success, error for failure.
1591  */
1592 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1593                 struct amdgpu_vm *vm,
1594                 uint64_t saddr, uint64_t size)
1595 {
1596     struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1597     LIST_HEAD(removed);
1598     uint64_t eaddr;
1599 
1600     eaddr = saddr + size - 1;
1601     saddr /= AMDGPU_GPU_PAGE_SIZE;
1602     eaddr /= AMDGPU_GPU_PAGE_SIZE;
1603 
1604     /* Allocate all the needed memory */
1605     before = kzalloc(sizeof(*before), GFP_KERNEL);
1606     if (!before)
1607         return -ENOMEM;
1608     INIT_LIST_HEAD(&before->list);
1609 
1610     after = kzalloc(sizeof(*after), GFP_KERNEL);
1611     if (!after) {
1612         kfree(before);
1613         return -ENOMEM;
1614     }
1615     INIT_LIST_HEAD(&after->list);
1616 
1617     /* Now gather all removed mappings */
1618     tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1619     while (tmp) {
1620         /* Remember mapping split at the start */
1621         if (tmp->start < saddr) {
1622             before->start = tmp->start;
1623             before->last = saddr - 1;
1624             before->offset = tmp->offset;
1625             before->flags = tmp->flags;
1626             before->bo_va = tmp->bo_va;
1627             list_add(&before->list, &tmp->bo_va->invalids);
1628         }
1629 
1630         /* Remember mapping split at the end */
1631         if (tmp->last > eaddr) {
1632             after->start = eaddr + 1;
1633             after->last = tmp->last;
1634             after->offset = tmp->offset;
1635             after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1636             after->flags = tmp->flags;
1637             after->bo_va = tmp->bo_va;
1638             list_add(&after->list, &tmp->bo_va->invalids);
1639         }
1640 
1641         list_del(&tmp->list);
1642         list_add(&tmp->list, &removed);
1643 
1644         tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1645     }
1646 
1647     /* And free them up */
1648     list_for_each_entry_safe(tmp, next, &removed, list) {
1649         amdgpu_vm_it_remove(tmp, &vm->va);
1650         list_del(&tmp->list);
1651 
1652         if (tmp->start < saddr)
1653             tmp->start = saddr;
1654         if (tmp->last > eaddr)
1655             tmp->last = eaddr;
1656 
1657         tmp->bo_va = NULL;
1658         list_add(&tmp->list, &vm->freed);
1659         trace_amdgpu_vm_bo_unmap(NULL, tmp);
1660     }
1661 
1662     /* Insert partial mapping before the range */
1663     if (!list_empty(&before->list)) {
1664         amdgpu_vm_it_insert(before, &vm->va);
1665         if (before->flags & AMDGPU_PTE_PRT)
1666             amdgpu_vm_prt_get(adev);
1667     } else {
1668         kfree(before);
1669     }
1670 
1671     /* Insert partial mapping after the range */
1672     if (!list_empty(&after->list)) {
1673         amdgpu_vm_it_insert(after, &vm->va);
1674         if (after->flags & AMDGPU_PTE_PRT)
1675             amdgpu_vm_prt_get(adev);
1676     } else {
1677         kfree(after);
1678     }
1679 
1680     return 0;
1681 }
1682 
1683 /**
1684  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1685  *
1686  * @vm: the requested VM
1687  * @addr: the address
1688  *
0689  * Find a mapping by its address.
1690  *
1691  * Returns:
1692  * The amdgpu_bo_va_mapping matching for addr or NULL
1693  *
1694  */
1695 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1696                              uint64_t addr)
1697 {
1698     return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1699 }
1700 
1701 /**
1702  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1703  *
1704  * @vm: the requested vm
1705  * @ticket: CS ticket
1706  *
1707  * Trace all mappings of BOs reserved during a command submission.
1708  */
1709 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1710 {
1711     struct amdgpu_bo_va_mapping *mapping;
1712 
1713     if (!trace_amdgpu_vm_bo_cs_enabled())
1714         return;
1715 
1716     for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1717          mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1718         if (mapping->bo_va && mapping->bo_va->base.bo) {
1719             struct amdgpu_bo *bo;
1720 
1721             bo = mapping->bo_va->base.bo;
1722             if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1723                 ticket)
1724                 continue;
1725         }
1726 
1727         trace_amdgpu_vm_bo_cs(mapping);
1728     }
1729 }
1730 
1731 /**
1732  * amdgpu_vm_bo_del - remove a bo from a specific vm
1733  *
1734  * @adev: amdgpu_device pointer
1735  * @bo_va: requested bo_va
1736  *
1737  * Remove @bo_va->bo from the requested vm.
1738  *
1739  * Object has to be reserved!
1740  */
1741 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1742               struct amdgpu_bo_va *bo_va)
1743 {
1744     struct amdgpu_bo_va_mapping *mapping, *next;
1745     struct amdgpu_bo *bo = bo_va->base.bo;
1746     struct amdgpu_vm *vm = bo_va->base.vm;
1747     struct amdgpu_vm_bo_base **base;
1748 
1749     dma_resv_assert_held(vm->root.bo->tbo.base.resv);
1750 
1751     if (bo) {
1752         dma_resv_assert_held(bo->tbo.base.resv);
1753         if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1754             ttm_bo_set_bulk_move(&bo->tbo, NULL);
1755 
1756         for (base = &bo_va->base.bo->vm_bo; *base;
1757              base = &(*base)->next) {
1758             if (*base != &bo_va->base)
1759                 continue;
1760 
1761             *base = bo_va->base.next;
1762             break;
1763         }
1764     }
1765 
1766     spin_lock(&vm->invalidated_lock);
1767     list_del(&bo_va->base.vm_status);
1768     spin_unlock(&vm->invalidated_lock);
1769 
1770     list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1771         list_del(&mapping->list);
1772         amdgpu_vm_it_remove(mapping, &vm->va);
1773         mapping->bo_va = NULL;
1774         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1775         list_add(&mapping->list, &vm->freed);
1776     }
1777     list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1778         list_del(&mapping->list);
1779         amdgpu_vm_it_remove(mapping, &vm->va);
1780         amdgpu_vm_free_mapping(adev, vm, mapping,
1781                        bo_va->last_pt_update);
1782     }
1783 
1784     dma_fence_put(bo_va->last_pt_update);
1785 
1786     if (bo && bo_va->is_xgmi)
1787         amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
1788 
1789     kfree(bo_va);
1790 }
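
/*
 * Illustrative sketch of the reservation contract stated above, assuming a
 * per-VM BO (one that shares the root PD's reservation object, see the
 * ttm_bo_set_bulk_move() check), so a single reserve satisfies both
 * dma_resv_assert_held() checks. The caller is hypothetical.
 */
static void example_release_bo_va(struct amdgpu_device *adev,
                                  struct amdgpu_bo_va *bo_va)
{
    struct amdgpu_bo *bo = bo_va->base.bo;

    if (WARN_ON(amdgpu_bo_reserve(bo, true)))
        return;

    amdgpu_vm_bo_del(adev, bo_va);
    amdgpu_bo_unreserve(bo);
}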
1791 
1792 /**
1793  * amdgpu_vm_evictable - check if we can evict a VM
1794  *
1795  * @bo: A page table of the VM.
1796  *
1797  * Check if it is possible to evict a VM.
1798  */
1799 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
1800 {
1801     struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
1802 
1803     /* Page tables of a destroyed VM can go away immediately */
1804     if (!bo_base || !bo_base->vm)
1805         return true;
1806 
1807     /* Don't evict VM page tables while they are busy */
1808     if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
1809         return false;
1810 
1811     /* Try to block ongoing updates */
1812     if (!amdgpu_vm_eviction_trylock(bo_base->vm))
1813         return false;
1814 
1815     /* Don't evict VM page tables while they are updated */
1816     if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
1817         amdgpu_vm_eviction_unlock(bo_base->vm);
1818         return false;
1819     }
1820 
1821     bo_base->vm->evicting = true;
1822     amdgpu_vm_eviction_unlock(bo_base->vm);
1823     return true;
1824 }
1825 
1826 /**
1827  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @bo: amdgpu buffer object
1831  * @evicted: is the BO evicted
1832  *
1833  * Mark @bo as invalid.
1834  */
1835 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1836                  struct amdgpu_bo *bo, bool evicted)
1837 {
1838     struct amdgpu_vm_bo_base *bo_base;
1839 
1840     /* shadow bo doesn't have bo base, its validation needs its parent */
1841     if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
1842         bo = bo->parent;
1843 
1844     for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
1845         struct amdgpu_vm *vm = bo_base->vm;
1846 
1847         if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1848             amdgpu_vm_bo_evicted(bo_base);
1849             continue;
1850         }
1851 
1852         if (bo_base->moved)
1853             continue;
1854         bo_base->moved = true;
1855 
1856         if (bo->tbo.type == ttm_bo_type_kernel)
1857             amdgpu_vm_bo_relocated(bo_base);
1858         else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1859             amdgpu_vm_bo_moved(bo_base);
1860         else
1861             amdgpu_vm_bo_invalidated(bo_base);
1862     }
1863 }
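
/*
 * Summary of the routing above: page table BOs (ttm_bo_type_kernel) go to
 * the relocated list, per-VM BOs sharing the root reservation go to the
 * moved list, and independently reserved BOs go to the invalidated list; a
 * fully evicted per-VM BO short-circuits to the evicted list instead.
 */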
1864 
1865 /**
1866  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
1867  *
1868  * @vm_size: VM size
1869  *
1870  * Returns:
1871  * VM page table size as a power of two
1872  */
1873 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
1874 {
1875     /* Total bits covered by PD + PTs */
1876     unsigned bits = ilog2(vm_size) + 18;
1877 
1878     /* Make sure the PD is 4K in size up to 8GB address space.
1879        Above that, split equally between PD and PTs */
1880     if (vm_size <= 8)
1881         return (bits - 9);
1882     else
1883         return ((bits + 3) / 2);
1884 }
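
/*
 * Worked example for the calculation above: a vm_size of 8 GB covers
 * 8 GiB / 4 KiB = 2^21 pages, so bits = ilog2(8) + 18 = 21 and the block
 * size is 21 - 9 = 12; the PD then keeps 2^(21 - 12) = 512 entries, i.e.
 * exactly one 4K page. For vm_size = 256 GB, bits = 26 and the block size
 * becomes (26 + 3) / 2 = 14, leaving 12 bits to the PD, roughly an equal
 * split.
 */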
1885 
1886 /**
1887  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
1888  *
1889  * @adev: amdgpu_device pointer
1890  * @min_vm_size: the minimum vm size in GB if the size is set to auto
1891  * @fragment_size_default: Default PTE fragment size
1892  * @max_level: max VMPT level
1893  * @max_bits: max address space size in bits
1894  *
1895  */
1896 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
1897                uint32_t fragment_size_default, unsigned max_level,
1898                unsigned max_bits)
1899 {
1900     unsigned int max_size = 1 << (max_bits - 30);
1901     unsigned int vm_size;
1902     uint64_t tmp;
1903 
1904     /* adjust vm size first */
1905     if (amdgpu_vm_size != -1) {
1906         vm_size = amdgpu_vm_size;
1907         if (vm_size > max_size) {
1908             dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
1909                  amdgpu_vm_size, max_size);
1910             vm_size = max_size;
1911         }
1912     } else {
1913         struct sysinfo si;
1914         unsigned int phys_ram_gb;
1915 
1916         /* Optimal VM size depends on the amount of physical
1917          * RAM available. Underlying requirements and
1918          * assumptions:
1919          *
1920          *  - Need to map system memory and VRAM from all GPUs
1921          *     - VRAM from other GPUs not known here
1922          *     - Assume VRAM <= system memory
1923          *  - On GFX8 and older, VM space can be segmented for
1924          *    different MTYPEs
1925          *  - Need to allow room for fragmentation, guard pages etc.
1926          *
1927          * This adds up to a rough guess of system memory x3.
1928          * Round up to power of two to maximize the available
1929          * VM size with the given page table size.
1930          */
1931         si_meminfo(&si);
1932         phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
1933                    (1 << 30) - 1) >> 30;
1934         vm_size = roundup_pow_of_two(
1935             min(max(phys_ram_gb * 3, min_vm_size), max_size));
1936     }
1937 
1938     adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
1939 
1940     tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
1941     if (amdgpu_vm_block_size != -1)
1942         tmp >>= amdgpu_vm_block_size - 9;
1943     tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
1944     adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
1945     switch (adev->vm_manager.num_level) {
1946     case 3:
1947         adev->vm_manager.root_level = AMDGPU_VM_PDB2;
1948         break;
1949     case 2:
1950         adev->vm_manager.root_level = AMDGPU_VM_PDB1;
1951         break;
1952     case 1:
1953         adev->vm_manager.root_level = AMDGPU_VM_PDB0;
1954         break;
1955     default:
1956         dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
1957     }
1958     /* block size depends on vm size and hw setup */
1959     if (amdgpu_vm_block_size != -1)
1960         adev->vm_manager.block_size =
1961             min((unsigned)amdgpu_vm_block_size, max_bits
1962                 - AMDGPU_GPU_PAGE_SHIFT
1963                 - 9 * adev->vm_manager.num_level);
1964     else if (adev->vm_manager.num_level > 1)
1965         adev->vm_manager.block_size = 9;
1966     else
1967         adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
1968 
1969     if (amdgpu_vm_fragment_size == -1)
1970         adev->vm_manager.fragment_size = fragment_size_default;
1971     else
1972         adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
1973 
1974     DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
1975          vm_size, adev->vm_manager.num_level + 1,
1976          adev->vm_manager.block_size,
1977          adev->vm_manager.fragment_size);
1978 }
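
/*
 * Illustrative numbers for the sizing heuristic above: on a machine with
 * 16 GB of RAM and no module parameter overrides, phys_ram_gb = 16, so
 * vm_size = roundup_pow_of_two(min(max(16 * 3, min_vm_size), max_size)) =
 * 64 GB (assuming min_vm_size <= 48 and max_size >= 64), and max_pfn
 * becomes 64 << 18 GPU pages.
 */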
1979 
1980 /**
1981  * amdgpu_vm_wait_idle - wait for the VM to become idle
1982  *
1983  * @vm: VM object to wait for
1984  * @timeout: timeout to wait for VM to become idle
1985  */
1986 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
1987 {
1988     timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
1989                     DMA_RESV_USAGE_BOOKKEEP,
1990                     true, timeout);
1991     if (timeout <= 0)
1992         return timeout;
1993 
1994     return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
1995 }
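
/*
 * Usage sketch: the return value follows dma_fence_wait_timeout()
 * semantics, e.g. (hypothetical caller policy):
 *
 *    long timeout = msecs_to_jiffies(2000);
 *
 *    timeout = amdgpu_vm_wait_idle(vm, timeout);
 *    if (timeout < 0)
 *        return timeout;        error
 *    else if (timeout == 0)
 *        return -ETIMEDOUT;     still busy
 *
 * A real in-tree caller is amdgpu_vm_ioctl() further down in this file.
 */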
1996 
1997 /**
1998  * amdgpu_vm_init - initialize a vm instance
1999  *
2000  * @adev: amdgpu_device pointer
2001  * @vm: requested vm
2002  *
2003  * Init @vm fields.
2004  *
2005  * Returns:
2006  * 0 for success, error for failure.
2007  */
2008 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2009 {
2010     struct amdgpu_bo *root_bo;
2011     struct amdgpu_bo_vm *root;
2012     int r, i;
2013 
2014     vm->va = RB_ROOT_CACHED;
2015     for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2016         vm->reserved_vmid[i] = NULL;
2017     INIT_LIST_HEAD(&vm->evicted);
2018     INIT_LIST_HEAD(&vm->relocated);
2019     INIT_LIST_HEAD(&vm->moved);
2020     INIT_LIST_HEAD(&vm->idle);
2021     INIT_LIST_HEAD(&vm->invalidated);
2022     spin_lock_init(&vm->invalidated_lock);
2023     INIT_LIST_HEAD(&vm->freed);
2024     INIT_LIST_HEAD(&vm->done);
2025 
2026     /* create scheduler entities for page table updates */
2027     r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
2028                   adev->vm_manager.vm_pte_scheds,
2029                   adev->vm_manager.vm_pte_num_scheds, NULL);
2030     if (r)
2031         return r;
2032 
2033     r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
2034                   adev->vm_manager.vm_pte_scheds,
2035                   adev->vm_manager.vm_pte_num_scheds, NULL);
2036     if (r)
2037         goto error_free_immediate;
2038 
2039     vm->pte_support_ats = false;
2040     vm->is_compute_context = false;
2041 
2042     vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2043                     AMDGPU_VM_USE_CPU_FOR_GFX);
2044 
2045     DRM_DEBUG_DRIVER("VM update mode is %s\n",
2046              vm->use_cpu_for_update ? "CPU" : "SDMA");
2047     WARN_ONCE((vm->use_cpu_for_update &&
2048            !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2049           "CPU update of VM recommended only for large BAR system\n");
2050 
2051     if (vm->use_cpu_for_update)
2052         vm->update_funcs = &amdgpu_vm_cpu_funcs;
2053     else
2054         vm->update_funcs = &amdgpu_vm_sdma_funcs;
2055     vm->last_update = NULL;
2056     vm->last_unlocked = dma_fence_get_stub();
2057     vm->last_tlb_flush = dma_fence_get_stub();
2058 
2059     mutex_init(&vm->eviction_lock);
2060     vm->evicting = false;
2061 
2062     r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2063                 false, &root);
2064     if (r)
2065         goto error_free_delayed;
2066     root_bo = &root->bo;
2067     r = amdgpu_bo_reserve(root_bo, true);
2068     if (r)
2069         goto error_free_root;
2070 
2071     r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2072     if (r)
2073         goto error_unreserve;
2074 
2075     amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2076 
2077     r = amdgpu_vm_pt_clear(adev, vm, root, false);
2078     if (r)
2079         goto error_unreserve;
2080 
2081     amdgpu_bo_unreserve(vm->root.bo);
2082 
2083     INIT_KFIFO(vm->faults);
2084 
2085     return 0;
2086 
2087 error_unreserve:
2088     amdgpu_bo_unreserve(vm->root.bo);
2089 
2090 error_free_root:
2091     amdgpu_bo_unref(&root->shadow);
2092     amdgpu_bo_unref(&root_bo);
2093     vm->root.bo = NULL;
2094 
2095 error_free_delayed:
2096     dma_fence_put(vm->last_tlb_flush);
2097     dma_fence_put(vm->last_unlocked);
2098     drm_sched_entity_destroy(&vm->delayed);
2099 
2100 error_free_immediate:
2101     drm_sched_entity_destroy(&vm->immediate);
2102 
2103     return r;
2104 }
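
/*
 * Illustrative sketch of the expected life cycle: amdgpu_vm_init() pairs
 * with amdgpu_vm_fini() below, and a PASID may be attached in between. The
 * caller and its error policy are hypothetical; amdgpu_vm_set_pasid() is
 * assumed to return 0 or a negative errno.
 */
static int example_open_vm(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           u32 pasid)
{
    int r;

    r = amdgpu_vm_init(adev, vm);
    if (r)
        return r;

    r = amdgpu_vm_set_pasid(adev, vm, pasid);
    if (r)
        amdgpu_vm_fini(adev, vm);

    return r;
}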
2105 
2106 /**
2107  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2108  *
2109  * @adev: amdgpu_device pointer
2110  * @vm: requested vm
2111  *
2112  * This only works on GFX VMs that don't have any BOs added and no
2113  * page tables allocated yet.
2114  *
2115  * Changes the following VM parameters:
2116  * - use_cpu_for_update
2117  * - pte_support_ats
2118  *
2119  * Reinitializes the page directory to reflect the changed ATS
2120  * setting.
2121  *
2122  * Returns:
2123  * 0 for success, -errno for errors.
2124  */
2125 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2126 {
2127     bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2128     int r;
2129 
2130     r = amdgpu_bo_reserve(vm->root.bo, true);
2131     if (r)
2132         return r;
2133 
2134     /* Sanity checks */
2135     if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
2136         r = -EINVAL;
2137         goto unreserve_bo;
2138     }
2139 
2140     /* Check if PD needs to be reinitialized and do it before
2141      * changing any other state, in case it fails.
2142      */
2143     if (pte_support_ats != vm->pte_support_ats) {
2144         vm->pte_support_ats = pte_support_ats;
2145         r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
2146                        false);
2147         if (r)
2148             goto unreserve_bo;
2149     }
2150 
2151     /* Update VM state */
2152     vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2153                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2154     DRM_DEBUG_DRIVER("VM update mode is %s\n",
2155              vm->use_cpu_for_update ? "CPU" : "SDMA");
2156     WARN_ONCE((vm->use_cpu_for_update &&
2157            !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2158           "CPU update of VM recommended only for large BAR system\n");
2159 
2160     if (vm->use_cpu_for_update) {
2161         /* Sync with last SDMA update/clear before switching to CPU */
2162         r = amdgpu_bo_sync_wait(vm->root.bo,
2163                     AMDGPU_FENCE_OWNER_UNDEFINED, true);
2164         if (r)
2165             goto unreserve_bo;
2166 
2167         vm->update_funcs = &amdgpu_vm_cpu_funcs;
2168     } else {
2169         vm->update_funcs = &amdgpu_vm_sdma_funcs;
2170     }
2171     /*
2172      * Make sure the root PD gets mapped, as vm_update_mode could have
2173      * changed when turning a GFX VM into a compute VM.
2174      */
2175     r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));
2176     if (r)
2177         goto unreserve_bo;
2178 
2179     dma_fence_put(vm->last_update);
2180     vm->last_update = NULL;
2181     vm->is_compute_context = true;
2182 
2183     /* Free the shadow bo for compute VM */
2184     amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2185 
2186     /* fall through to the common unlock path */
2187 
2188 unreserve_bo:
2189     amdgpu_bo_unreserve(vm->root.bo);
2190     return r;
2191 }
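
/*
 * Illustrative sketch: converting an existing GFX VM for compute use,
 * paired with amdgpu_vm_release_compute() below. The caller is
 * hypothetical; the root PD must not already be reserved by the caller,
 * since amdgpu_vm_make_compute() reserves it itself.
 */
static int example_acquire_compute_vm(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm, u32 pasid)
{
    int r;

    r = amdgpu_vm_make_compute(adev, vm);
    if (r)
        return r;

    return amdgpu_vm_set_pasid(adev, vm, pasid);
}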
2192 
2193 /**
2194  * amdgpu_vm_release_compute - release a compute vm
2195  * @adev: amdgpu_device pointer
2196  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2197  *
2198  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2199  * compute pasid from the vm. Compute code must stop using the vm afterwards.
2200  */
2201 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2202 {
2203     amdgpu_vm_set_pasid(adev, vm, 0);
2204     vm->is_compute_context = false;
2205 }
2206 
2207 /**
2208  * amdgpu_vm_fini - tear down a vm instance
2209  *
2210  * @adev: amdgpu_device pointer
2211  * @vm: requested vm
2212  *
2213  * Tear down @vm.
2214  * Unbind the VM and remove all BOs from the VM's BO list.
2215  */
2216 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2217 {
2218     struct amdgpu_bo_va_mapping *mapping, *tmp;
2219     bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2220     struct amdgpu_bo *root;
2221     unsigned long flags;
2222     int i;
2223 
2224     amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2225 
2226     root = amdgpu_bo_ref(vm->root.bo);
2227     amdgpu_bo_reserve(root, true);
2228     amdgpu_vm_set_pasid(adev, vm, 0);
2229     dma_fence_wait(vm->last_unlocked, false);
2230     dma_fence_put(vm->last_unlocked);
2231     dma_fence_wait(vm->last_tlb_flush, false);
2232     /* Make sure that all fence callbacks have completed */
2233     spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2234     spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2235     dma_fence_put(vm->last_tlb_flush);
2236 
2237     list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2238         if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2239             amdgpu_vm_prt_fini(adev, vm);
2240             prt_fini_needed = false;
2241         }
2242 
2243         list_del(&mapping->list);
2244         amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2245     }
2246 
2247     amdgpu_vm_pt_free_root(adev, vm);
2248     amdgpu_bo_unreserve(root);
2249     amdgpu_bo_unref(&root);
2250     WARN_ON(vm->root.bo);
2251 
2252     drm_sched_entity_destroy(&vm->immediate);
2253     drm_sched_entity_destroy(&vm->delayed);
2254 
2255     if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2256         dev_err(adev->dev, "still active bo inside vm\n");
2257     }
2258     rbtree_postorder_for_each_entry_safe(mapping, tmp,
2259                          &vm->va.rb_root, rb) {
2260         /* Don't remove the mapping here, we don't want to trigger a
2261          * rebalance and the tree is about to be destroyed anyway.
2262          */
2263         list_del(&mapping->list);
2264         kfree(mapping);
2265     }
2266 
2267     dma_fence_put(vm->last_update);
2268     for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2269         amdgpu_vmid_free_reserved(adev, vm, i);
2270 }
2271 
2272 /**
2273  * amdgpu_vm_manager_init - init the VM manager
2274  *
2275  * @adev: amdgpu_device pointer
2276  *
2277  * Initialize the VM manager structures
2278  */
2279 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2280 {
2281     unsigned i;
2282 
2283     /* Concurrent flushes are only possible starting with Vega10 and
2284      * are broken on Navi10 and Navi14.
2285      */
2286     adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2287                           adev->asic_type == CHIP_NAVI10 ||
2288                           adev->asic_type == CHIP_NAVI14);
2289     amdgpu_vmid_mgr_init(adev);
2290 
2291     adev->vm_manager.fence_context =
2292         dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2293     for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2294         adev->vm_manager.seqno[i] = 0;
2295 
2296     spin_lock_init(&adev->vm_manager.prt_lock);
2297     atomic_set(&adev->vm_manager.num_prt_users, 0);
2298 
2299     /* Unless overridden by the user, compute VM tables are by default
2300      * updated by the CPU only on large-BAR systems
2301      */
2302 #ifdef CONFIG_X86_64
2303     if (amdgpu_vm_update_mode == -1) {
2304         if (amdgpu_gmc_vram_full_visible(&adev->gmc))
2305             adev->vm_manager.vm_update_mode =
2306                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2307         else
2308             adev->vm_manager.vm_update_mode = 0;
2309     } else
2310         adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2311 #else
2312     adev->vm_manager.vm_update_mode = 0;
2313 #endif
2314 
2315     xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2316 }
2317 
2318 /**
2319  * amdgpu_vm_manager_fini - cleanup VM manager
2320  *
2321  * @adev: amdgpu_device pointer
2322  *
2323  * Cleanup the VM manager and free resources.
2324  */
2325 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2326 {
2327     WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2328     xa_destroy(&adev->vm_manager.pasids);
2329 
2330     amdgpu_vmid_mgr_fini(adev);
2331 }
2332 
2333 /**
2334  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2335  *
2336  * @dev: drm device pointer
2337  * @data: drm_amdgpu_vm
2338  * @filp: drm file pointer
2339  *
2340  * Returns:
2341  * 0 for success, -errno for errors.
2342  */
2343 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2344 {
2345     union drm_amdgpu_vm *args = data;
2346     struct amdgpu_device *adev = drm_to_adev(dev);
2347     struct amdgpu_fpriv *fpriv = filp->driver_priv;
2348     long timeout = msecs_to_jiffies(2000);
2349     int r;
2350 
2351     switch (args->in.op) {
2352     case AMDGPU_VM_OP_RESERVE_VMID:
2353         /* We only need to reserve a vmid from the gfxhub */
2354         r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
2355                            AMDGPU_GFXHUB_0);
2356         if (r)
2357             return r;
2358         break;
2359     case AMDGPU_VM_OP_UNRESERVE_VMID:
2360         if (amdgpu_sriov_runtime(adev))
2361             timeout = 8 * timeout;
2362 
2363         /* Wait for the VM to become idle, to make sure the vmid set in
2364          * SPM_VMID is not referenced anymore.
2365          */
2366         r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
2367         if (r)
2368             return r;
2369 
2370         r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
2371         if (r < 0)
2372             return r;
2373 
2374         amdgpu_bo_unreserve(fpriv->vm.root.bo);
2375         amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
2376         break;
2377     default:
2378         return -EINVAL;
2379     }
2380 
2381     return 0;
2382 }
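
/*
 * Illustrative userspace sketch for the ioctl above (guarded out, since
 * this is kernel code), assuming libdrm's drmIoctl() wrapper and an open
 * device fd; DRM_IOCTL_AMDGPU_VM and the op codes come from the amdgpu
 * UAPI headers.
 */
#if 0
#include <amdgpu_drm.h>
#include <xf86drm.h>

static int example_reserve_vmid(int fd)
{
    union drm_amdgpu_vm args = {
        .in.op = AMDGPU_VM_OP_RESERVE_VMID,
    };

    return drmIoctl(fd, DRM_IOCTL_AMDGPU_VM, &args);
}
#endif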
2383 
2384 /**
2385  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2386  *
2387  * @adev: amdgpu device pointer
2388  * @pasid: PASID identifier for VM
2389  * @task_info: task_info to fill.
2390  */
2391 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
2392              struct amdgpu_task_info *task_info)
2393 {
2394     struct amdgpu_vm *vm;
2395     unsigned long flags;
2396 
2397     xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2398 
2399     vm = xa_load(&adev->vm_manager.pasids, pasid);
2400     if (vm)
2401         *task_info = vm->task_info;
2402 
2403     xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2404 }
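
/*
 * Illustrative sketch: fault handlers typically resolve a faulting PASID
 * to process information for logging. The surrounding helper is
 * hypothetical; the task_info fields match amdgpu_vm_set_task_info() below.
 */
static void example_log_fault(struct amdgpu_device *adev, u32 pasid)
{
    struct amdgpu_task_info task_info;

    memset(&task_info, 0, sizeof(task_info));
    amdgpu_vm_get_task_info(adev, pasid, &task_info);
    pr_debug("fault in process %s pid %d thread %s pid %d\n",
             task_info.process_name, task_info.tgid,
             task_info.task_name, task_info.pid);
}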
2405 
2406 /**
2407  * amdgpu_vm_set_task_info - Sets VMs task info.
2408  *
2409  * @vm: vm for which to set the info
2410  */
2411 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2412 {
2413     if (vm->task_info.pid)
2414         return;
2415 
2416     vm->task_info.pid = current->pid;
2417     get_task_comm(vm->task_info.task_name, current);
2418 
2419     if (current->group_leader->mm != current->mm)
2420         return;
2421 
2422     vm->task_info.tgid = current->group_leader->pid;
2423     get_task_comm(vm->task_info.process_name, current->group_leader);
2424 }
2425 
2426 /**
2427  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2428  * @adev: amdgpu device pointer
2429  * @pasid: PASID of the VM
2430  * @addr: Address of the fault
2431  * @write_fault: true if it is a write fault, false if it is a read fault
2432  *
2433  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2434  * shouldn't be reported any more.
2435  */
2436 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2437                 uint64_t addr, bool write_fault)
2438 {
2439     bool is_compute_context = false;
2440     struct amdgpu_bo *root;
2441     unsigned long irqflags;
2442     uint64_t value, flags;
2443     struct amdgpu_vm *vm;
2444     int r;
2445 
2446     xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2447     vm = xa_load(&adev->vm_manager.pasids, pasid);
2448     if (vm) {
2449         root = amdgpu_bo_ref(vm->root.bo);
2450         is_compute_context = vm->is_compute_context;
2451     } else {
2452         root = NULL;
2453     }
2454     xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2455 
2456     if (!root)
2457         return false;
2458 
2459     addr /= AMDGPU_GPU_PAGE_SIZE;
2460 
2461     if (is_compute_context &&
2462         !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
2463         amdgpu_bo_unref(&root);
2464         return true;
2465     }
2466 
2467     r = amdgpu_bo_reserve(root, true);
2468     if (r)
2469         goto error_unref;
2470 
2471     /* Double check that the VM still exists */
2472     xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2473     vm = xa_load(&adev->vm_manager.pasids, pasid);
2474     if (vm && vm->root.bo != root)
2475         vm = NULL;
2476     xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2477     if (!vm)
2478         goto error_unlock;
2479 
2480     flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2481         AMDGPU_PTE_SYSTEM;
2482 
2483     if (is_compute_context) {
2484         /* Intentionally setting invalid PTE flag
2485          * combination to force a no-retry-fault
2486          */
2487         flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
2488         value = 0;
2489     } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2490         /* Redirect the access to the dummy page */
2491         value = adev->dummy_page_addr;
2492         flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2493             AMDGPU_PTE_WRITEABLE;
2494 
2495     } else {
2496         /* Let the hw retry silently on the PTE */
2497         value = 0;
2498     }
2499 
2500     r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2501     if (r) {
2502         pr_debug("failed %d to reserve fence slot\n", r);
2503         goto error_unlock;
2504     }
2505 
2506     r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
2507                    addr, flags, value, 0, NULL, NULL, NULL);
2508     if (r)
2509         goto error_unlock;
2510 
2511     r = amdgpu_vm_update_pdes(adev, vm, true);
2512 
2513 error_unlock:
2514     amdgpu_bo_unreserve(root);
2515     if (r < 0)
2516         DRM_ERROR("Can't handle page fault (%d)\n", r);
2517 
2518 error_unref:
2519     amdgpu_bo_unref(&root);
2520 
2521     return false;
2522 }
2523 
2524 #if defined(CONFIG_DEBUG_FS)
2525 /**
2526  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2527  *
2528  * @vm: Requested VM for printing BO info
2529  * @m: debugfs file
2530  *
2531  * Print BO information in debugfs file for the VM
2532  */
2533 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2534 {
2535     struct amdgpu_bo_va *bo_va, *tmp;
2536     u64 total_idle = 0;
2537     u64 total_evicted = 0;
2538     u64 total_relocated = 0;
2539     u64 total_moved = 0;
2540     u64 total_invalidated = 0;
2541     u64 total_done = 0;
2542     unsigned int total_idle_objs = 0;
2543     unsigned int total_evicted_objs = 0;
2544     unsigned int total_relocated_objs = 0;
2545     unsigned int total_moved_objs = 0;
2546     unsigned int total_invalidated_objs = 0;
2547     unsigned int total_done_objs = 0;
2548     unsigned int id = 0;
2549 
2550     seq_puts(m, "\tIdle BOs:\n");
2551     list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2552         if (!bo_va->base.bo)
2553             continue;
2554         total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2555     }
2556     total_idle_objs = id;
2557     id = 0;
2558 
2559     seq_puts(m, "\tEvicted BOs:\n");
2560     list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2561         if (!bo_va->base.bo)
2562             continue;
2563         total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2564     }
2565     total_evicted_objs = id;
2566     id = 0;
2567 
2568     seq_puts(m, "\tRelocated BOs:\n");
2569     list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2570         if (!bo_va->base.bo)
2571             continue;
2572         total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2573     }
2574     total_relocated_objs = id;
2575     id = 0;
2576 
2577     seq_puts(m, "\tMoved BOs:\n");
2578     list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2579         if (!bo_va->base.bo)
2580             continue;
2581         total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2582     }
2583     total_moved_objs = id;
2584     id = 0;
2585 
2586     seq_puts(m, "\tInvalidated BOs:\n");
2587     spin_lock(&vm->invalidated_lock);
2588     list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2589         if (!bo_va->base.bo)
2590             continue;
2591         total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2592     }
2593     total_invalidated_objs = id;
2594     id = 0;
2595 
2596     seq_puts(m, "\tDone BOs:\n");
2597     list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2598         if (!bo_va->base.bo)
2599             continue;
2600         total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2601     }
2602     spin_unlock(&vm->invalidated_lock);
2603     total_done_objs = id;
2604 
2605     seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2606            total_idle_objs);
2607     seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2608            total_evicted_objs);
2609     seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2610            total_relocated_objs);
2611     seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2612            total_moved_objs);
2613     seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2614            total_invalidated_objs);
2615     seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2616            total_done_objs);
2617 }
2618 #endif