// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/limits.h>
#include <linux/swiotlb.h>

#include <drm/ttm/ttm_range_manager.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <core/tegra.h>

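/*
 * Common ->free() hook shared by the three resource managers below; it
 * simply hands the resource back to nouveau_mem_del().
 */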
static void
nouveau_manager_del(struct ttm_resource_manager *man,
            struct ttm_resource *reg)
{
    nouveau_mem_del(man, reg);
}

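/*
 * ->alloc() hook for the VRAM manager: allocate a nouveau_mem, initialise
 * the ttm_resource, then back it with VRAM via nouveau_mem_vram(), undoing
 * the allocation if that fails.
 */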
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
             struct ttm_buffer_object *bo,
             const struct ttm_place *place,
             struct ttm_resource **res)
{
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
    int ret;

    if (drm->client.device.info.ram_size == 0)
        return -ENOMEM;

    ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
    if (ret)
        return ret;

    ttm_resource_init(bo, place, *res);

    ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
    if (ret) {
        nouveau_mem_del(man, *res);
        return ret;
    }

    return 0;
}

const struct ttm_resource_manager_func nouveau_vram_manager = {
    .alloc = nouveau_vram_manager_new,
    .free = nouveau_manager_del,
};

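/*
 * ->alloc() hook for the GART manager used on Tesla and newer: the resource
 * gets no GPU virtual address here, so its start offset is left at zero.
 */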
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
             struct ttm_buffer_object *bo,
             const struct ttm_place *place,
             struct ttm_resource **res)
{
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
    int ret;

    ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
    if (ret)
        return ret;

    ttm_resource_init(bo, place, *res);
    (*res)->start = 0;
    return 0;
}

const struct ttm_resource_manager_func nouveau_gart_manager = {
    .alloc = nouveau_gart_manager_new,
    .free = nouveau_manager_del,
};

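/*
 * ->alloc() hook for the pre-NV50 GART manager: reserve a GPU virtual
 * address range up front with nvif_vmm_get() and record it as the
 * resource's start page.
 */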
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
              struct ttm_buffer_object *bo,
              const struct ttm_place *place,
              struct ttm_resource **res)
{
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
    struct nouveau_mem *mem;
    int ret;

    ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
    if (ret)
        return ret;

    mem = nouveau_mem(*res);
    ttm_resource_init(bo, place, *res);
    ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
               (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
    if (ret) {
        nouveau_mem_del(man, *res);
        return ret;
    }

    (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
    return 0;
}

const struct ttm_resource_manager_func nv04_gart_manager = {
    .alloc = nv04_gart_manager_new,
    .free = nouveau_manager_del,
};

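/*
 * Look up the MMU memory types used for host (system) memory of the given
 * kind, both coherent and non-coherent, and cache them in drm->ttm.
 */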
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
    struct nvif_mmu *mmu = &drm->client.mmu;
    int typei;

    typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
                        kind | NVIF_MEM_COHERENT);
    if (typei < 0)
        return -ENOSYS;

    drm->ttm.type_host[!!kind] = typei;

    typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
    if (typei < 0)
        return -ENOSYS;

    drm->ttm.type_ncoh[!!kind] = typei;
    return 0;
}

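/*
 * Set up the TTM_PL_VRAM manager: Tesla and newer chipsets use the custom
 * nouveau_vram_manager above, older ones fall back to TTM's range manager.
 */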
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
        struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

        if (!man)
            return -ENOMEM;

        man->func = &nouveau_vram_manager;

        ttm_resource_manager_init(man, &drm->ttm.bdev,
                      drm->gem.vram_available >> PAGE_SHIFT);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
        ttm_resource_manager_set_used(man, true);
        return 0;
    } else {
        return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
                      drm->gem.vram_available >> PAGE_SHIFT);
    }
}

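/* Tear down whichever TTM_PL_VRAM manager nouveau_ttm_init_vram() created. */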
static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
    struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
        ttm_resource_manager_set_used(man, false);
        ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
        kfree(man);
    } else
        ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
}

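/*
 * Set up the TTM_PL_TT (GART) manager: Tesla and newer use
 * nouveau_gart_manager, pre-NV50 chipsets without an AGP bridge use
 * nv04_gart_manager, and AGP systems fall back to TTM's range manager.
 */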
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
    struct ttm_resource_manager *man;
    unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
    const struct ttm_resource_manager_func *func = NULL;

    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
        func = &nouveau_gart_manager;
    else if (!drm->agp.bridge)
        func = &nv04_gart_manager;
    else
        return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
                      size_pages);

    man = kzalloc(sizeof(*man), GFP_KERNEL);
    if (!man)
        return -ENOMEM;

    man->func = func;
    man->use_tt = true;
    ttm_resource_manager_init(man, &drm->ttm.bdev, size_pages);
    ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
    ttm_resource_manager_set_used(man, true);
    return 0;
}

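/* Tear down whichever TTM_PL_TT manager nouveau_ttm_init_gtt() created. */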
static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
    struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);

    if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
        drm->agp.bridge)
        ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
    else {
        ttm_resource_manager_set_used(man, false);
        ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
        kfree(man);
    }
}

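/*
 * Top-level TTM bring-up for the driver: pick the host and VRAM memory
 * types, take over any AGP aperture, create the TTM device, then set up the
 * VRAM and GART resource managers.
 */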
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
    struct nvkm_device *device = nvxx_device(&drm->client.device);
    struct nvkm_pci *pci = device->pci;
    struct nvif_mmu *mmu = &drm->client.mmu;
    struct drm_device *dev = drm->dev;
    bool need_swiotlb = false;
    int typei, ret;

    ret = nouveau_ttm_init_host(drm, 0);
    if (ret)
        return ret;

    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
        drm->client.device.info.chipset != 0x50) {
        ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
        if (ret)
            return ret;
    }

    if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
        drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
        typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
                       NVIF_MEM_KIND |
                       NVIF_MEM_COMP |
                       NVIF_MEM_DISP);
        if (typei < 0)
            return -ENOSYS;

        drm->ttm.type_vram = typei;
    } else {
        drm->ttm.type_vram = -1;
    }

    if (pci && pci->agp.bridge) {
        drm->agp.bridge = pci->agp.bridge;
        drm->agp.base = pci->agp.base;
        drm->agp.size = pci->agp.size;
        drm->agp.cma = pci->agp.cma;
    }

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
    need_swiotlb = is_swiotlb_active(dev->dev);
#endif

    ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
                  dev->anon_inode->i_mapping,
                  dev->vma_offset_manager, need_swiotlb,
                  drm->client.mmu.dmabits <= 32);
    if (ret) {
        NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
        return ret;
    }

    /* VRAM init */
    drm->gem.vram_available = drm->client.device.info.ram_user;

    arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                   device->func->resource_size(device, 1));

    ret = nouveau_ttm_init_vram(drm);
    if (ret) {
        NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
        return ret;
    }

    drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                     device->func->resource_size(device, 1));

    /* GART init */
    if (!drm->agp.bridge) {
        drm->gem.gart_available = drm->client.vmm.vmm.limit;
    } else {
        drm->gem.gart_available = drm->agp.size;
    }

    ret = nouveau_ttm_init_gtt(drm);
    if (ret) {
        NV_ERROR(drm, "GART mm init failed, %d\n", ret);
        return ret;
    }

    mutex_init(&drm->ttm.io_reserve_mutex);
    INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);

    NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
    NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
    return 0;
}

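/*
 * Undo nouveau_ttm_init(): destroy the resource managers and TTM device,
 * then drop the write-combining MTRR and memtype reservation on the VRAM
 * aperture (resource 1).
 */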
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
    struct nvkm_device *device = nvxx_device(&drm->client.device);

    nouveau_ttm_fini_vram(drm);
    nouveau_ttm_fini_gtt(drm);

    ttm_device_fini(&drm->ttm.bdev);

    arch_phys_wc_del(drm->ttm.mtrr);
    drm->ttm.mtrr = 0;
    arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
                device->func->resource_size(device, 1));
}