Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2013 Red Hat Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  * Authors: Dave Airlie
0023  *          Alon Levy
0024  */
0025 
0026 #include <linux/iosys-map.h>
0027 #include <linux/io-mapping.h>
0028 
0029 #include "qxl_drv.h"
0030 #include "qxl_object.h"
0031 
0032 static int __qxl_bo_pin(struct qxl_bo *bo);
0033 static void __qxl_bo_unpin(struct qxl_bo *bo);
0034 
/*
 * Final-release callback invoked by TTM when the last reference to the
 * buffer object is dropped.  Evicts any hardware surface backing, takes
 * the BO off the device's GEM object list, releases the GEM base and
 * frees the qxl_bo itself.
 */
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
    struct qxl_bo *bo;
    struct qxl_device *qdev;

    bo = to_qxl_bo(tbo);
    qdev = to_qxl(bo->tbo.base.dev);

    qxl_surface_evict(qdev, bo, false);
    /* A live vmap at destroy time means a missing qxl_bo_vunmap somewhere. */
    WARN_ON_ONCE(bo->map_count > 0);
    mutex_lock(&qdev->gem.mutex);
    list_del_init(&bo->list);
    mutex_unlock(&qdev->gem.mutex);
    drm_gem_object_release(&bo->tbo.base);
    kfree(bo);
}
0051 
0052 bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
0053 {
0054     if (bo->destroy == &qxl_ttm_bo_destroy)
0055         return true;
0056     return false;
0057 }
0058 
/*
 * Fill in qbo->placement/qbo->placements for the requested memory domain.
 *
 * Domains map to TTM memory types as follows:
 *   QXL_GEM_DOMAIN_VRAM    -> TTM_PL_VRAM
 *   QXL_GEM_DOMAIN_SURFACE -> TTM_PL_PRIV, with TTM_PL_VRAM as a second choice
 *   QXL_GEM_DOMAIN_CPU     -> TTM_PL_SYSTEM
 * Any other value falls back to TTM_PL_SYSTEM with no flags.
 *
 * Small BOs (<= one page) are placed top-down to reduce fragmentation of
 * the low part of the region.
 */
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
    u32 c = 0;          /* number of placement entries filled in */
    u32 pflag = 0;
    unsigned int i;

    if (qbo->tbo.base.size <= PAGE_SIZE)
        pflag |= TTM_PL_FLAG_TOPDOWN;

    qbo->placement.placement = qbo->placements;
    qbo->placement.busy_placement = qbo->placements;
    if (domain == QXL_GEM_DOMAIN_VRAM) {
        qbo->placements[c].mem_type = TTM_PL_VRAM;
        qbo->placements[c++].flags = pflag;
    }
    if (domain == QXL_GEM_DOMAIN_SURFACE) {
        /* Prefer the dedicated surface region, spill into VRAM if full. */
        qbo->placements[c].mem_type = TTM_PL_PRIV;
        qbo->placements[c++].flags = pflag;
        qbo->placements[c].mem_type = TTM_PL_VRAM;
        qbo->placements[c++].flags = pflag;
    }
    if (domain == QXL_GEM_DOMAIN_CPU) {
        qbo->placements[c].mem_type = TTM_PL_SYSTEM;
        qbo->placements[c++].flags = pflag;
    }
    if (!c) {
        /* Unknown domain: default to system memory. */
        qbo->placements[c].mem_type = TTM_PL_SYSTEM;
        qbo->placements[c++].flags = 0;
    }
    qbo->placement.num_placement = c;
    qbo->placement.num_busy_placement = c;
    /* No page-frame range restriction on any entry. */
    for (i = 0; i < c; ++i) {
        qbo->placements[i].fpfn = 0;
        qbo->placements[i].lpfn = 0;
    }
}
0095 
/* GEM object callbacks for qxl BOs; wired up in qxl_bo_create(). */
static const struct drm_gem_object_funcs qxl_object_funcs = {
    .free = qxl_gem_object_free,
    .open = qxl_gem_object_open,
    .close = qxl_gem_object_close,
    .pin = qxl_gem_prime_pin,
    .unpin = qxl_gem_prime_unpin,
    .get_sg_table = qxl_gem_prime_get_sg_table,
    .vmap = qxl_gem_prime_vmap,
    .vunmap = qxl_gem_prime_vunmap,
    .mmap = drm_gem_ttm_mmap,
    .print_info = drm_gem_ttm_print_info,
};
0108 
/*
 * Allocate and initialize a qxl buffer object.
 *
 * @qdev:     device the BO belongs to
 * @size:     requested size in bytes; rounded up to a page multiple
 * @kernel:   true for a kernel-internal BO (ttm_bo_type_kernel, blocking
 *            allocation context); false for a userspace-visible one
 * @pinned:   pin the BO immediately after creation
 * @domain:   QXL_GEM_DOMAIN_* initial placement domain
 * @priority: TTM eviction priority
 * @surf:     optional surface parameters copied into the BO
 * @bo_ptr:   out: the new BO on success, NULL on failure
 *
 * Returns 0 on success or a negative errno.
 */
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
          bool kernel, bool pinned, u32 domain, u32 priority,
          struct qxl_surface *surf,
          struct qxl_bo **bo_ptr)
{
    /* Userspace BOs may be allocated interruptibly; kernel BOs may not. */
    struct ttm_operation_ctx ctx = { !kernel, false };
    struct qxl_bo *bo;
    enum ttm_bo_type type;
    int r;

    if (kernel)
        type = ttm_bo_type_kernel;
    else
        type = ttm_bo_type_device;
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;
    size = roundup(size, PAGE_SIZE);
    r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
    if (unlikely(r)) {
        kfree(bo);
        return r;
    }
    bo->tbo.base.funcs = &qxl_object_funcs;
    bo->type = domain;
    bo->surface_id = 0;
    INIT_LIST_HEAD(&bo->list);

    if (surf)
        bo->surf = *surf;

    qxl_ttm_placement_from_domain(bo, domain);

    bo->tbo.priority = priority;
    r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
                 &bo->placement, 0, &ctx, NULL, NULL,
                 &qxl_ttm_bo_destroy);
    if (unlikely(r != 0)) {
        if (r != -ERESTARTSYS)
            dev_err(qdev->ddev.dev,
                "object_init failed for (%lu, 0x%08X)\n",
                size, domain);
        /*
         * No kfree(bo) here: on failure TTM invokes the destroy
         * callback (qxl_ttm_bo_destroy), which frees the object.
         */
        return r;
    }
    /* BO is still reserved here, so pinning needs no extra locking. */
    if (pinned)
        ttm_bo_pin(&bo->tbo);
    ttm_bo_unreserve(&bo->tbo);
    *bo_ptr = bo;
    return 0;
}
0160 
/*
 * Map the BO into kernel address space; caller must hold the BO's
 * reservation.  Mappings are refcounted via bo->map_count: repeated
 * calls return the existing mapping, and qxl_bo_vunmap_locked() must be
 * called once per successful vmap.
 *
 * Returns 0 and fills *map on success, negative errno on failure.
 */
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
    int r;

    dma_resv_assert_held(bo->tbo.base.resv);

    /* Already mapped: just take another reference. */
    if (bo->kptr) {
        bo->map_count++;
        goto out;
    }
    r = ttm_bo_vmap(&bo->tbo, &bo->map);
    if (r)
        return r;
    bo->map_count = 1;

    /* TODO: Remove kptr in favor of map everywhere. */
    if (bo->map.is_iomem)
        bo->kptr = (void *)bo->map.vaddr_iomem;
    else
        bo->kptr = bo->map.vaddr;

out:
    *map = bo->map;
    return 0;
}
0186 
/*
 * Reserve, pin and vmap a BO in one call.  The matching teardown is
 * qxl_bo_vunmap().  Returns 0 on success, negative errno otherwise.
 */
int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
    int ret;

    ret = qxl_bo_reserve(bo);
    if (ret)
        return ret;

    ret = __qxl_bo_pin(bo);
    if (!ret)
        ret = qxl_bo_vmap_locked(bo, map);

    qxl_bo_unreserve(bo);
    return ret;
}
0205 
/*
 * Map a single page of a BO for short-term access.
 *
 * For BOs resident in VRAM or the surface region the page is mapped via
 * the device's io_mapping with io_mapping_map_atomic_wc(); such mappings
 * must be released with qxl_bo_kunmap_atomic_page().  For any other
 * placement this falls back to the (refcounted) whole-BO vmap, which
 * requires the caller to hold the BO's reservation.
 *
 * @page_offset is in pages for the fallback path but is added to a byte
 * offset on the io_mapping path; callers pass values consistent with the
 * placement they expect.  Returns NULL if the fallback vmap fails.
 */
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                  struct qxl_bo *bo, int page_offset)
{
    unsigned long offset;
    void *rptr;
    int ret;
    struct io_mapping *map;
    struct iosys_map bo_map;

    if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
        map = qdev->vram_mapping;
    else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
        map = qdev->surface_mapping;
    else
        goto fallback;

    offset = bo->tbo.resource->start << PAGE_SHIFT;
    return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
    /* Reuse an existing kernel mapping if there is one. */
    if (bo->kptr) {
        rptr = bo->kptr + (page_offset * PAGE_SIZE);
        return rptr;
    }

    ret = qxl_bo_vmap_locked(bo, &bo_map);
    if (ret)
        return NULL;
    rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

    rptr += page_offset * PAGE_SIZE;
    return rptr;
}
0238 
/*
 * Drop one reference on the BO's kernel mapping; caller must hold the
 * BO's reservation.  The underlying ttm_bo_vunmap() happens only when
 * the last reference goes away.  A no-op if the BO is not mapped.
 */
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
    dma_resv_assert_held(bo->tbo.base.resv);

    if (bo->kptr == NULL)
        return;
    bo->map_count--;
    if (bo->map_count > 0)
        return;
    bo->kptr = NULL;
    ttm_bo_vunmap(&bo->tbo, &bo->map);
}
0251 
/*
 * Counterpart of qxl_bo_vmap(): reserve the BO, drop one vmap reference
 * and one pin reference, then unreserve.  Returns 0, or a negative errno
 * if the reservation could not be taken.
 */
int qxl_bo_vunmap(struct qxl_bo *bo)
{
    int ret;

    ret = qxl_bo_reserve(bo);
    if (ret)
        return ret;

    qxl_bo_vunmap_locked(bo);
    __qxl_bo_unpin(bo);

    qxl_bo_unreserve(bo);
    return 0;
}
0265 
0266 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
0267                    struct qxl_bo *bo, void *pmap)
0268 {
0269     if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
0270         (bo->tbo.resource->mem_type != TTM_PL_PRIV))
0271         goto fallback;
0272 
0273     io_mapping_unmap_atomic(pmap);
0274     return;
0275  fallback:
0276     qxl_bo_vunmap_locked(bo);
0277 }
0278 
0279 void qxl_bo_unref(struct qxl_bo **bo)
0280 {
0281     if ((*bo) == NULL)
0282         return;
0283 
0284     drm_gem_object_put(&(*bo)->tbo.base);
0285     *bo = NULL;
0286 }
0287 
0288 struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
0289 {
0290     drm_gem_object_get(&bo->tbo.base);
0291     return bo;
0292 }
0293 
/*
 * Pin a BO; caller must hold the BO's reservation.  If the BO is already
 * pinned this only bumps the pin count.  Otherwise the BO is first
 * validated into its preferred domain (bo->type) and pinned there.
 * Returns 0 on success or the ttm_bo_validate() error.
 */
static int __qxl_bo_pin(struct qxl_bo *bo)
{
    /* Non-interruptible, no-reclaim context for the validate. */
    struct ttm_operation_ctx ctx = { false, false };
    struct drm_device *ddev = bo->tbo.base.dev;
    int r;

    if (bo->tbo.pin_count) {
        ttm_bo_pin(&bo->tbo);
        return 0;
    }
    qxl_ttm_placement_from_domain(bo, bo->type);
    r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    if (likely(r == 0))
        ttm_bo_pin(&bo->tbo);
    if (unlikely(r != 0))
        dev_err(ddev->dev, "%p pin failed\n", bo);
    return r;
}
0312 
/*
 * Drop one pin reference; callers reserve the BO first (see
 * qxl_bo_unpin() and qxl_bo_vunmap()).
 */
static void __qxl_bo_unpin(struct qxl_bo *bo)
{
    ttm_bo_unpin(&bo->tbo);
}
0317 
/*
 * Reserve the BO, then pin it.  If the caller already holds the
 * reservation, use the internal helper __qxl_bo_pin() directly instead.
 * Returns 0 on success or a negative errno.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
    int r;

    r = qxl_bo_reserve(bo);
    if (r)
        return r;

    r = __qxl_bo_pin(bo);
    qxl_bo_unreserve(bo);
    return r;
}
0335 
/*
 * Reserve the BO, then unpin it.  If the caller already holds the
 * reservation, use the internal helper __qxl_bo_unpin() directly
 * instead.  Returns 0, or a negative errno if reservation failed.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
    int r;

    r = qxl_bo_reserve(bo);
    if (r)
        return r;

    __qxl_bo_unpin(bo);
    qxl_bo_unreserve(bo);
    return 0;
}
0353 
/*
 * Teardown helper: complain about and release any BOs userspace left
 * behind on the device's GEM object list.  Dropping the GEM reference
 * should trigger qxl_ttm_bo_destroy for each object.
 */
void qxl_bo_force_delete(struct qxl_device *qdev)
{
    struct qxl_bo *bo, *n;

    if (list_empty(&qdev->gem.objects))
        return;
    dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
    list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
        /*
         * The cast peeks at the raw kref/refcount word for the log
         * message only — fragile, but purely informational.
         */
        dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
            &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
            *((unsigned long *)&bo->tbo.base.refcount));
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        /* this should unref the ttm bo */
        drm_gem_object_put(&bo->tbo.base);
    }
}
0372 
/* BO handling is entirely backed by TTM; just bring TTM up. */
int qxl_bo_init(struct qxl_device *qdev)
{
    return qxl_ttm_init(qdev);
}
0377 
/* Counterpart of qxl_bo_init(): tear TTM down. */
void qxl_bo_fini(struct qxl_device *qdev)
{
    qxl_ttm_fini(qdev);
}
0382 
0383 int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
0384 {
0385     int ret;
0386 
0387     if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
0388         /* allocate a surface id for this surface now */
0389         ret = qxl_surface_id_alloc(qdev, bo);
0390         if (ret)
0391             return ret;
0392 
0393         ret = qxl_hw_surface_alloc(qdev, bo);
0394         if (ret)
0395             return ret;
0396     }
0397     return 0;
0398 }
0399 
0400 int qxl_surf_evict(struct qxl_device *qdev)
0401 {
0402     struct ttm_resource_manager *man;
0403 
0404     man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
0405     return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
0406 }
0407 
0408 int qxl_vram_evict(struct qxl_device *qdev)
0409 {
0410     struct ttm_resource_manager *man;
0411 
0412     man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
0413     return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
0414 }