// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
    int n;

    /*
     * Compute a power-of-two bucket, but throw everything greater than
     * 16KiB into the same bucket: i.e. the buckets hold objects of
     * (1 page, 2 pages, 4 pages, 8+ pages).
     */
    n = fls(sz >> PAGE_SHIFT) - 1;
    if (n >= ARRAY_SIZE(pool->cache_list))
        n = ARRAY_SIZE(pool->cache_list) - 1;

    return &pool->cache_list[n];
}

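/*
 * Release the node's reference on its backing object, tear down its
 * i915_active tracker and free the node after an RCU grace period, as the
 * cache buckets are walked locklessly under RCU.
 */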
static void node_free(struct intel_gt_buffer_pool_node *node)
{
    i915_gem_object_put(node->obj);
    i915_active_fini(&node->active);
    kfree_rcu(node, rcu);
}

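/*
 * Reap, from the tail of each size bucket, nodes that have been idle for
 * longer than @keep jiffies. A node is claimed by zeroing its age, unlinked
 * under the pool lock and freed once the lock is dropped. Returns true if
 * any cached nodes remain after the pass.
 */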
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
    struct intel_gt_buffer_pool_node *node, *stale = NULL;
    bool active = false;
    int n;

    /* Free buffers that have not been used in the past second */
    for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
        struct list_head *list = &pool->cache_list[n];

        if (list_empty(list))
            continue;

        if (spin_trylock_irq(&pool->lock)) {
            struct list_head *pos;

            /* Most recent at head; oldest at tail */
            list_for_each_prev(pos, list) {
                unsigned long age;

                node = list_entry(pos, typeof(*node), link);

                age = READ_ONCE(node->age);
                if (!age || jiffies - age < keep)
                    break;

                /* Check we are the first to claim this node */
                if (!xchg(&node->age, 0))
                    break;

                node->free = stale;
                stale = node;
            }
            if (!list_is_last(pos, list))
                __list_del_many(pos, list);

            spin_unlock_irq(&pool->lock);
        }

        active |= !list_empty(list);
    }

    while ((node = stale)) {
        stale = stale->free;
        node_free(node);
    }

    return active;
}

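/*
 * Delayed-work reaper: free nodes that have not been used for at least a
 * second, and re-arm itself while any cached nodes remain.
 */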
static void pool_free_work(struct work_struct *wrk)
{
    struct intel_gt_buffer_pool *pool =
        container_of(wrk, typeof(*pool), work.work);

    if (pool_free_older_than(pool, HZ))
        schedule_delayed_work(&pool->work,
                      round_jiffies_up_relative(HZ));
}

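/*
 * i915_active retirement callback: the work using this node has completed,
 * so unpin its pages, let the shrinker purge the object again, return the
 * node to its size bucket with a fresh timestamp and kick the reaper.
 */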
static void pool_retire(struct i915_active *ref)
{
    struct intel_gt_buffer_pool_node *node =
        container_of(ref, typeof(*node), active);
    struct intel_gt_buffer_pool *pool = node->pool;
    struct list_head *list = bucket_for_size(pool, node->obj->base.size);
    unsigned long flags;

    if (node->pinned) {
        i915_gem_object_unpin_pages(node->obj);

        /* Return this object to the shrinker pool */
        i915_gem_object_make_purgeable(node->obj);
        node->pinned = false;
    }

    GEM_BUG_ON(node->age);
    spin_lock_irqsave(&pool->lock, flags);
    list_add_rcu(&node->link, list);
    WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
    spin_unlock_irqrestore(&pool->lock, flags);

    schedule_delayed_work(&pool->work,
                  round_jiffies_up_relative(HZ));
}

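/*
 * Pin the node's backing pages ahead of use and hide the object from the
 * shrinker; both steps are undone by pool_retire() once the node idles.
 */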
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
    assert_object_held(node->obj);

    if (node->pinned)
        return;

    __i915_gem_object_pin_pages(node->obj);
    /* Hide this pinned object from the shrinker until retired */
    i915_gem_object_make_unshrinkable(node->obj);
    node->pinned = true;
}

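/*
 * Allocate a fresh pool node backed by a read-only internal GEM object of
 * @sz bytes for the requested map @type.
 */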
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
        enum i915_map_type type)
{
    struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
    struct intel_gt_buffer_pool_node *node;
    struct drm_i915_gem_object *obj;

    node = kmalloc(sizeof(*node),
               GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
    if (!node)
        return ERR_PTR(-ENOMEM);

    node->age = 0;
    node->pool = pool;
    node->pinned = false;
    i915_active_init(&node->active, NULL, pool_retire, 0);

    obj = i915_gem_object_create_internal(gt->i915, sz);
    if (IS_ERR(obj)) {
        i915_active_fini(&node->active);
        kfree(node);
        return ERR_CAST(obj);
    }

    i915_gem_object_set_readonly(obj);

    node->type = type;
    node->obj = obj;
    return node;
}

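/*
 * Look up a cached, idle node of at least @size bytes with a matching map
 * @type, claiming it from its bucket, or create a new one. The node is
 * returned with its i915_active acquired; the caller must release it once
 * finished with the buffer.
 */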
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
             enum i915_map_type type)
{
    struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
    struct intel_gt_buffer_pool_node *node;
    struct list_head *list;
    int ret;

    size = PAGE_ALIGN(size);
    list = bucket_for_size(pool, size);

    rcu_read_lock();
    list_for_each_entry_rcu(node, list, link) {
        unsigned long age;

        if (node->obj->base.size < size)
            continue;

        if (node->type != type)
            continue;

        age = READ_ONCE(node->age);
        if (!age)
            continue;

        if (cmpxchg(&node->age, age, 0) == age) {
            spin_lock_irq(&pool->lock);
            list_del_rcu(&node->link);
            spin_unlock_irq(&pool->lock);
            break;
        }
    }
    rcu_read_unlock();

    if (&node->link == list) {
        node = node_create(pool, size, type);
        if (IS_ERR(node))
            return node;
    }

    ret = i915_active_acquire(&node->active);
    if (ret) {
        node_free(node);
        return ERR_PTR(ret);
    }

    return node;
}

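/* Set up the per-gt pool: the lock, the empty size buckets and the reaper */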
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
    struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
    int n;

    spin_lock_init(&pool->lock);
    for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
        INIT_LIST_HEAD(&pool->cache_list[n]);
    INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

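/*
 * Drain every idle node, however recently used, and cancel the reaper,
 * repeating until the delayed work is no longer queued.
 */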
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
    struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

    do {
        while (pool_free_older_than(pool, 0))
            ;
    } while (cancel_delayed_work_sync(&pool->work));
}

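/* The pool must already have been flushed; every bucket should be empty */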
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
    struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
    int n;

    for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
        GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}