/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

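/*
 * Wrap the cacheline flush in a dma_fence_work so that it can run
 * asynchronously behind any fences already attached to the object, while
 * keeping the object (and its pinned pages) alive until the work is done.
 */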
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

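/*
 * Flush the CPU cachelines for every page backing the object, then notify
 * frontbuffer tracking that the CPU has finished writing to it.
 */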
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

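/* Executed by the dma_fence_work once all awaited fences have signaled. */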
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

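/*
 * Fence release callback: undo clflush_work_create() by dropping the page
 * pin and the object reference once the fence is no longer needed.
 */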
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

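/*
 * Prepare an asynchronous flush: pin the object's pages and take a
 * reference for the worker. Returns NULL on allocation or pinning failure,
 * in which case the caller falls back to a synchronous flush.
 */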
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj);

	return clflush;
}

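/*
 * i915_gem_clflush_object() flushes the object's backing store out of the
 * CPU caches, asynchronously behind any outstanding fences unless
 * I915_CLFLUSH_SYNC is given. The caller must hold the object lock.
 * Returns false if no flush is required (the object is already coherent
 * for reads and I915_CLFLUSH_FORCE is not set), true if a flush was
 * performed or queued.
 */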
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping ensures that all commands issued before the invalidation
	 * are complete and the caches have been flushed for writing.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC) &&
	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (we are
		 * holding a pin on them as per the flush worker) to reach
		 * this point, which must mean we have already done the
		 * required flush-on-acquire, hence marking the object as
		 * clean here should be safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}