/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

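/*
 * A clflush packages the cache flush as dma_fence_work so that it can run
 * asynchronously and be signalled as a fence on the object's reservation
 * object once the flush has completed.
 */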
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

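/*
 * Synchronously flush the CPU cache lines for all of the object's backing
 * pages and notify frontbuffer tracking of the CPU access.
 */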
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

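/*
 * Work callback invoked by the dma_fence_work: pin the backing pages so
 * they cannot be reaped while we flush them, do the flush, then unpin.
 */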
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	struct drm_i915_gem_object *obj = clflush->obj;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

	return 0;
}

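/*
 * Drop the object reference taken in clflush_work_create() when the fence
 * work is released.
 */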
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

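/*
 * Allocate and initialise the asynchronous flush work, taking a reference
 * on the object for the lifetime of the work. Returns NULL if the
 * allocation fails, in which case the caller flushes synchronously instead.
 */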
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

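/*
 * Flush dirty CPU cache lines for @obj so that its contents are visible to
 * non-coherent readers. Returns false when the object is already coherent
 * and needs no flush; returns true when the flush has been queued,
 * performed, or is unnecessary because the object has no backing pages.
 */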
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
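	/*
	 * If the asynchronous work was created, order the flush after every
	 * fence already attached to the object and publish it as the new
	 * exclusive fence so later users wait for the flush to complete.
	 * Otherwise, fall back to flushing immediately on this CPU.
	 */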
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		__do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}
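
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * needs the CPU cache flushed before handing the pages to a non-coherent
 * reader might do something like the following. The locking calls shown
 * are assumptions about the caller's context; the only hard requirement
 * enforced here is that the object lock is held (see assert_object_held()
 * above).
 *
 *	i915_gem_object_lock(obj);
 *	if (obj->cache_dirty)
 *		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *	i915_gem_object_unlock(obj);
 *
 * Passing I915_CLFLUSH_SYNC forces any needed flush to be performed
 * immediately in the caller's context rather than queued as asynchronous
 * fence work.
 */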