/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

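/*
 * A clflush bundles the asynchronous flush work with a reference to the GEM
 * object whose backing pages are being flushed. The embedded dma_fence_work
 * lets the flush be ordered against other fences on the object's reservation
 * and signalled on completion.
 */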
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

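/* Flush the CPU cache lines backing the object's pages and tell the
 * frontbuffer tracking that the CPU has written to the object.
 */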
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

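/*
 * Worker callback for the flush fence: runs once any fences we awaited have
 * signalled. Pin the backing pages so they cannot be reaped under us, flush
 * them, then drop the pin.
 */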
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	struct drm_i915_gem_object *obj = clflush->obj;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

	return 0;
}

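/*
 * Fence release callback: drop the object reference taken in
 * clflush_work_create() once the flush work has been signalled and freed.
 */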
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_put(clflush->obj);
}

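/* Hook the flush worker and release callback into the fence-work machinery. */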
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

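/*
 * Allocate and initialise the asynchronous flush work for the object, taking
 * a reference on it that is not released until clflush_release(). Returns
 * NULL on allocation failure, in which case the caller falls back to a
 * synchronous flush.
 */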
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

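/*
 * i915_gem_clflush_object - flush the CPU caches for an object's backing store
 *
 * Skips the flush for objects that are known to be coherent; otherwise either
 * queues an asynchronous flush ordered against the object's reservation or,
 * with I915_CLFLUSH_SYNC (or if the async work cannot be allocated), flushes
 * immediately. Returns true if a flush was performed or queued.
 */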
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(to_i915(obj->base.dev)),
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		__do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}