/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

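/*
 * "Phys" objects keep their backing store in a single contiguous DMA
 * allocation rather than in individual shmem pages, for hardware that
 * requires physically contiguous memory (e.g. cursors on very old
 * platforms). get_pages fills that allocation from the object's shmem
 * file; put_pages writes any dirty contents back out.
 */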
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int err;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

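/*
 * Flush the contiguous copy back into the shmem pages (only when the
 * object was marked dirty) before releasing the DMA allocation.
 */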
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			vaddr += PAGE_SIZE;
		}

		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

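/* The object still owns its shmem file; drop that reference on final release. */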
static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

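/*
 * i915_gem_object_attach_phys: swap a shmem-backed object over to the
 * contiguous physical backing above. The new backing is populated from
 * the existing shmem contents and then pinned until the object is
 * released.
 */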
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages)) {
		i915_gem_shmem_ops.put_pages(obj, pages);
		i915_gem_object_release_memory_region(obj);
	}
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif