treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/i915/gem/i915_gem_phys.c (linux/fpc-iii.git)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}
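
/*
 * Write the contents of the contiguous DMA buffer back into the shmem
 * pages (when the object is dirty), then free the sg_table and the buffer.
 */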
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};
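
/*
 * Convert a shmem-backed object to a contiguous physical allocation, as
 * needed by hardware that can only address a single physical region (e.g.
 * the legacy cursor path). On success the old shmem pages are released and
 * the object stays perma-pinned to the phys backing until final release.
 */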
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages)) {
		i915_gem_shmem_ops.put_pages(obj, pages);
		i915_gem_object_release_memory_region(obj);
	}

	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif