treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/vkms/vkms_gem.c (linux/fpc-iii.git)
// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
#include <drm/drm_prime.h>

#include "vkms_drv.h"
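
/*
 * Common allocation path: allocate the vkms_gem_object, initialize the
 * embedded GEM object with a page-aligned size and set up the mutex that
 * serializes access to the backing pages.
 */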
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
                                                 u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        size = roundup(size, PAGE_SIZE);
        ret = drm_gem_object_init(dev, &obj->gem, size);
        if (ret) {
                kfree(obj);
                return ERR_PTR(ret);
        }

        mutex_init(&obj->pages_lock);

        return obj;
}
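
/*
 * Release path: by the time the object is freed, the pinned pages and the
 * kernel mapping are expected to be gone already (every vkms_gem_vmap()
 * balanced by a vkms_gem_vunmap()), hence the WARN_ONs below.
 */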
void vkms_gem_free_object(struct drm_gem_object *obj)
{
        struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
                                                   gem);

        WARN_ON(gem->pages);
        WARN_ON(gem->vaddr);

        mutex_destroy(&gem->pages_lock);
        drm_gem_object_release(obj);
        kfree(gem);
}
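
/*
 * Page fault handler for mmap'ed vkms GEM objects: if the backing pages are
 * already pinned (obj->pages), hand out the cached page; otherwise fall back
 * to reading the page from the shmem mapping and translate any error into
 * the corresponding VM_FAULT_* code.
 */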
vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct vkms_gem_object *obj = vma->vm_private_data;
        unsigned long vaddr = vmf->address;
        pgoff_t page_offset;
        loff_t num_pages;
        vm_fault_t ret = VM_FAULT_SIGBUS;

        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
        num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

        if (page_offset > num_pages)
                return VM_FAULT_SIGBUS;

        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                get_page(obj->pages[page_offset]);
                vmf->page = obj->pages[page_offset];
                ret = 0;
        }
        mutex_unlock(&obj->pages_lock);
        if (ret) {
                struct page *page;
                struct address_space *mapping;

                mapping = file_inode(obj->gem.filp)->i_mapping;
                page = shmem_read_mapping_page(mapping, page_offset);

                if (!IS_ERR(page)) {
                        vmf->page = page;
                        ret = 0;
                } else {
                        switch (PTR_ERR(page)) {
                        case -ENOSPC:
                        case -ENOMEM:
                                ret = VM_FAULT_OOM;
                                break;
                        case -EBUSY:
                                ret = VM_FAULT_RETRY;
                                break;
                        case -EFAULT:
                        case -EINVAL:
                                ret = VM_FAULT_SIGBUS;
                                break;
                        default:
                                WARN_ON(PTR_ERR(page));
                                ret = VM_FAULT_SIGBUS;
                                break;
                        }
                }
        }
        return ret;
}
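
/*
 * Allocate a GEM object of the requested size and create a userspace handle
 * for it. The reference taken at allocation is dropped unconditionally once
 * drm_gem_handle_create() has either taken its own reference or failed.
 */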
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
                                       struct drm_file *file,
                                       u32 *handle,
                                       u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        if (!file || !dev || !handle)
                return ERR_PTR(-EINVAL);

        obj = __vkms_gem_create(dev, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
        if (ret)
                return ERR_PTR(ret);

        return &obj->gem;
}
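
/*
 * Dumb-buffer creation hook: compute pitch and size from the requested
 * geometry and back the dumb buffer with a vkms GEM object.
 */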
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gem_obj;
        u64 pitch, size;

        if (!args || !dev || !file)
                return -EINVAL;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        size = pitch * args->height;

        if (!size)
                return -EINVAL;

        gem_obj = vkms_gem_create(dev, file, &args->handle, size);
        if (IS_ERR(gem_obj))
                return PTR_ERR(gem_obj);

        args->size = gem_obj->size;
        args->pitch = pitch;

        DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

        return 0;
}
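
/*
 * Pin the shmem backing pages on first use. The cmpxchg() keeps the helper
 * safe against a concurrent caller: whoever loses the race to populate
 * vkms_obj->pages immediately drops the pages it just acquired.
 */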
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
        struct drm_gem_object *gem_obj = &vkms_obj->gem;

        if (!vkms_obj->pages) {
                struct page **pages = drm_gem_get_pages(gem_obj);

                if (IS_ERR(pages))
                        return pages;

                if (cmpxchg(&vkms_obj->pages, NULL, pages))
                        drm_gem_put_pages(gem_obj, pages, false, true);
        }

        return vkms_obj->pages;
}
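
/*
 * Drop one vmap reference; when the last one goes away, tear down the kernel
 * mapping and unpin the backing pages.
 */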
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

        mutex_lock(&vkms_obj->pages_lock);
        if (vkms_obj->vmap_count < 1) {
                WARN_ON(vkms_obj->vaddr);
                WARN_ON(vkms_obj->pages);
                mutex_unlock(&vkms_obj->pages_lock);
                return;
        }

        vkms_obj->vmap_count--;

        if (vkms_obj->vmap_count == 0) {
                vunmap(vkms_obj->vaddr);
                vkms_obj->vaddr = NULL;
                drm_gem_put_pages(obj, vkms_obj->pages, false, true);
                vkms_obj->pages = NULL;
        }

        mutex_unlock(&vkms_obj->pages_lock);
}
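
/*
 * Map the object's backing pages into a contiguous kernel virtual address
 * range. The mapping is created on the first call and refcounted through
 * vmap_count afterwards; on vmap() failure the freshly pinned pages are
 * released again.
 */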
int vkms_gem_vmap(struct drm_gem_object *obj)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
        int ret = 0;

        mutex_lock(&vkms_obj->pages_lock);

        if (!vkms_obj->vaddr) {
                unsigned int n_pages = obj->size >> PAGE_SHIFT;
                struct page **pages = _get_pages(vkms_obj);

                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
                if (!vkms_obj->vaddr)
                        goto err_vmap;
        }

        vkms_obj->vmap_count++;
        goto out;

err_vmap:
        ret = -ENOMEM;
        drm_gem_put_pages(obj, vkms_obj->pages, false, true);
        vkms_obj->pages = NULL;
out:
        mutex_unlock(&vkms_obj->pages_lock);
        return ret;
}
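
/*
 * PRIME import: wrap the pages described by the imported sg_table in a vkms
 * GEM object so the imported dma-buf can be used like a natively allocated
 * buffer.
 */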
struct drm_gem_object *
vkms_prime_import_sg_table(struct drm_device *dev,
                           struct dma_buf_attachment *attach,
                           struct sg_table *sg)
{
        struct vkms_gem_object *obj;
        int npages;

        obj = __vkms_gem_create(dev, attach->dmabuf->size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
        DRM_DEBUG_PRIME("Importing %d pages\n", npages);

        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                vkms_gem_free_object(&obj->gem);
                return ERR_PTR(-ENOMEM);
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
        return &obj->gem;
}
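
/*
 * A minimal sketch (not part of this file) of how these entry points are
 * typically wired up in vkms_drv.c for this kernel generation; the struct
 * field names and the vkms_gem_vm_ops symbol are assumptions and may differ
 * from the actual driver:
 *
 *	static struct drm_driver vkms_driver = {
 *		.driver_features	= DRIVER_MODESET | DRIVER_ATOMIC |
 *					  DRIVER_GEM,
 *		.dumb_create		= vkms_dumb_create,
 *		.gem_vm_ops		= &vkms_gem_vm_ops,
 *		.gem_free_object_unlocked = vkms_gem_free_object,
 *		.gem_prime_import_sg_table = vkms_prime_import_sg_table,
 *		...
 *	};
 */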