treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <linux/uaccess.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
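
/*
 * List destruction is deferred through RCU so that lockless readers in
 * amdgpu_bo_list_get() can still dereference a list that is concurrently
 * being removed from the IDR; the BO references themselves are dropped
 * immediately when the last kref goes away.
 */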
static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);

	kvfree(list);
}

static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		amdgpu_bo_unref(&bo);
	}

	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
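
/*
 * Build a new BO list from the array of {handle, priority} pairs supplied
 * by userspace. The list header and its entry array live in a single
 * kvmalloc'ed allocation; the overflow check below keeps the size
 * computation from wrapping on 32-bit.
 */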
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  unsigned num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	size_t size;
	unsigned i;
	int r;

	if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list))
			/ sizeof(struct amdgpu_bo_list_entry))
		return -EINVAL;

	size = sizeof(struct amdgpu_bo_list);
	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
	list = kvmalloc(size, GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);
	list->gds_obj = NULL;
	list->gws_obj = NULL;
	list->oa_obj = NULL;

	array = amdgpu_bo_list_array_entry(list, 0);
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
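
	/*
	 * Resolve every handle to a BO reference. Userptr BOs are packed
	 * from the tail of the array (first_userptr counts down), all other
	 * BOs from the head (last_entry counts up), so each group ends up
	 * contiguous.
	 */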
	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &bo->tbo;

		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = bo;

		total_size += amdgpu_bo_size(bo);
		trace_amdgpu_bo_list_set(list, bo);
	}

	list->first_userptr = first_userptr;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	*result = list;
	return 0;
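
	/*
	 * Unwind the references taken so far: [0, last_entry) holds regular
	 * BOs, [first_userptr, num_entries) holds userptr BOs; the gap in
	 * between was never filled in.
	 */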
error_free:
	for (i = 0; i < last_entry; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	for (i = first_userptr; i < num_entries; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	kvfree(list);
	return r;
}
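
/*
 * Drop the handle's IDR slot; the list itself stays alive until the last
 * reference (e.g. from an in-flight command submission) is put.
 */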
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}
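
/*
 * Lockless lookup: the IDR is searched under rcu_read_lock(), and
 * kref_get_unless_zero() rejects a list whose final reference was dropped
 * between the idr_find() and here, which is why freeing is RCU-deferred.
 */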
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	return -ENOENT;
}
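
/*
 * Flatten the list into @validated, highest priority first. Per-priority
 * buckets keep this O(n) and stable; see the comments inside.
 */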
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	struct amdgpu_bo_list_entry *e;
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		unsigned priority = e->priority;

		if (!bo->parent)
			list_add_tail(&e->tv.head, &bucket[priority]);

		e->user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}
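
/*
 * Copy the userspace entry array into a kernel buffer. Userspace may pass
 * a different (older or newer) drm_amdgpu_bo_list_entry size; only the
 * part both sides know about is copied, and for a shorter userspace entry
 * the remainder is zero-filled.
 */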
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;
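
	/*
	 * r is preset to -EFAULT so that both copy_from_user() failure
	 * paths below can share the error_free label.
	 */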
	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number *
			in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}
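
/*
 * DRM_AMDGPU_BO_LIST ioctl: CREATE allocates a new list and hands its IDR
 * handle back to userspace, UPDATE builds a replacement list and swaps it
 * in under the existing handle, DESTROY drops the handle.
 */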
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		return r;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0)
			goto error_put_list;

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			r = PTR_ERR(old);
			goto error_put_list;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_put_list:
	amdgpu_bo_list_put(list);

error_free:
	kvfree(info);
	return r;
}
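
/*
 * Illustrative userspace call sequence, a sketch only (not part of this
 * file), assuming the uapi definitions in include/uapi/drm/amdgpu_drm.h
 * and libdrm's drmCommandWriteRead():
 *
 *	struct drm_amdgpu_bo_list_entry entry = {
 *		.bo_handle = gem_handle,	// handle of an existing GEM BO
 *		.bo_priority = 0,
 *	};
 *	union drm_amdgpu_bo_list args = {
 *		.in = {
 *			.operation = AMDGPU_BO_LIST_OP_CREATE,
 *			.bo_number = 1,
 *			.bo_info_size = sizeof(entry),
 *			.bo_info_ptr = (uintptr_t)&entry,
 *		},
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_AMDGPU_BO_LIST, &args, sizeof(args));
 *	// On success, args.out.list_handle names the list for later
 *	// command submission or an UPDATE/DESTROY operation.
 */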