drivers/gpu/drm/i915/gem/i915_gem_pages.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}
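/*
 * Worked example for the page-size calculation above (illustrative note
 * added for clarity, not part of the original file): suppose the backing
 * store was allocated as a mix of 2M and 64K chunks, so
 * sg_page_sizes == SZ_2M | SZ_64K, and the platform supports 4K, 64K and
 * 2M pages. For each supported bit i, BIT(i) is kept in page_sizes.sg
 * whenever phys & (~0u << i) is non-zero, i.e. some chunk is at least
 * BIT(i) large. Here that yields sg == SZ_4K | SZ_64K | SZ_2M: any of
 * those page sizes may later be used when binding into the GTT.
 */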
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
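/*
 * Illustrative caller sketch (an assumption added for clarity, not taken
 * from this file): the pin count is a reference count, so each successful
 * pin must be balanced by exactly one unpin once CPU access is finished:
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... obj->mm.pages is now safe to dereference ...
 *	i915_gem_object_unpin_pages(obj);
 */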
/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;

	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* A single page can always be kmapped */
	if (n_pte == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough - to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
}
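/*
 * Editorial note (an inference added for clarity, not from the original
 * file): alloc_vm_area() is used here instead of vmap() because it hands
 * back the PTE slots of the allocated kernel range, letting the loops
 * above fill them in directly. That is what allows mapping iomem-backed
 * objects, which have no struct page for vmap() to consume, by building
 * each entry with iomap_pte().
 */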
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}
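/*
 * Illustrative usage sketch (an assumption added for clarity, not taken
 * from this file): a successful pin_map both pins the pages and returns a
 * kernel address, and must be balanced by i915_gem_object_unpin_map():
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	__i915_gem_object_flush_map(obj, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */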
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
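/*
 * Worked example for the lookup cache above (illustrative note added for
 * clarity, not part of the original file): for an sg entry covering four
 * pages starting at page index 8, the radix tree stores the sg pointer at
 * index 8 and value entries encoding 8 at indices 9-11. A later lookup of
 * page 10 hits the value entry, decodes base == 8, fetches the real sg
 * from index 8 and reports *offset == 2 into that entry.
 */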
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
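	/*
	 * Note added for clarity (an inference, not from the original file):
	 * when the object as a whole is already tracked as dirty, every
	 * backing page will be marked dirty when the pages are released, so
	 * flagging this single page here would be redundant.
	 */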
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
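/*
 * Worked example for the offset arithmetic above (illustrative note added
 * for clarity, not part of the original file): with 4K pages, if page n
 * falls two pages into an sg entry whose DMA segment is 16K long, then
 * offset == 2, the returned address is sg_dma_address(sg) + 8K, and
 * *len == 16K - 8K == 8K, i.e. the bytes remaining in that contiguous
 * DMA segment from page n onwards.
 */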