drivers/gpu/drm/i915/gem/i915_gem_pages.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be
	 * able to use opportunistically when later inserting into the GTT.
	 * For example, if phys=2G, then in theory we should be able to use
	 * 1G, 2M, 64K or 4K pages, although in practice this will depend on
	 * a number of other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}
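
/*
 * Worked example for the page_sizes.sg computation above (illustrative,
 * values chosen for the sketch): with supported = 4K | 64K | 2M and
 * page_sizes.phys = 2M | 64K, a supported bit i is kept whenever some
 * physical chunk of at least that size exists (phys & ~0u << i):
 *
 *	i = 12 (4K):  phys & ~0u << 12 != 0  ->  sg |= 4K
 *	i = 16 (64K): phys & ~0u << 16 != 0  ->  sg |= 64K
 *	i = 21 (2M):  phys & ~0u << 21 != 0  ->  sg |= 2M
 *
 * so page_sizes.sg = 4K | 64K | 2M, and any of those page sizes may be
 * used opportunistically when inserting into the GTT.
 */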

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before the pages are released by the final call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced,
 * they are reaped either as a result of memory pressure (under the
 * shrinker) or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
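
/*
 * Typical caller pattern (illustrative sketch, assuming the usual
 * i915_gem_object_pin_pages()/i915_gem_object_unpin_pages() wrappers):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * Pins nest: the pages stay resident until every pin has been dropped,
 * after which the shrinker is free to reap them.
 */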

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from
	 * gtt lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}
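
/*
 * Illustrative note on the error contract above (hypothetical caller
 * code): the backing store can only be dropped once every pin is gone,
 * so callers racing with users simply retry later, e.g.
 *
 *	if (__i915_gem_object_put_pages(obj) == -EBUSY)
 *		... object still pinned; try again after the pins drop ...
 */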

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);
	return vaddr;
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return NULL;

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);
	return vaddr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = NULL;
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}
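
/*
 * Typical usage (illustrative sketch; data/len are placeholders, and the
 * usual i915_gem_object_unpin_map() wrapper is assumed for release):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	__i915_gem_object_flush_map(obj, 0, len);
 *	i915_gem_object_unpin_map(obj);
 */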

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset)
{
	const bool dma = iter == &obj->mm.get_dma_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
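
/*
 * Illustrative example of the radix tree layout built above (values
 * chosen for the sketch): for a single sg entry covering 4 pages
 * starting at index 8, the iterator caches
 *
 *	radix[8]     = sg               (pointer to the entry itself)
 *	radix[9..11] = xa_mk_value(8)   (value entries naming the base)
 *
 * so a later lookup of page 10 finds a value entry, re-looks-up index 8
 * to get the sg pointer, and returns *offset = 10 - 8 = 2.
 */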

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	/*
	 * If the whole object is already tracked as dirty, every page is
	 * flagged when the pages are released; otherwise mark just this
	 * page dirty now.
	 */
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
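
/*
 * Illustrative sketch (hypothetical caller code, pages assumed pinned):
 * walking an object's backing store one DMA page at a time, where len
 * reports how many bytes remain in the same sg entry after page n.
 *
 *	unsigned int len;
 *	dma_addr_t addr;
 *
 *	for (n = 0; n < obj->base.size >> PAGE_SHIFT; n++)
 *		addr = i915_gem_object_get_dma_address_len(obj, n, &len);
 */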