drivers/gpu/drm/i915/gt/gen8_ppgtt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
static u64 gen8_pde_encode(const dma_addr_t addr,
                           const enum i915_cache_level level)
{
        u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

        if (level != I915_CACHE_NONE)
                pde |= PPAT_CACHED_PDE;
        else
                pde |= PPAT_UNCACHED;

        return pde;
}
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
        struct drm_i915_private *i915 = ppgtt->vm.i915;
        struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
        enum vgt_g2v_type msg;
        int i;

        if (create)
                atomic_inc(px_used(ppgtt->pd)); /* never remove */
        else
                atomic_dec(px_used(ppgtt->pd));

        mutex_lock(&i915->vgpu.lock);

        if (i915_vm_is_4lvl(&ppgtt->vm)) {
                const u64 daddr = px_dma(ppgtt->pd);

                intel_uncore_write(uncore,
                                   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
                intel_uncore_write(uncore,
                                   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

                msg = create ?
                        VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
                        VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
                        const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

                        intel_uncore_write(uncore,
                                           vgtif_reg(pdp[i].lo),
                                           lower_32_bits(daddr));
                        intel_uncore_write(uncore,
                                           vgtif_reg(pdp[i].hi),
                                           upper_32_bits(daddr));
                }

                msg = create ?
                        VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
                        VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
        }

        /* g2v_notify atomically (via hv trap) consumes the message packet. */
        intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

        mutex_unlock(&i915->vgpu.lock);
}
/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
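
/*
 * Worked example of the macros above: GEN8_PAGE_SIZE is 4K, so GEN8_PTE_SHIFT
 * is 12 and each 4K directory page holds GEN8_PDES = 512 eight-byte entries,
 * i.e. 9 bits of index per level. A byte address therefore decomposes as
 *
 *   __gen8_pte_index(addr, 0) -> bits [20:12] (PTE within a page table)
 *   __gen8_pte_index(addr, 1) -> bits [29:21] (PDE within a page directory)
 *   __gen8_pte_index(addr, 2) -> bits [38:30] (PDPE)
 *   __gen8_pte_index(addr, 3) -> bits [47:39] (PML4E, 4lvl only)
 *
 * while gen8_pd_index() expects an index that has already been shifted down
 * by GEN8_PTE_SHIFT, as used throughout the clear/alloc walkers below.
 */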
static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
        const int shift = gen8_pd_shift(lvl);
        const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

        GEM_BUG_ON(start >= end);
        end += ~mask >> gen8_pd_shift(1);

        *idx = i915_pde_index(start, shift);
        if ((start ^ end) & mask)
                return GEN8_PDES - *idx;
        else
                return i915_pde_index(end, shift) - *idx;
}
static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
        const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

        GEM_BUG_ON(start >= end);
        return (start ^ end) & mask && (start & ~mask) == 0;
}
static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
        GEM_BUG_ON(start >= end);
        if ((start ^ end) >> gen8_pd_shift(1))
                return GEN8_PDES - (start & (GEN8_PDES - 1));
        else
                return end - start;
}
static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
        unsigned int shift = __gen8_pte_shift(vm->top);
        return (vm->total + (1ull << shift) - 1) >> shift;
}
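
/*
 * gen8_pd_top_count() sizes the top level: assuming the usual totals, a 3lvl
 * ppgtt (vm->top == 2, 4GiB of address space) yields GEN8_3LVL_PDPES (4)
 * entries, while a full 4lvl ppgtt (vm->top == 3, 48b) yields 512.
 */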
static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
        struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

        if (vm->top == 2)
                return ppgtt->pd;
        else
                return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
        return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
                                 struct i915_page_directory *pd,
                                 int count, int lvl)
{
        if (lvl) {
                void **pde = pd->entry;

                do {
                        if (!*pde)
                                continue;

                        __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
                } while (pde++, --count);
        }

        free_px(vm, pd);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

        if (intel_vgpu_active(vm->i915))
                gen8_ppgtt_notify_vgt(ppgtt, false);

        __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
        free_scratch(vm);
}
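
/*
 * __gen8_ppgtt_clear() walks the directory tree for [start, end) (in page
 * units), pointing every covered entry back at the scratch page for its
 * level: interior directories wholly contained in the range are unplugged
 * and freed, partially covered ones are recursed into, and leaf PTEs are
 * rewritten with the scratch encoding. The updated start index is returned
 * so callers can tell how far the clear advanced.
 */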
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
                              struct i915_page_directory * const pd,
                              u64 start, const u64 end, int lvl)
{
        const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
        unsigned int idx, len;

        GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

        len = gen8_pd_range(start, end, lvl--, &idx);
        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
            __func__, vm, lvl + 1, start, end,
            idx, len, atomic_read(px_used(pd)));
        GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

        do {
                struct i915_page_table *pt = pd->entry[idx];

                if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
                    gen8_pd_contains(start, end, lvl)) {
                        DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
                            __func__, vm, lvl + 1, idx, start, end);
                        clear_pd_entry(pd, idx, scratch);
                        __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
                        start += (u64)I915_PDES << gen8_pd_shift(lvl);
                        continue;
                }

                if (lvl) {
                        start = __gen8_ppgtt_clear(vm, as_pd(pt),
                                                   start, end, lvl);
                } else {
                        unsigned int count;
                        u64 *vaddr;

                        count = gen8_pt_count(start, end);
                        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
                            __func__, vm, lvl, start, end,
                            gen8_pd_index(start, 0), count,
                            atomic_read(&pt->used));
                        GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

                        vaddr = kmap_atomic_px(pt);
                        memset64(vaddr + gen8_pd_index(start, 0),
                                 vm->scratch[0].encode,
                                 count);
                        kunmap_atomic(vaddr);

                        atomic_sub(count, &pt->used);
                        start += count;
                }

                if (release_pd_entry(pd, idx, pt, scratch))
                        free_px(vm, pt);
        } while (idx++, --len);

        return start;
}
static void gen8_ppgtt_clear(struct i915_address_space *vm,
                             u64 start, u64 length)
{
        GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(range_overflows(start, length, vm->total));

        start >>= GEN8_PTE_SHIFT;
        length >>= GEN8_PTE_SHIFT;
        GEM_BUG_ON(length == 0);

        __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
                           start, start + length, vm->top);
}
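
/*
 * __gen8_ppgtt_alloc() is the counterpart of __gen8_ppgtt_clear(): it ensures
 * every directory and page table backing [*start, end) is populated. pd->lock
 * is dropped around the actual allocation, so a concurrent allocator may
 * install the entry first; the losing allocation is then parked in 'alloc'
 * for reuse on the next iteration (or freed at 'out'). On error, *start tells
 * the caller how much of the range was populated and needs unwinding.
 */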
static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
                              struct i915_page_directory * const pd,
                              u64 * const start, const u64 end, int lvl)
{
        const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
        struct i915_page_table *alloc = NULL;
        unsigned int idx, len;
        int ret = 0;

        GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

        len = gen8_pd_range(*start, end, lvl--, &idx);
        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
            __func__, vm, lvl + 1, *start, end,
            idx, len, atomic_read(px_used(pd)));
        GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

        spin_lock(&pd->lock);
        GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
        do {
                struct i915_page_table *pt = pd->entry[idx];

                if (!pt) {
                        spin_unlock(&pd->lock);

                        DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
                            __func__, vm, lvl + 1, idx);

                        pt = fetch_and_zero(&alloc);
                        if (lvl) {
                                if (!pt) {
                                        pt = &alloc_pd(vm)->pt;
                                        if (IS_ERR(pt)) {
                                                ret = PTR_ERR(pt);
                                                goto out;
                                        }
                                }

                                fill_px(pt, vm->scratch[lvl].encode);
                        } else {
                                if (!pt) {
                                        pt = alloc_pt(vm);
                                        if (IS_ERR(pt)) {
                                                ret = PTR_ERR(pt);
                                                goto out;
                                        }
                                }

                                if (intel_vgpu_active(vm->i915) ||
                                    gen8_pt_count(*start, end) < I915_PDES)
                                        fill_px(pt, vm->scratch[lvl].encode);
                        }

                        spin_lock(&pd->lock);
                        if (likely(!pd->entry[idx]))
                                set_pd_entry(pd, idx, pt);
                        else
                                alloc = pt, pt = pd->entry[idx];
                }

                if (lvl) {
                        atomic_inc(&pt->used);
                        spin_unlock(&pd->lock);

                        ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
                                                 start, end, lvl);
                        if (unlikely(ret)) {
                                if (release_pd_entry(pd, idx, pt, scratch))
                                        free_px(vm, pt);
                                goto out;
                        }

                        spin_lock(&pd->lock);
                        atomic_dec(&pt->used);
                        GEM_BUG_ON(!atomic_read(&pt->used));
                } else {
                        unsigned int count = gen8_pt_count(*start, end);

                        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
                            __func__, vm, lvl, *start, end,
                            gen8_pd_index(*start, 0), count,
                            atomic_read(&pt->used));

                        atomic_add(count, &pt->used);
                        /* All other pdes may be simultaneously removed */
                        GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
                        *start += count;
                }
        } while (idx++, --len);
        spin_unlock(&pd->lock);
out:
        if (alloc)
                free_px(vm, alloc);
        return ret;
}
static int gen8_ppgtt_alloc(struct i915_address_space *vm,
                            u64 start, u64 length)
{
        u64 from;
        int err;

        GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(range_overflows(start, length, vm->total));

        start >>= GEN8_PTE_SHIFT;
        length >>= GEN8_PTE_SHIFT;
        GEM_BUG_ON(length == 0);
        from = start;

        err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
                                 &start, start + length, vm->top);
        if (unlikely(err && from != start))
                __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
                                   from, start, vm->top);

        return err;
}
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
                      struct i915_page_directory *pdp,
                      struct sgt_dma *iter,
                      u64 idx,
                      enum i915_cache_level cache_level,
                      u32 flags)
{
        struct i915_page_directory *pd;
        const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        gen8_pte_t *vaddr;

        pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
        do {
                GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
                vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

                iter->dma += I915_GTT_PAGE_SIZE;
                if (iter->dma >= iter->max) {
                        iter->sg = __sg_next(iter->sg);
                        if (!iter->sg) {
                                idx = 0;
                                break;
                        }

                        iter->dma = sg_dma_address(iter->sg);
                        iter->max = iter->dma + iter->sg->length;
                }

                if (gen8_pd_index(++idx, 0) == 0) {
                        if (gen8_pd_index(idx, 1) == 0) {
                                /* Limited by sg length for 3lvl */
                                if (gen8_pd_index(idx, 2) == 0)
                                        break;

                                pd = pdp->entry[gen8_pd_index(idx, 2)];
                        }

                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
                }
        } while (1);
        kunmap_atomic(vaddr);

        return idx;
}
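
/*
 * gen8_ppgtt_insert_huge() fills the vma using the largest page size each sg
 * chunk allows: a 2M PDE (GEN8_PDE_PS_2M) when the dma address, remaining
 * length and GTT offset are all 2M aligned, otherwise 4K PTEs, tagging the
 * parent PDE with GEN8_PDE_IPS_64K when a whole page table can be treated as
 * 64K pages (or the tail can be padded with the 64K scratch page).
 */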
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
                                   struct sgt_dma *iter,
                                   enum i915_cache_level cache_level,
                                   u32 flags)
{
        const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        u64 start = vma->node.start;
        dma_addr_t rem = iter->sg->length;

        GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

        do {
                struct i915_page_directory * const pdp =
                        gen8_pdp_for_page_address(vma->vm, start);
                struct i915_page_directory * const pd =
                        i915_pd_entry(pdp, __gen8_pte_index(start, 2));
                gen8_pte_t encode = pte_encode;
                unsigned int maybe_64K = -1;
                unsigned int page_size;
                gen8_pte_t *vaddr;
                u16 index;

                if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
                    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
                    rem >= I915_GTT_PAGE_SIZE_2M &&
                    !__gen8_pte_index(start, 0)) {
                        index = __gen8_pte_index(start, 1);
                        encode |= GEN8_PDE_PS_2M;
                        page_size = I915_GTT_PAGE_SIZE_2M;

                        vaddr = kmap_atomic_px(pd);
                } else {
                        struct i915_page_table *pt =
                                i915_pt_entry(pd, __gen8_pte_index(start, 1));

                        index = __gen8_pte_index(start, 0);
                        page_size = I915_GTT_PAGE_SIZE;

                        if (!index &&
                            vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
                            IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
                            (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
                             rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
                                maybe_64K = __gen8_pte_index(start, 1);

                        vaddr = kmap_atomic_px(pt);
                }

                do {
                        GEM_BUG_ON(iter->sg->length < page_size);
                        vaddr[index++] = encode | iter->dma;

                        start += page_size;
                        iter->dma += page_size;
                        rem -= page_size;
                        if (iter->dma >= iter->max) {
                                iter->sg = __sg_next(iter->sg);
                                if (!iter->sg)
                                        break;

                                rem = iter->sg->length;
                                iter->dma = sg_dma_address(iter->sg);
                                iter->max = iter->dma + rem;

                                if (maybe_64K != -1 && index < I915_PDES &&
                                    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
                                      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
                                       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
                                        maybe_64K = -1;

                                if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
                                        break;
                        }
                } while (rem >= page_size && index < I915_PDES);

                kunmap_atomic(vaddr);

                /*
                 * Is it safe to mark the 2M block as 64K? -- Either we have
                 * filled the whole page-table with 64K entries, or filled
                 * part of it and have reached the end of the sg table and we
                 * have enough padding.
                 */
                if (maybe_64K != -1 &&
                    (index == I915_PDES ||
                     (i915_vm_has_scratch_64K(vma->vm) &&
                      !iter->sg && IS_ALIGNED(vma->node.start +
                                              vma->node.size,
                                              I915_GTT_PAGE_SIZE_2M)))) {
                        vaddr = kmap_atomic_px(pd);
                        vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
                        kunmap_atomic(vaddr);
                        page_size = I915_GTT_PAGE_SIZE_64K;

                        /*
                         * We write all 4K page entries, even when using 64K
                         * pages. In order to verify that the HW isn't cheating
                         * by using the 4K PTE instead of the 64K PTE, we want
                         * to remove all the surplus entries. If the HW skipped
                         * the 64K PTE, it will read/write into the scratch page
                         * instead - which we detect as missing results during
                         * selftests.
                         */
                        if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
                                u16 i;

                                encode = vma->vm->scratch[0].encode;
                                vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

                                for (i = 1; i < index; i += 16)
                                        memset64(vaddr + i, encode, 15);

                                kunmap_atomic(vaddr);
                        }
                }

                vma->page_sizes.gtt |= page_size;
        } while (iter->sg);
}
static void gen8_ppgtt_insert(struct i915_address_space *vm,
                              struct i915_vma *vma,
                              enum i915_cache_level cache_level,
                              u32 flags)
{
        struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);

        if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
        } else {
                u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

                do {
                        struct i915_page_directory * const pdp =
                                gen8_pdp_for_page_index(vm, idx);

                        idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
                                                    cache_level, flags);
                } while (idx);

                vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
        }
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
        int ret;
        int i;

        /*
         * If everybody agrees not to write into the scratch page,
         * we can reuse it for all vm, keeping contexts and processes separate.
         */
        if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
                struct i915_address_space *clone = vm->gt->vm;

                GEM_BUG_ON(!clone->has_read_only);

                vm->scratch_order = clone->scratch_order;
                memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
                px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
                return 0;
        }

        ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;

        vm->scratch[0].encode =
                gen8_pte_encode(px_dma(&vm->scratch[0]),
                                I915_CACHE_LLC, vm->has_read_only);

        for (i = 1; i <= vm->top; i++) {
                if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
                        goto free_scratch;

                fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
                vm->scratch[i].encode =
                        gen8_pde_encode(px_dma(&vm->scratch[i]),
                                        I915_CACHE_LLC);
        }

        return 0;

free_scratch:
        free_scratch(vm);
        return -ENOMEM;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
        struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory *pd = ppgtt->pd;
        unsigned int idx;

        GEM_BUG_ON(vm->top != 2);
        GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

        for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
                struct i915_page_directory *pde;

                pde = alloc_pd(vm);
                if (IS_ERR(pde))
                        return PTR_ERR(pde);

                fill_px(pde, vm->scratch[1].encode);
                set_pd_entry(pd, idx, pde);
                atomic_inc(px_used(pde)); /* keep pinned */
        }
        wmb();

        return 0;
}
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
        const unsigned int count = gen8_pd_top_count(vm);
        struct i915_page_directory *pd;

        GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

        pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_page_dma(vm, px_base(pd)))) {
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
        atomic_inc(px_used(pd)); /* mark as pinned */
        return pd;
}
/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b
 * address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
        struct i915_ppgtt *ppgtt;
        int err;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return ERR_PTR(-ENOMEM);

        ppgtt_init(ppgtt, gt);
        ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

        /*
         * From bdw, there is hw support for read-only pages in the PPGTT.
         *
         * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
         * for now.
         *
         * Gen12 has inherited the same read-only fault issue from gen11.
         */
        ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

        /*
         * There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play safe for now.
         */
        if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
                ppgtt->vm.pt_kmap_wc = true;

        err = gen8_init_scratch(&ppgtt->vm);
        if (err)
                goto err_free;

        ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
        if (IS_ERR(ppgtt->pd)) {
                err = PTR_ERR(ppgtt->pd);
                goto err_free_scratch;
        }

        if (!i915_vm_is_4lvl(&ppgtt->vm)) {
                err = gen8_preallocate_top_level_pdp(ppgtt);
                if (err)
                        goto err_free_pd;
        }

        ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
        ppgtt->vm.insert_entries = gen8_ppgtt_insert;
        ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
        ppgtt->vm.clear_range = gen8_ppgtt_clear;

        if (intel_vgpu_active(gt->i915))
                gen8_ppgtt_notify_vgt(ppgtt, true);

        ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

        return ppgtt;

err_free_pd:
        __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
                             gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
        free_scratch(&ppgtt->vm);
err_free:
        kfree(ppgtt);
        return ERR_PTR(err);
}