// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

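/*
 * Encode a page-directory entry: the dma address of the next-level table,
 * marked present and writable, plus a PPAT cacheability hint.
 */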
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

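/*
 * Encode a page-table entry for one 4K page: dma address, present and
 * read-write bits (RW cleared again for read-only pages), and PPAT cache
 * attributes derived from the requested cache level.
 */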
static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

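/*
 * When running as a vGPU guest, report creation/destruction of a ppgtt to
 * the host through the vgtif mailbox, handing over the dma address(es) of
 * the top-level structure (one address for 4lvl, all four PDPs for 3lvl).
 */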
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

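/*
 * Compute the first entry (*idx) and the number of entries that
 * [start, end) spans within a single directory at @lvl; the count is
 * clamped to the directory boundary so callers can iterate pd by pd.
 */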
static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

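/*
 * True if [start, end) completely covers the directory span containing
 * start at this level, i.e. start is aligned to the span and end reaches
 * beyond it.
 */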
static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

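/*
 * Number of PTEs in one page table covered by [start, end), clamped to
 * the end of the table when the range crosses into the next one.
 */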
static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

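/* Number of entries the top-level directory needs to cover vm->total. */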
static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);
	return (vm->total + (1ull << shift) - 1) >> shift;
}

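/*
 * Look up the page-directory-pointer table for a PTE index: the top-level
 * pd itself for 3lvl (top == 2), or the pdp slot within it for 4lvl.
 */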
static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

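/* Depth-first teardown: recursively free every child before @pd itself. */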
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

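/*
 * Unbind the range [start, end) of PTE indices, recursing down the tree.
 * Subtrees entirely covered by the range are unlinked and freed wholesale;
 * partially covered page tables have the affected PTEs rewritten to point
 * back at scratch. Returns the index it advanced to.
 */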
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

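/*
 * Populate [*start, end) with page tables drawn from the preallocated
 * stash, recursing from @lvl down to the PTE level. Concurrent callers
 * are serialised by pd->lock; if another thread installed the entry
 * first, the stashed table stays in the stash for later reuse.
 */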
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);
			i915_gem_object_make_unshrinkable(pt->base);

			if (lvl ||
			    gen8_pt_count(*start, end) < I915_PDES ||
			    intel_vgpu_active(vm->i915))
				fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

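/*
 * Write 4K PTEs for the scatterlist starting at @idx, rolling over page
 * table and page directory boundaries within one pdp and flushing each
 * CPU mapping as it fills. Returns the next index to continue from, or
 * 0 once the scatterlist is exhausted.
 */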
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);

	return idx;
}

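/*
 * Insert entries using the largest page size each chunk permits: 2M PDEs
 * when the dma address, GTT offset and remaining length all align,
 * otherwise 4K PTEs, opportunistically flagging a fully-populated page
 * table as 64K via the GEN8_PDE_IPS_64K bit.
 */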
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma->node.start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);
		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

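/*
 * Top-level insert_entries hook: take the huge-page path when the vma is
 * backed by segments larger than 4K, otherwise write plain 4K PTEs one
 * pdp at a time.
 */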
static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

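/*
 * Construct the scratch hierarchy, one page/directory per level, each
 * level filled with entries pointing at the level below, so that any
 * unpopulated address resolves harmlessly to the scratch page. Where the
 * GT already has a suitable read-only scratch set up, it is shared
 * instead of duplicated.
 */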
static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj))
			goto free_scratch;

		ret = pin_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return -ENOMEM;
}

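/*
 * For the legacy 3lvl layout the address space is programmed through the
 * four PDP registers (see the comment above gen8_ppgtt_create), so
 * allocate, pin and scratch-fill all four directories up front rather
 * than on demand.
 */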
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = pin_pt_dma(vm, pde->pt.base);
		if (err) {
			i915_gem_object_put(pde->pt.base);
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

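/*
 * Allocate and pin the top-level page directory, sized to the number of
 * entries the address space actually needs, and point every slot at the
 * next level of scratch.
 */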
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = pin_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}