drivers/gpu/drm/i915/gt/intel_ppgtt.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
        struct i915_page_table *pt;

        pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pt))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_page_dma(vm, &pt->base))) {
                kfree(pt);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&pt->used, 0);
        return pt;
}
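
/*
 * __alloc_pd() - allocate the CPU-side page directory structure only.
 *
 * No DMA page is set up here; that is the caller's job. Note this one
 * returns NULL rather than an ERR_PTR on failure.
 */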
struct i915_page_directory *__alloc_pd(size_t sz)
{
        struct i915_page_directory *pd;

        pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))
                return NULL;

        spin_lock_init(&pd->lock);
        return pd;
}
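
/*
 * alloc_pd() - allocate a page directory plus its backing DMA page.
 * Unlike __alloc_pd(), failure is reported as ERR_PTR(-ENOMEM).
 */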
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
        struct i915_page_directory *pd;

        pd = __alloc_pd(sizeof(*pd));
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_page_dma(vm, px_base(pd)))) {
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        return pd;
}
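
/* free_pd() - release the backing DMA page, then the structure itself. */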
void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
        cleanup_page_dma(vm, pd);
        kfree(pd);
}
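
/*
 * write_dma_entry() - store one encoded entry into a paging structure.
 *
 * The backing page is mapped with kmap_atomic() just long enough to
 * write the 64-bit entry at @idx.
 */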
static inline void
write_dma_entry(struct i915_page_dma * const pdma,
                const unsigned short idx,
                const u64 encoded_entry)
{
        u64 * const vaddr = kmap_atomic(pdma->page);

        vaddr[idx] = encoded_entry;
        kunmap_atomic(vaddr);
}
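
/*
 * __set_pd_entry() - point entry @idx of @pd at @to.
 *
 * Bumps the directory's use count, records the child in the CPU-side
 * shadow array and writes the encoded DMA address into the directory
 * page itself.
 */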
void
__set_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               struct i915_page_dma * const to,
               u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
        /* Each thread pre-pins the pd, and we may have a thread per pde. */
        GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));

        atomic_inc(px_used(pd));
        pd->entry[idx] = to;
        write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
}
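
/*
 * clear_pd_entry() - the reverse of __set_pd_entry(): restore the
 * scratch encoding for @idx, then drop the shadow pointer and use count.
 */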
void
clear_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               const struct i915_page_scratch * const scratch)
{
        GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

        write_dma_entry(px_base(pd), idx, scratch->encode);
        pd->entry[idx] = NULL;
        atomic_dec(px_used(pd));
}
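
/*
 * release_pd_entry() - drop a reference on @pt and, if it was the last
 * one, clear its slot in @pd.
 *
 * atomic_add_unless() provides a lockless fast path while other
 * references remain; only the final decrement takes pd->lock, under
 * which the entry is cleared. Returns true when the caller may free
 * @pt, roughly (a sketch, with free_px() standing in for whichever
 * helper the caller actually uses):
 *
 *        if (release_pd_entry(pd, idx, pt, scratch))
 *                free_px(vm, pt);
 */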
bool
release_pd_entry(struct i915_page_directory * const pd,
                 const unsigned short idx,
                 struct i915_page_table * const pt,
                 const struct i915_page_scratch * const scratch)
{
        bool free = false;

        if (atomic_add_unless(&pt->used, -1, 1))
                return false;

        spin_lock(&pd->lock);
        if (atomic_dec_and_test(&pt->used)) {
                clear_pd_entry(pd, idx, scratch);
                free = true;
        }
        spin_unlock(&pd->lock);

        return free;
}
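
/*
 * i915_ppgtt_init_hw() - per-GT hardware setup for PPGTT.
 *
 * Applies the GTT workarounds and, on gen6/gen7 only, flips the
 * explicit PPGTT enable bits; gen8+ needs nothing further here.
 */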
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        gtt_write_workarounds(gt);

        if (IS_GEN(i915, 6))
                gen6_ppgtt_enable(gt);
        else if (IS_GEN(i915, 7))
                gen7_ppgtt_enable(gt);

        return 0;
}
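
/* Pick the generation-specific backend: gen6 covers everything below gen8. */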
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
        if (INTEL_GEN(gt->i915) < 8)
                return gen6_ppgtt_create(gt);
        else
                return gen8_ppgtt_create(gt);
}
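
/*
 * i915_ppgtt_create() - create a PPGTT and emit the creation tracepoint.
 * Errors from the backend are passed straight through as ERR_PTRs.
 */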
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
        struct i915_ppgtt *ppgtt;

        ppgtt = __ppgtt_create(gt);
        if (IS_ERR(ppgtt))
                return ppgtt;

        trace_i915_ppgtt_create(&ppgtt->vm);

        return ppgtt;
}
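
/*
 * ppgtt_bind_vma() - bind a VMA into the PPGTT.
 *
 * On first bind (I915_VMA_ALLOC) the VA range is allocated before any
 * PTEs are written. The read-only bit only exists on the platforms
 * noted below, and the final wmb() orders the PTE writes before any
 * subsequent use of the mapping.
 */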
static int ppgtt_bind_vma(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 flags)
{
        u32 pte_flags;
        int err;

        if (flags & I915_VMA_ALLOC) {
                err = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start, vma->size);
                if (err)
                        return err;

                set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
        }

        /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
        if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
        wmb();

        return 0;
}
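
/* Tear the mappings down again, but only if this VMA allocated them. */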
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
        if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
                vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
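
/*
 * ppgtt_set_pages() - a ppgtt VMA maps the object's pages directly, so
 * simply borrow the object's sg_table and page-size tracking.
 */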
int ppgtt_set_pages(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->pages);

        vma->pages = vma->obj->mm.pages;
        vma->page_sizes = vma->obj->mm.page_sizes;

        return 0;
}
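
/*
 * ppgtt_init() - common construction shared by the gen6 and gen8
 * backends: wire the VM to its GT and device, size the address space
 * from the platform's ppgtt_size (in bits), and install the ppgtt
 * vma_ops.
 */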
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        ppgtt->vm.gt = gt;
        ppgtt->vm.i915 = i915;
        ppgtt->vm.dma = &i915->drm.pdev->dev;
        ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

        i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
        ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
        ppgtt->vm.vma_ops.clear_pages = clear_pages;
}