// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"
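
/*
 * Allocate a single page table plus its backing DMA page. The allocation
 * may fail under memory pressure (I915_GFP_ALLOW_FAIL), in which case an
 * ERR_PTR is returned so the caller can unwind.
 */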
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, &pt->base))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&pt->used, 0);
	return pt;
}

struct i915_page_directory *__alloc_pd(size_t sz)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	spin_lock_init(&pd->lock);
	return pd;
}

struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(sizeof(*pd));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
	cleanup_page_dma(vm, pd);
	kfree(pd);
}
static void
write_dma_entry(struct i915_page_dma * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = kmap_atomic(pdma->page);

	vaddr[idx] = encoded_entry;
	kunmap_atomic(vaddr);
}

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
}

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}
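
/*
 * Drop a reference on a page table. The common case, where the table is
 * still in use, is handled locklessly by atomic_add_unless(); only when
 * the last reference may be dropped do we take pd->lock, point the entry
 * back at scratch and tell the caller the table can be freed.
 */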
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}
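
/*
 * Apply the GTT workarounds and perform the explicit PPGTT enabling that
 * gen6/gen7 hardware requires; nothing further is needed here for gen8+.
 */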
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (IS_GEN(i915, 6))
		gen6_ppgtt_enable(gt);
	else if (IS_GEN(i915, 7))
		gen7_ppgtt_enable(gt);

	return 0;
}

static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
	if (INTEL_GEN(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt);
}

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}
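
/*
 * Bind a vma into the ppGTT: on first bind (I915_VMA_ALLOC) allocate the
 * page directories and tables backing the node, then write out the PTEs,
 * marking read-only objects in the PTE where the hardware honours it.
 */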
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	u32 pte_flags;
	int err;

	if (flags & I915_VMA_ALLOC) {
		err = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start, vma->size);
		if (err)
			return err;

		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	wmb();

	return 0;
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = &i915->drm.pdev->dev;
	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->vm.vma_ops.clear_pages = clear_pages;
}