/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
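
/* Map the backing memory regions of "node" into the address range described
 * by "vma", starting "delta" bytes into the virtual allocation.  The work is
 * split into chunks so that no single vm->map() call crosses a page
 * directory entry boundary.
 */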
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_mm_node *r;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        delta = 0;
        list_for_each_entry(r, &node->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        vm->map(vma, pgt, node, pte, len, phys, delta);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                pde++;
                                pte = 0;
                        }

                        delta += (u64)len << vma->node->type;
                }
        }

        vm->flush(vm);
}
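
/* Convenience wrapper: map "node" at the very start of the vma. */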
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
        nouveau_vm_map_at(vma, 0, node);
}
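
/* Map a scatter/gather page list ("list" of DMA addresses) into the vma,
 * starting "delta" bytes in, again chunked on page table boundaries.
 */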
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  struct nouveau_mem *mem, dma_addr_t *list)
{
        struct nouveau_vm *vm = vma->vm;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->map_sg(vma, pgt, mem, pte, len, list);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}
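
/* Unmap "length" bytes starting "delta" bytes into the vma, walking the
 * affected page tables one at a time.
 */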
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
        struct nouveau_vm *vm = vma->vm;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}
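
/* Unmap the entire virtual allocation backing the vma. */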
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
        nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
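
/* Drop a reference on each page table covering PDEs fpde..lpde; tables whose
 * refcount hits zero are cleared from every attached PGD and released.
 * Called with vm->mm->mutex held; the lock is dropped around the gpuobj
 * release.
 */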
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_vm_pgt *vpgt;
        struct nouveau_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->obj[big];
                vpgt->obj[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        vm->map_pgt(vpgd->obj, pde, vpgt->obj);
                }

                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
        }
}
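
/* Allocate a page table for PDE "pde" with page size "type" and point every
 * PGD attached to this VM at it.  vm->mm->mutex is dropped around the gpuobj
 * allocation, so a racing allocation is detected and the duplicate discarded.
 */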
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
        struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_gpuobj *pgt;
        int big = (type != vm->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&vm->mm->mutex);
        ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&vm->mm->mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount[big]++)) {
                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
                return 0;
        }

        vpgt->obj[big] = pgt;
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                vm->map_pgt(vpgd->obj, pde, vpgt->obj);
        }

        return 0;
}
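
/* Allocate "size" bytes of address space with the requested page size and
 * map in (or take references on) the page tables that back it.
 */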
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
               u32 access, struct nouveau_vma *vma)
{
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mm->mutex);
        ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mm->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
        for (pde = fpde; pde <= lpde; pde++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != vm->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nouveau_mm_put(vm->mm, vma->node);
                        mutex_unlock(&vm->mm->mutex);
                        vma->node = NULL;
                        return ret;
                }
        }
        mutex_unlock(&vm->mm->mutex);

        vma->vm     = vm;
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}
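
/* Release the address space held by the vma and drop the page table
 * references taken by nouveau_vm_get().
 */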
void
nouveau_vm_put(struct nouveau_vma *vma)
{
        struct nouveau_vm *vm = vma->vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

        mutex_lock(&vm->mm->mutex);
        nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
        nouveau_mm_put(vm->mm, vma->node);
        vma->node = NULL;
        mutex_unlock(&vm->mm->mutex);
}
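
/* Create a new VM covering [offset, offset + length), with allocatable
 * address space starting at mm_offset, and hook up the per-chipset page
 * table handling functions.
 */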
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
               struct nouveau_vm **pvm)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        u32 block, pgt_bits;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        if (dev_priv->card_type == NV_50) {
                vm->map_pgt = nv50_vm_map_pgt;
                vm->map = nv50_vm_map;
                vm->map_sg = nv50_vm_map_sg;
                vm->unmap = nv50_vm_unmap;
                vm->flush = nv50_vm_flush;
                vm->spg_shift = 12;
                vm->lpg_shift = 16;

                pgt_bits = 29;
                block = (1 << pgt_bits);
                if (length < block)
                        block = length;
        } else
        if (dev_priv->card_type == NV_C0) {
                vm->map_pgt = nvc0_vm_map_pgt;
                vm->map = nvc0_vm_map;
                vm->map_sg = nvc0_vm_map_sg;
                vm->unmap = nvc0_vm_unmap;
                vm->flush = nvc0_vm_flush;
                vm->spg_shift = 12;
                vm->lpg_shift = 17;
                pgt_bits = 27;
                block = 4096;
        } else {
                kfree(vm);
                return -ENOSYS;
        }

        vm->fpde = offset >> pgt_bits;
        vm->lpde = (offset + length - 1) >> pgt_bits;
        vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->dev = dev;
        vm->refcount = 1;
        vm->pgt_bits = pgt_bits - 12;

        ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                              block >> 12);
        if (ret) {
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}
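
/* Attach a page directory to this VM and fill it with the VM's current set
 * of page tables.
 */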
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nouveau_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&vm->mm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mm->mutex);
        return 0;
}
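
/* Detach a previously linked page directory from this VM. */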
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        if (!pgd)
                return;

        mutex_lock(&vm->mm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj != pgd)
                        continue;

                list_del(&vpgd->head);
                nouveau_gpuobj_ref(NULL, &vpgd->obj);
                kfree(vpgd);
        }
        mutex_unlock(&vm->mm->mutex);
}
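
/* Tear down a VM once its last reference is gone: unlink any remaining page
 * directories and release the allocator and page table array.
 */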
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nouveau_vm_unlink(vm, vpgd->obj);
        }
        WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

        kfree(vm->pgt);
        kfree(vm);
}
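
/* Update "*ptr" to reference "ref" (which may be NULL), adjusting VM
 * refcounts and linking/unlinking "pgd" as required.
 */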
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
               struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm *vm;
        int ret;

        vm = ref;
        if (vm) {
                ret = nouveau_vm_link(vm, pgd);
                if (ret)
                        return ret;

                vm->refcount++;
        }

        vm = *ptr;
        *ptr = ref;

        if (vm) {
                nouveau_vm_unlink(vm, pgd);

                if (--vm->refcount == 0)
                        nouveau_vm_del(vm);
        }

        return 0;
}