/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
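
/* Map each memory region backing @node into the virtual range described by
 * @vma, starting @delta bytes into that range, advancing PDE by PDE.
 */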
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}
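
/* Convenience wrapper: map all of @node at the very start of @vma. */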
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}
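
/* Map a DMA page list (mem->pages) into @vma, @delta bytes in, covering
 * @length bytes.
 */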
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
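
/* Clear the page-table entries for @length bytes starting @delta bytes into
 * @vma.
 */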
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
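
/* Unmap the entire range covered by @vma. */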
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
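
/* Drop one reference on each page table in [fpde, lpde].  Tables whose
 * refcount reaches zero are unhooked from every PGD attached to the VM and
 * then released, with the mutex dropped around the release.
 */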
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}
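
/* Allocate a page table for @pde and hook it into every PGD attached to the
 * VM.  Called with vm->mm.mutex held; the lock is dropped around the
 * allocation itself.
 */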
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}
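
/* Carve @size bytes of address space out of the VM for a mapping using
 * (1 << page_shift)-byte pages, taking references on (or allocating) the
 * page tables that back the range.
 */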
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(&vm->mm, vma->node);
			mutex_unlock(&vm->mm.mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm     = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}
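
/* Return @vma's address space to the VM and drop its page-table references. */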
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(&vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm.mutex);
}
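
/* Create an address space covering [offset, offset + length), with
 * allocations handed out from @mm_offset upwards.
 */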
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;
	} else
	if (dev_priv->card_type >= NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}
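
/* Attach a page directory to the VM and populate it with the VM's current
 * page tables.
 */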
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}
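
/* Detach a page directory from the VM and drop the reference taken on it
 * when it was linked.
 */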
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}
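
/* Tear down an address space once its last reference has been dropped. */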
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}
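
/* Re-point *@ptr at @ref, adjusting VM reference counts and (un)linking
 * @pgd as required; the previous VM is destroyed when its refcount hits
 * zero.
 */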
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}