/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <core/device.h>
27 #include <core/gpuobj.h>
28 #include <subdev/fb.h>
29 #include <subdev/mmu.h>
/*
 * State for one BAR-backed address space: the instance memory block that
 * describes it to hardware, its page directory, and the VM itself.
 */
struct gf100_bar_priv_vm {
	struct nvkm_gpuobj *mem;	/* instance block; PD address/limit written at 0x200-0x20c */
	struct nvkm_gpuobj *pgd;	/* page directory backing this VM */
	struct nvkm_vm *vm;		/* virtual address space exposed through the BAR */
};
37 struct gf100_bar_priv
{
40 struct gf100_bar_priv_vm bar
[2];
44 gf100_bar_kmap(struct nvkm_bar
*bar
, struct nvkm_mem
*mem
, u32 flags
,
47 struct gf100_bar_priv
*priv
= (void *)bar
;
50 ret
= nvkm_vm_get(priv
->bar
[0].vm
, mem
->size
<< 12, 12, flags
, vma
);
54 nvkm_vm_map(vma
, mem
);
59 gf100_bar_umap(struct nvkm_bar
*bar
, struct nvkm_mem
*mem
, u32 flags
,
62 struct gf100_bar_priv
*priv
= (void *)bar
;
65 ret
= nvkm_vm_get(priv
->bar
[1].vm
, mem
->size
<< 12,
66 mem
->page_shift
, flags
, vma
);
70 nvkm_vm_map(vma
, mem
);
/*
 * Tear down a mapping created by gf100_bar_kmap()/gf100_bar_umap():
 * unmap the pages, then release the virtual address range.
 * NOTE(review): body reconstructed from upstream nouveau -- confirm.
 */
static void
gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}
82 gf100_bar_ctor_vm(struct gf100_bar_priv
*priv
, struct gf100_bar_priv_vm
*bar_vm
,
85 struct nvkm_device
*device
= nv_device(&priv
->base
);
87 resource_size_t bar_len
;
90 ret
= nvkm_gpuobj_new(nv_object(priv
), NULL
, 0x1000, 0, 0,
95 ret
= nvkm_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0, 0,
100 bar_len
= nv_device_resource_len(device
, bar_nr
);
102 ret
= nvkm_vm_new(device
, 0, bar_len
, 0, &vm
);
106 atomic_inc(&vm
->engref
[NVDEV_SUBDEV_BAR
]);
109 * Bootstrap page table lookup.
112 ret
= nvkm_gpuobj_new(nv_object(priv
), NULL
,
113 (bar_len
>> 12) * 8, 0x1000,
114 NVOBJ_FLAG_ZERO_ALLOC
,
116 vm
->pgt
[0].refcount
[0] = 1;
121 ret
= nvkm_vm_ref(vm
, &bar_vm
->vm
, bar_vm
->pgd
);
122 nvkm_vm_ref(NULL
, &vm
, NULL
);
126 nv_wo32(bar_vm
->mem
, 0x0200, lower_32_bits(bar_vm
->pgd
->addr
));
127 nv_wo32(bar_vm
->mem
, 0x0204, upper_32_bits(bar_vm
->pgd
->addr
));
128 nv_wo32(bar_vm
->mem
, 0x0208, lower_32_bits(bar_len
- 1));
129 nv_wo32(bar_vm
->mem
, 0x020c, upper_32_bits(bar_len
- 1));
134 gf100_bar_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
135 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
136 struct nvkm_object
**pobject
)
138 struct nvkm_device
*device
= nv_device(parent
);
139 struct gf100_bar_priv
*priv
;
140 bool has_bar3
= nv_device_resource_len(device
, 3) != 0;
143 ret
= nvkm_bar_create(parent
, engine
, oclass
, &priv
);
144 *pobject
= nv_object(priv
);
150 ret
= gf100_bar_ctor_vm(priv
, &priv
->bar
[0], 3);
156 ret
= gf100_bar_ctor_vm(priv
, &priv
->bar
[1], 1);
161 priv
->base
.alloc
= nvkm_bar_alloc
;
162 priv
->base
.kmap
= gf100_bar_kmap
;
164 priv
->base
.umap
= gf100_bar_umap
;
165 priv
->base
.unmap
= gf100_bar_unmap
;
166 priv
->base
.flush
= g84_bar_flush
;
167 spin_lock_init(&priv
->lock
);
172 gf100_bar_dtor(struct nvkm_object
*object
)
174 struct gf100_bar_priv
*priv
= (void *)object
;
176 nvkm_vm_ref(NULL
, &priv
->bar
[1].vm
, priv
->bar
[1].pgd
);
177 nvkm_gpuobj_ref(NULL
, &priv
->bar
[1].pgd
);
178 nvkm_gpuobj_ref(NULL
, &priv
->bar
[1].mem
);
180 if (priv
->bar
[0].vm
) {
181 nvkm_gpuobj_ref(NULL
, &priv
->bar
[0].vm
->pgt
[0].obj
[0]);
182 nvkm_vm_ref(NULL
, &priv
->bar
[0].vm
, priv
->bar
[0].pgd
);
184 nvkm_gpuobj_ref(NULL
, &priv
->bar
[0].pgd
);
185 nvkm_gpuobj_ref(NULL
, &priv
->bar
[0].mem
);
187 nvkm_bar_destroy(&priv
->base
);
191 gf100_bar_init(struct nvkm_object
*object
)
193 struct gf100_bar_priv
*priv
= (void *)object
;
196 ret
= nvkm_bar_init(&priv
->base
);
200 nv_mask(priv
, 0x000200, 0x00000100, 0x00000000);
201 nv_mask(priv
, 0x000200, 0x00000100, 0x00000100);
203 nv_wr32(priv
, 0x001704, 0x80000000 | priv
->bar
[1].mem
->addr
>> 12);
204 if (priv
->bar
[0].mem
)
205 nv_wr32(priv
, 0x001714,
206 0xc0000000 | priv
->bar
[0].mem
->addr
>> 12);
212 .handle
= NV_SUBDEV(BAR
, 0xc0),
213 .ofuncs
= &(struct nvkm_ofuncs
) {
214 .ctor
= gf100_bar_ctor
,
215 .dtor
= gf100_bar_dtor
,
216 .init
= gf100_bar_init
,
217 .fini
= _nvkm_bar_fini
,