/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv50.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

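/* Flush pending BAR writes: poke the flush trigger register and poll (up to
 * 2ms) for the busy bit to clear, holding the bar lock for the duration.
 */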
static void
nv50_bar_flush(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&bar->base.lock, flags);
	nvkm_wr32(device, 0x00330c, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->base.lock, flags);
}

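/* Accessor for the VMM backing BAR1 mappings, used via the function table. */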
struct nvkm_vmm *
nv50_bar_bar1_vmm(struct nvkm_bar *base)
{
	return nv50_bar(base)->bar1_vmm;
}

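/* Waiting for BAR1 accesses uses the same flush mechanism; this hook is also
 * reused for BAR2 in the function table below.
 */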
void
nv50_bar_bar1_wait(struct nvkm_bar *base)
{
	nvkm_bar_flush(base);
}

void
nv50_bar_bar1_fini(struct nvkm_bar *bar)
{
	nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
}

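/* Bind BAR1 to its channel object: write the object's instance offset with
 * the enable bit (0x80000000) set.
 */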
void
nv50_bar_bar1_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct nv50_bar *bar = nv50_bar(base);
	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
}

struct nvkm_vmm *
nv50_bar_bar2_vmm(struct nvkm_bar *base)
{
	return nv50_bar(base)->bar2_vmm;
}

void
nv50_bar_bar2_fini(struct nvkm_bar *bar)
{
	nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
}

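/* Bind BAR2: program the instance memory base address, then enable the BAR2
 * channel object the same way BAR1 is enabled above.
 */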
void
nv50_bar_bar2_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct nv50_bar *bar = nv50_bar(base);
	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
}

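/* Per-init hardware setup: clear the eight registers at 0x001900. */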
void
nv50_bar_init(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	int i;

	for (i = 0; i < 8; i++)
		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
}

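/* One-time setup: allocate the instance memory block, the padding object and
 * the page directory, then create and join the BAR2 VMM/channel first (so it
 * can be brought up immediately) and the BAR1 VMM/channel second.
 */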
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	static struct lock_class_key bar1_lock;
	static struct lock_class_key bar2_lock;
	u64 start, limit, size;
	int ret;

	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
			      &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
	if (ret)
		return ret;

	/* BAR2 */
	start = 0x0100000000ULL;
	size = device->func->resource_size(device, 3);
	if (!size)
		return -ENOMEM;
	limit = start + size;

	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
			   &bar2_lock, "bar2", &bar->bar2_vmm);
	if (ret)
		return ret;

	atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
	bar->bar2_vmm->debug = bar->base.subdev.debug;

	ret = nvkm_vmm_boot(bar->bar2_vmm);
	if (ret)
		return ret;

	ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
	if (ret)
		return ret;

	nvkm_kmap(bar->bar2);
	nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
	nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
	nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
	nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
				   upper_32_bits(start));
	nvkm_wo32(bar->bar2, 0x10, 0x00000000);
	nvkm_wo32(bar->bar2, 0x14, 0x00000000);
	nvkm_done(bar->bar2);

	bar->base.subdev.oneinit = true;
	nvkm_bar_bar2_init(device);

	/* BAR1 */
	start = 0x0000000000ULL;
	size = device->func->resource_size(device, 1);
	if (!size)
		return -ENOMEM;
	limit = start + size;

	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
			   &bar1_lock, "bar1", &bar->bar1_vmm);
	if (ret)
		return ret;

	atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
	bar->bar1_vmm->debug = bar->base.subdev.debug;

	ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
	if (ret)
		return ret;

	nvkm_kmap(bar->bar1);
	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				   upper_32_bits(start));
	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
	nvkm_done(bar->bar1);
	return 0;
}

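/* Destructor: tear down the oneinit state, but only if the instance memory
 * block was ever allocated.
 */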
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	if (bar->mem) {
		nvkm_gpuobj_del(&bar->bar1);
		nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
		nvkm_vmm_unref(&bar->bar1_vmm);
		nvkm_gpuobj_del(&bar->bar2);
		nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
		nvkm_vmm_unref(&bar->bar2_vmm);
		nvkm_gpuobj_del(&bar->pgd);
		nvkm_gpuobj_del(&bar->pad);
		nvkm_gpuobj_del(&bar->mem);
	}
	return bar;
}

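/* Shared constructor: allocates the nv50_bar wrapper and records the
 * page-directory offset used later by oneinit.
 */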
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
{
	struct nv50_bar *bar;
	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_bar_ctor(func, device, index, &bar->base);
	bar->pgd_addr = pgd_addr;
	*pbar = &bar->base;
	return 0;
}

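/* Function table for the NV50 BAR implementation; note that BAR2 reuses the
 * BAR1 wait hook, since both rely on the same flush.
 */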
static const struct nvkm_bar_func
nv50_bar_func = {
	.dtor = nv50_bar_dtor,
	.oneinit = nv50_bar_oneinit,
	.init = nv50_bar_init,
	.bar1.init = nv50_bar_bar1_init,
	.bar1.fini = nv50_bar_bar1_fini,
	.bar1.wait = nv50_bar_bar1_wait,
	.bar1.vmm = nv50_bar_bar1_vmm,
	.bar2.init = nv50_bar_bar2_init,
	.bar2.fini = nv50_bar_bar2_fini,
	.bar2.wait = nv50_bar_bar1_wait,
	.bar2.vmm = nv50_bar_bar2_vmm,
	.flush = nv50_bar_flush,
};

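/* NV50 entry point: uses the table above with a 0x1400-byte pad object in
 * front of the page directory.
 */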
int
nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
{
	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
}