drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "ramfuc.h"

#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
#include <subdev/ltc.h>
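
/*
 * Every register touched by the reclocking sequence in this file has a
 * ramfuc handle below.  ram_rd32()/ram_wr32()/ram_mask() on these handles
 * record operations into a script rather than touching the hardware
 * directly; the script is only executed when gf100_ram_prog() calls
 * ram_exec() (see ramfuc.h for the recording machinery).
 */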
struct gf100_ramfuc {
	struct ramfuc base;

	struct ramfuc_reg r_0x10fe20;
	struct ramfuc_reg r_0x10fe24;
	struct ramfuc_reg r_0x137320;
	struct ramfuc_reg r_0x137330;

	struct ramfuc_reg r_0x132000;
	struct ramfuc_reg r_0x132004;
	struct ramfuc_reg r_0x132100;

	struct ramfuc_reg r_0x137390;

	struct ramfuc_reg r_0x10f290;
	struct ramfuc_reg r_0x10f294;
	struct ramfuc_reg r_0x10f298;
	struct ramfuc_reg r_0x10f29c;
	struct ramfuc_reg r_0x10f2a0;

	struct ramfuc_reg r_0x10f300;
	struct ramfuc_reg r_0x10f338;
	struct ramfuc_reg r_0x10f340;
	struct ramfuc_reg r_0x10f344;
	struct ramfuc_reg r_0x10f348;

	struct ramfuc_reg r_0x10f910;
	struct ramfuc_reg r_0x10f914;

	struct ramfuc_reg r_0x100b0c;
	struct ramfuc_reg r_0x10f050;
	struct ramfuc_reg r_0x10f090;
	struct ramfuc_reg r_0x10f200;
	struct ramfuc_reg r_0x10f210;
	struct ramfuc_reg r_0x10f310;
	struct ramfuc_reg r_0x10f314;
	struct ramfuc_reg r_0x10f610;
	struct ramfuc_reg r_0x10f614;
	struct ramfuc_reg r_0x10f800;
	struct ramfuc_reg r_0x10f808;
	struct ramfuc_reg r_0x10f824;
	struct ramfuc_reg r_0x10f830;
	struct ramfuc_reg r_0x10f988;
	struct ramfuc_reg r_0x10f98c;
	struct ramfuc_reg r_0x10f990;
	struct ramfuc_reg r_0x10f998;
	struct ramfuc_reg r_0x10f9b0;
	struct ramfuc_reg r_0x10f9b4;
	struct ramfuc_reg r_0x10fb04;
	struct ramfuc_reg r_0x10fb08;
	struct ramfuc_reg r_0x137300;
	struct ramfuc_reg r_0x137310;
	struct ramfuc_reg r_0x137360;
	struct ramfuc_reg r_0x1373ec;
	struct ramfuc_reg r_0x1373f0;
	struct ramfuc_reg r_0x1373f8;

	struct ramfuc_reg r_0x61c140;
	struct ramfuc_reg r_0x611200;

	struct ramfuc_reg r_0x13d8f4;
};

struct gf100_ram {
	struct nvkm_ram base;
	struct gf100_ramfuc fuc;
	struct nvbios_pll refpll;
	struct nvbios_pll mempll;
};
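
/*
 * Writes a training command to 0x10f910/0x10f914 and, when bit 31 of the
 * command is set, waits for the low four bits of 0x110974 + i * 0x1000 to
 * clear for every memory partition that isn't masked off in 0x022554.
 */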
static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
	struct nvkm_fb *pfb = nvkm_fb(ram);
	u32 part = nv_rd32(pfb, 0x022438), i;
	u32 mask = nv_rd32(pfb, 0x022554);
	u32 addr = 0x110974;

	ram_wr32(fuc, 0x10f910, magic);
	ram_wr32(fuc, 0x10f914, magic);

	for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
		if (mask & (1 << i))
			continue;
		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
	}
}
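
/*
 * Build the reclocking script for the requested frequency (apparently in
 * kHz, given the freq / 1000 passed to the rammap lookup).  Configuration
 * comes from the VBIOS: the rammap entry covering the target frequency,
 * the ramcfg sub-entry selected by the board strap, and optionally a
 * timing entry.  Two clock paths exist: mode 0 divides an SPPLL source
 * down to the target, mode 1 programs the refpll/mempll pair.
 */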
static int
gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
{
	struct nvkm_clk *clk = nvkm_clk(pfb);
	struct nvkm_bios *bios = nvkm_bios(pfb);
	struct gf100_ram *ram = (void *)pfb->ram;
	struct gf100_ramfuc *fuc = &ram->fuc;
	struct nvbios_ramcfg cfg;
	u8 ver, cnt, len, strap;
	struct {
		u32 data;
		u8 size;
	} rammap, ramcfg, timing;
	int ref, div, out;
	int from, mode;
	int N1, M1, P;
	int ret;

	/* lookup memory config data relevant to the target frequency */
	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
				      &cnt, &ramcfg.size, &cfg);
	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
		nv_error(pfb, "invalid/missing rammap entry\n");
		return -EINVAL;
	}

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(nv_subdev(pfb));
	if (strap >= cnt) {
		nv_error(pfb, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
		nv_error(pfb, "invalid/missing ramcfg entry\n");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	strap = nv_ro08(bios, ramcfg.data + 0x01);
	if (strap != 0xff) {
		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
					      &cnt, &len);
		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
			nv_error(pfb, "invalid/missing timing entry\n");
			return -EINVAL;
		}
	} else {
		timing.data = 0;
	}

	ret = ram_init(fuc, pfb);
	if (ret)
		return ret;

	/* determine current mclk configuration */
	from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */

	/* determine target mclk configuration */
	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
		ref = clk->read(clk, nv_clk_src_sppll0);
	else
		ref = clk->read(clk, nv_clk_src_sppll1);
	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
	out = (ref * 2) / (div + 2);
	mode = freq != out;
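
	/*
	 * Example: ref = 1620000 (kHz) and freq = 810000 gives div = 2 and
	 * out = 810000, so mode = 0 and the divider path is taken; when no
	 * divider setting hits the target exactly, mode = 1 selects the
	 * PLL path instead.
	 */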

	ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);

	if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
		ram_nuke(fuc, 0x132000);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
	}

	if (mode == 1) {
		ram_nuke(fuc, 0x10fe20);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
	}

//	0x00020034 // 0x0000000a
	ram_wr32(fuc, 0x132100, 0x00000001);
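
	/*
	 * PLL coefficients below are packed as (P << 16) | (N << 8) | M.
	 * refpll (VBIOS PLL type 0x0c) is brought up first and, judging by
	 * ram->mempll.refclk being its target, provides the reference clock
	 * for mempll (type 0x04).
	 */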
	if (mode == 1 && from == 0) {
		/* calculate refpll */
		ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
				     ram->mempll.refclk, &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nv_error(pfb, "unable to calc refpll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010000);
		ram_wr32(fuc, 0x137320, 0x00000003);
		ram_wr32(fuc, 0x137330, 0x81200006);
		ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x10fe20, 0x20010001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);

		/* calculate mempll */
		ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
				     &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nv_error(pfb, "unable to calc mempll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010005);
		ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x132000, 0x18010101);
		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
	} else
	if (mode == 0) {
		ram_wr32(fuc, 0x137300, 0x00000003);
	}

	if (from == 0) {
		ram_nuke(fuc, 0x10fb04);
		ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
		ram_nuke(fuc, 0x10fb08);
		ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
		ram_wr32(fuc, 0x10f988, 0x2004ff00);
		ram_wr32(fuc, 0x10f98c, 0x003fc040);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00011a00);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
	} else {
		ram_wr32(fuc, 0x10f988, 0x20010000);
		ram_wr32(fuc, 0x10f98c, 0x00000000);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00010a00);
	}

	if (from == 0) {
//		0x00020039 // 0x000000ba
	}

//	0x0002003a // 0x00000002
	ram_wr32(fuc, 0x100b0c, 0x00080012);
//	0x00030014 // 0x00000000 // 0x02b5f070
//	0x00030014 // 0x00010000 // 0x02b5f070
	ram_wr32(fuc, 0x611200, 0x00003300);
//	0x00020034 // 0x0000000a
//	0x00030020 // 0x00000001 // 0x00000000

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
	ram_wr32(fuc, 0x10f210, 0x00000000);
	ram_nsec(fuc, 1000);
	if (mode == 0)
		gf100_ram_train(fuc, 0x000c1001);
	ram_wr32(fuc, 0x10f310, 0x00000001);
	ram_nsec(fuc, 1000);
	ram_wr32(fuc, 0x10f090, 0x00000061);
	ram_wr32(fuc, 0x10f090, 0xc000007f);
	ram_nsec(fuc, 1000);

	if (from == 0) {
		ram_wr32(fuc, 0x10f824, 0x00007fd4);
	} else {
		ram_wr32(fuc, 0x1373ec, 0x00020404);
	}

	if (mode == 0) {
		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
		ram_wr32(fuc, 0x10f830, 0x41500010);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
		ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00020f0f);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x137310, 0x81201616);
		ram_wr32(fuc, 0x132100, 0x00000001);
//		0x00020039 // 0x000000ba
		ram_wr32(fuc, 0x10f830, 0x00300017);
		ram_wr32(fuc, 0x1373f0, 0x00000001);
		ram_wr32(fuc, 0x10f824, 0x00007e77);
		ram_wr32(fuc, 0x132000, 0x18030001);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300220);
		ram_wr32(fuc, 0x10f300, 0x0000011d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x02060505);
		ram_wr32(fuc, 0x10f294, 0x34208288);
		ram_wr32(fuc, 0x10f298, 0x44050411);
		ram_wr32(fuc, 0x10f29c, 0x0000114c);
		ram_wr32(fuc, 0x10f2a0, 0x42e10069);
		ram_wr32(fuc, 0x10f614, 0x40044f77);
		ram_wr32(fuc, 0x10f610, 0x40044f77);
		ram_wr32(fuc, 0x10f344, 0x00600009);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700008);
		ram_wr32(fuc, 0x61c140, 0x19240000);
		ram_wr32(fuc, 0x10f830, 0x00300017);
		gf100_ram_train(fuc, 0x80021001);
		gf100_ram_train(fuc, 0x80081001);
		ram_wr32(fuc, 0x10f340, 0x00500004);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f830, 0x01300017);
		ram_wr32(fuc, 0x10f830, 0x00300017);
//		0x00030020 // 0x00000000 // 0x00000000
//		0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
	} else {
		ram_wr32(fuc, 0x10f800, 0x00001800);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373ec, 0x00020404);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x10f830, 0x40700010);
		ram_wr32(fuc, 0x10f830, 0x40500010);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373f8, 0x00000000);
		ram_wr32(fuc, 0x132100, 0x00000101);
		ram_wr32(fuc, 0x137310, 0x89201616);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00030404);
		ram_wr32(fuc, 0x1373f0, 0x00000002);
//		0x00020039 // 0x00000011
		ram_wr32(fuc, 0x132100, 0x00000001);
		ram_wr32(fuc, 0x1373f8, 0x00002000);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f808, 0x7aaa0050);
		ram_wr32(fuc, 0x10f830, 0x00500010);
		ram_wr32(fuc, 0x10f200, 0x00ce1000);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300200);
		ram_wr32(fuc, 0x10f300, 0x0000084d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x0b343825);
		ram_wr32(fuc, 0x10f294, 0x3483028e);
		ram_wr32(fuc, 0x10f298, 0x440c0600);
		ram_wr32(fuc, 0x10f29c, 0x0000214c);
		ram_wr32(fuc, 0x10f2a0, 0x42e20069);
		ram_wr32(fuc, 0x10f200, 0x00ce0000);
		ram_wr32(fuc, 0x10f614, 0x60044e77);
		ram_wr32(fuc, 0x10f610, 0x60044e77);
		ram_wr32(fuc, 0x10f340, 0x00500000);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f344, 0x00600228);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700000);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x61c140, 0x09a40000);

		gf100_ram_train(fuc, 0x800e1008);

		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f800, 0x00001804);
//		0x00030020 // 0x00000000 // 0x00000000
//		0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
		ram_nsec(fuc, 100000);
		ram_wr32(fuc, 0x10f9b0, 0x05313f41);
		ram_wr32(fuc, 0x10f9b4, 0x00002f50);

		gf100_ram_train(fuc, 0x010c1001);
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
//	0x00020016 // 0x00000000

	if (mode == 0)
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);

	return 0;
}
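
/*
 * gf100_ram_prog()/gf100_ram_tidy() execute or discard the script recorded
 * by gf100_ram_calc().  Setting the "NvMemExec" config option to false
 * skips execution, presumably as a debugging aid.
 */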
static int
gf100_ram_prog(struct nvkm_fb *pfb)
{
	struct nvkm_device *device = nv_device(pfb);
	struct gf100_ram *ram = (void *)pfb->ram;
	struct gf100_ramfuc *fuc = &ram->fuc;
	ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

static void
gf100_ram_tidy(struct nvkm_fb *pfb)
{
	struct gf100_ram *ram = (void *)pfb->ram;
	struct gf100_ramfuc *fuc = &ram->fuc;
	ram_exec(fuc, false);
}

extern const u8 gf100_pte_storage_type_map[256];

void
gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
{
	struct nvkm_ltc *ltc = nvkm_ltc(pfb);
	struct nvkm_mem *mem = *pmem;

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&pfb->base.mutex);
	if (mem->tag)
		ltc->tags_free(ltc, &mem->tag);
	__nv50_ram_put(pfb, mem);
	mutex_unlock(&pfb->base.mutex);

	kfree(mem);
}

int
gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nvkm_mem **pmem)
{
	struct nvkm_mm *mm = &pfb->vram;
	struct nvkm_mm_node *r;
	struct nvkm_mem *mem;
	int type = (memtype & 0x0ff);
	int back = (memtype & 0x800);
	const bool comp = gf100_pte_storage_type_map[type] != type;
	int ret;

	size >>= 12;
	align >>= 12;
	ncmin >>= 12;
	if (!ncmin)
		ncmin = size;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->size = size;

	mutex_lock(&pfb->base.mutex);
	if (comp) {
		struct nvkm_ltc *ltc = nvkm_ltc(pfb);

		/* compression only works with lpages */
		if (align == (1 << (17 - 12))) {
			int n = size >> 5;
			ltc->tags_alloc(ltc, n, &mem->tag);
		}

		if (unlikely(!mem->tag))
			type = gf100_pte_storage_type_map[type];
	}
	mem->memtype = type;
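
	/*
	 * Carve the requested amount out of the VRAM mm, possibly as
	 * multiple regions.  Bit 11 of memtype ("back") asks for allocation
	 * from the top of the heap (nvkm_mm_tail) instead of the bottom.
	 * All sizes here are in 4KiB pages, hence the >> 12 above.
	 */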
	do {
		if (back)
			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
		else
			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&pfb->base.mutex);
			pfb->ram->put(pfb, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&pfb->base.mutex);

	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}

int
gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, u32 maskaddr, int size,
		  void **pobject)
{
	struct nvkm_fb *pfb = nvkm_fb(parent);
	struct nvkm_bios *bios = nvkm_bios(pfb);
	struct nvkm_ram *ram;
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	u32 parts = nv_rd32(pfb, 0x022438);
	u32 pmask = nv_rd32(pfb, maskaddr);
	u32 bsize = nv_rd32(pfb, 0x10f20c);
	u32 offset, length;
	bool uniform = true;
	int ret, part;

	ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
	ram = *pobject;
	if (ret)
		return ret;

	nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
	nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);

	ram->type = nvkm_fb_bios_memtype(bios);
	ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;

	/* read amount of vram attached to each memory controller */
	for (part = 0; part < parts; part++) {
		if (!(pmask & (1 << part))) {
			u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
			if (psize != bsize) {
				if (psize < bsize)
					bsize = psize;
				uniform = false;
			}

			nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
			ram->size += (u64)psize << 20;
		}
	}
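
	/*
	 * psize and bsize appear to be in MiB: << 20 converts to bytes for
	 * ram->size, and << 8 below converts to 4KiB pages for the mm.
	 * After the loop, bsize holds the smallest amount attached to any
	 * enabled partition.
	 */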

	/* if all controllers have the same amount attached, there's no holes */
	if (uniform) {
		offset = rsvd_head;
		length = (ram->size >> 12) - rsvd_head - rsvd_tail;
		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
	} else {
		/* otherwise, address lowest common amount from 0GiB */
		ret = nvkm_mm_init(&pfb->vram, rsvd_head,
				   (bsize << 8) * parts - rsvd_head, 1);
		if (ret)
			return ret;

		/* and the rest starting from (8GiB + common_size) */
		offset = (0x0200000000ULL >> 12) + (bsize << 8);
		length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;

		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
		if (ret)
			nvkm_mm_fini(&pfb->vram);
	}

	if (ret)
		return ret;

	ram->get = gf100_ram_get;
	ram->put = gf100_ram_put;
	return 0;
}

static int
gf100_ram_init(struct nvkm_object *object)
{
	struct nvkm_fb *pfb = (void *)object->parent;
	struct gf100_ram *ram = (void *)object;
	int ret, i;

	ret = nvkm_ram_init(&ram->base);
	if (ret)
		return ret;

	/* prepare for ddr link training, and load training patterns */
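	/*
	 * 0x30 pattern slots are written, cycling through the two 12-entry
	 * tables below; each slot is loaded twice, once with bit 8 set in
	 * 0x10f920/0x10f924 and once with it clear.
	 */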
	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR5: {
		static const u8 train0[] = {
			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
		};
		static const u32 train1[] = {
			0x00000000, 0xffffffff,
			0x55555555, 0xaaaaaaaa,
			0x33333333, 0xcccccccc,
			0xf0f0f0f0, 0x0f0f0f0f,
			0x00ff00ff, 0xff00ff00,
			0x0000ffff, 0xffff0000,
		};

		for (i = 0; i < 0x30; i++) {
			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
			nv_wr32(pfb, 0x10f918, train1[i % 12]);
			nv_wr32(pfb, 0x10f91c, train1[i % 12]);
			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
			nv_wr32(pfb, 0x10f918, train1[i % 12]);
			nv_wr32(pfb, 0x10f91c, train1[i % 12]);
		}
	} break;
	default:
		break;
	}

	return 0;
}

static int
gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_bios *bios = nvkm_bios(parent);
	struct gf100_ram *ram;
	int ret;

	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
	*pobject = nv_object(ram);
	if (ret)
		return ret;

	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
	if (ret) {
		nv_error(ram, "mclk refpll data not found\n");
		return ret;
	}

	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
	if (ret) {
		nv_error(ram, "mclk pll data not found\n");
		return ret;
	}

	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR5:
		ram->base.calc = gf100_ram_calc;
		ram->base.prog = gf100_ram_prog;
		ram->base.tidy = gf100_ram_tidy;
		break;
	default:
		nv_warn(ram, "reclocking of this ram type unsupported\n");
		return 0;
	}

	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
	ram->fuc.r_0x137330 = ramfuc_reg(0x137330);

	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
	ram->fuc.r_0x132100 = ramfuc_reg(0x132100);

	ram->fuc.r_0x137390 = ramfuc_reg(0x137390);

	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);

	ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
	ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
	ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
	ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
	ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);

	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);

	ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
	ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
	ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
	ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
	ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
	ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
	ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
	ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
	ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
	ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
	ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
	ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
	ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);

	ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);

	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
	return 0;
}

struct nvkm_oclass
gf100_ram_oclass = {
	.handle = 0,
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_ram_ctor,
		.dtor = _nvkm_ram_dtor,
		.init = gf100_ram_init,
		.fini = _nvkm_ram_fini,
	},
};