/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/memory.h>
#include <core/option.h>

#include <subdev/bios.h>
#include <subdev/bios/M0203.h>

#include <engine/gr.h>
#include <engine/mpeg.h>
35 nvkm_fb_tile_fini(struct nvkm_fb
*fb
, int region
, struct nvkm_fb_tile
*tile
)
37 fb
->func
->tile
.fini(fb
, region
, tile
);
41 nvkm_fb_tile_init(struct nvkm_fb
*fb
, int region
, u32 addr
, u32 size
,
42 u32 pitch
, u32 flags
, struct nvkm_fb_tile
*tile
)
44 fb
->func
->tile
.init(fb
, region
, addr
, size
, pitch
, flags
, tile
);
48 nvkm_fb_tile_prog(struct nvkm_fb
*fb
, int region
, struct nvkm_fb_tile
*tile
)
50 struct nvkm_device
*device
= fb
->subdev
.device
;
51 if (fb
->func
->tile
.prog
) {
52 fb
->func
->tile
.prog(fb
, region
, tile
);
54 nvkm_engine_tile(&device
->gr
->engine
, region
);
56 nvkm_engine_tile(device
->mpeg
, region
);
61 nvkm_fb_bios_memtype(struct nvkm_bios
*bios
)
63 struct nvkm_subdev
*subdev
= &bios
->subdev
;
64 struct nvkm_device
*device
= subdev
->device
;
65 const u8 ramcfg
= (nvkm_rd32(device
, 0x101000) & 0x0000003c) >> 2;
66 struct nvbios_M0203E M0203E
;
69 if (nvbios_M0203Em(bios
, ramcfg
, &ver
, &hdr
, &M0203E
)) {
70 switch (M0203E
.type
) {
71 case M0203E_TYPE_DDR2
: return NVKM_RAM_TYPE_DDR2
;
72 case M0203E_TYPE_DDR3
: return NVKM_RAM_TYPE_DDR3
;
73 case M0203E_TYPE_GDDR3
: return NVKM_RAM_TYPE_GDDR3
;
74 case M0203E_TYPE_GDDR5
: return NVKM_RAM_TYPE_GDDR5
;
75 case M0203E_TYPE_GDDR5X
: return NVKM_RAM_TYPE_GDDR5X
;
76 case M0203E_TYPE_GDDR6
: return NVKM_RAM_TYPE_GDDR6
;
77 case M0203E_TYPE_HBM2
: return NVKM_RAM_TYPE_HBM2
;
79 nvkm_warn(subdev
, "M0203E type %02x\n", M0203E
.type
);
80 return NVKM_RAM_TYPE_UNKNOWN
;
84 nvkm_warn(subdev
, "M0203E not matched!\n");
85 return NVKM_RAM_TYPE_UNKNOWN
;
89 nvkm_fb_intr(struct nvkm_subdev
*subdev
)
91 struct nvkm_fb
*fb
= nvkm_fb(subdev
);
97 nvkm_fb_oneinit(struct nvkm_subdev
*subdev
)
99 struct nvkm_fb
*fb
= nvkm_fb(subdev
);
102 if (fb
->func
->ram_new
) {
103 int ret
= fb
->func
->ram_new(fb
, &fb
->ram
);
105 nvkm_error(subdev
, "vram setup failed, %d\n", ret
);
110 if (fb
->func
->oneinit
) {
111 int ret
= fb
->func
->oneinit(fb
);
116 /* Initialise compression tag allocator.
118 * LTC oneinit() will override this on Fermi and newer.
120 if (fb
->func
->tags
) {
121 tags
= fb
->func
->tags(fb
);
122 nvkm_debug(subdev
, "%d comptags\n", tags
);
125 return nvkm_mm_init(&fb
->tags
, 0, 0, tags
, 1);
129 nvkm_fb_init_scrub_vpr(struct nvkm_fb
*fb
)
131 struct nvkm_subdev
*subdev
= &fb
->subdev
;
134 nvkm_debug(subdev
, "VPR locked, running scrubber binary\n");
136 if (!fb
->vpr_scrubber
.size
) {
137 nvkm_warn(subdev
, "VPR locked, but no scrubber binary!\n");
141 ret
= fb
->func
->vpr
.scrub(fb
);
143 nvkm_error(subdev
, "VPR scrubber binary failed\n");
147 if (fb
->func
->vpr
.scrub_required(fb
)) {
148 nvkm_error(subdev
, "VPR still locked after scrub!\n");
152 nvkm_debug(subdev
, "VPR scrubber binary successful\n");
157 nvkm_fb_init(struct nvkm_subdev
*subdev
)
159 struct nvkm_fb
*fb
= nvkm_fb(subdev
);
163 ret
= nvkm_ram_init(fb
->ram
);
168 for (i
= 0; i
< fb
->tile
.regions
; i
++)
169 fb
->func
->tile
.prog(fb
, i
, &fb
->tile
.region
[i
]);
174 if (fb
->func
->init_remapper
)
175 fb
->func
->init_remapper(fb
);
177 if (fb
->func
->init_page
) {
178 ret
= fb
->func
->init_page(fb
);
183 if (fb
->func
->init_unkn
)
184 fb
->func
->init_unkn(fb
);
186 if (fb
->func
->vpr
.scrub_required
&&
187 fb
->func
->vpr
.scrub_required(fb
)) {
188 ret
= nvkm_fb_init_scrub_vpr(fb
);
197 nvkm_fb_dtor(struct nvkm_subdev
*subdev
)
199 struct nvkm_fb
*fb
= nvkm_fb(subdev
);
202 nvkm_memory_unref(&fb
->mmu_wr
);
203 nvkm_memory_unref(&fb
->mmu_rd
);
205 for (i
= 0; i
< fb
->tile
.regions
; i
++)
206 fb
->func
->tile
.fini(fb
, i
, &fb
->tile
.region
[i
]);
208 nvkm_mm_fini(&fb
->tags
);
209 nvkm_ram_del(&fb
->ram
);
211 nvkm_blob_dtor(&fb
->vpr_scrubber
);
214 return fb
->func
->dtor(fb
);
218 static const struct nvkm_subdev_func
220 .dtor
= nvkm_fb_dtor
,
221 .oneinit
= nvkm_fb_oneinit
,
222 .init
= nvkm_fb_init
,
223 .intr
= nvkm_fb_intr
,
227 nvkm_fb_ctor(const struct nvkm_fb_func
*func
, struct nvkm_device
*device
,
228 int index
, struct nvkm_fb
*fb
)
230 nvkm_subdev_ctor(&nvkm_fb
, device
, index
, &fb
->subdev
);
232 fb
->tile
.regions
= fb
->func
->tile
.regions
;
233 fb
->page
= nvkm_longopt(device
->cfgopt
, "NvFbBigPage",
234 fb
->func
->default_bigpage
);
238 nvkm_fb_new_(const struct nvkm_fb_func
*func
, struct nvkm_device
*device
,
239 int index
, struct nvkm_fb
**pfb
)
241 if (!(*pfb
= kzalloc(sizeof(**pfb
), GFP_KERNEL
)))
243 nvkm_fb_ctor(func
, device
, index
, *pfb
);