/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"

#include <core/ramht.h>
#include <engine/gr/nv40.h>

struct nv40_instmem {
        struct nvkm_instmem base;
        struct nvkm_mm heap;    /* allocator for the reserved PRAMIN area */
        void __iomem *iomem;    /* CPU mapping of the PRAMIN BAR */
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)

struct nv40_instobj {
        struct nvkm_instobj base;
        struct nv40_instmem *imem;
        struct nvkm_mm_node *node;
};

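/* Raw accessors: an object's backing store lives inside the reserved
 * PRAMIN area, so reads and writes go straight through the BAR mapping
 * at the object's node offset plus the caller's offset.
 */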
static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}

static u32
nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}

static const struct nvkm_memory_ptrs
nv40_instobj_ptrs = {
        .rd32 = nv40_instobj_rd32,
        .wr32 = nv40_instobj_wr32,
};

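/* acquire() hands back a direct CPU pointer into the PRAMIN mapping;
 * nothing needs pinning or migrating on these chips, so release() only
 * needs to ensure posted writes are flushed (the write barrier below).
 */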
static void
nv40_instobj_release(struct nvkm_memory *memory)
{
        wmb();
}

static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        return iobj->imem->iomem + iobj->node->offset;
}

static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
        return nv40_instobj(memory)->node->length;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
        return nv40_instobj(memory)->node->offset;
}

static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_INST;
}

static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
        struct nv40_instobj *iobj = nv40_instobj(memory);
        mutex_lock(&iobj->imem->base.subdev.mutex);
        nvkm_mm_free(&iobj->imem->heap, &iobj->node);
        mutex_unlock(&iobj->imem->base.subdev.mutex);
        nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
        return iobj;
}

static const struct nvkm_memory_func
nv40_instobj_func = {
        .dtor = nv40_instobj_dtor,
        .target = nv40_instobj_target,
        .size = nv40_instobj_size,
        .addr = nv40_instobj_addr,
        .acquire = nv40_instobj_acquire,
        .release = nv40_instobj_release,
};

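/* Object allocation carves a node out of the reserved heap under the
 * subdev mutex; an align of zero is treated as byte alignment
 * (align ? align : 1).
 */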
static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                 struct nvkm_memory **pmemory)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        struct nv40_instobj *iobj;
        int ret;

        if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
                return -ENOMEM;
        *pmemory = &iobj->base.memory;

        nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
        iobj->base.memory.ptrs = &nv40_instobj_ptrs;
        iobj->imem = imem;

        mutex_lock(&imem->base.subdev.mutex);
        ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
                           align ? align : 1, &iobj->node);
        mutex_unlock(&imem->base.subdev.mutex);
        return ret;
}

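/* Sketch of the usual path into the allocator above, assuming the
 * standard nvkm dispatch (not shown in this file): callers use
 * nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, align, zero,
 * &memory), which routes to the subdev's .memory_new, i.e.
 * nv40_instobj_new(); oneinit() below uses that same call to carve
 * out its fixed ranges.
 */
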
/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

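/* Subdev-level accessors take absolute offsets into the PRAMIN
 * aperture, unlike the object accessors above, which are relative to
 * an object's heap node.
 */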
static u32
nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
{
        return ioread32_native(nv40_instmem(base)->iomem + addr);
}

static void
nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
{
        iowrite32_native(data, nv40_instmem(base)->iomem + addr);
}

static int
nv40_instmem_oneinit(struct nvkm_instmem *base)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        struct nvkm_device *device = imem->base.subdev.device;
        int ret, vs;

        /* PRAMIN aperture maps over the end of vram, reserve enough space
         * to fit graphics contexts for every channel, the magics come
         * from engine/gr/nv40.c
         */
        vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
        if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
        else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
        else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
        else                              imem->base.reserved = 0x4a40 * vs;
        imem->base.reserved += 16 * 1024;
        imem->base.reserved *= 32;              /* per-channel */
        imem->base.reserved += 512 * 1024;      /* pci(e)gart table */
        imem->base.reserved += 512 * 1024;      /* object storage */
        imem->base.reserved = round_up(imem->base.reserved, 4096);

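        /* Worked example, for a hypothetical chipset 0x40 board with
         * vs == 1: (0x6aa0 + 16K) * 32 = 1,397,760 bytes of context
         * space, plus 2 * 512K = 2,446,336 bytes, rounded up to
         * 2,449,408 (~2.3MiB) of PRAMIN reserved.
         */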
        ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
        if (ret)
                return ret;

        /* 0x00000-0x10000: reserve for probable vbios image */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
                              &imem->base.vbios);
        if (ret)
                return ret;

        /* 0x10000-0x18000: reserve for RAMHT */
        ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
        if (ret)
                return ret;

        /* 0x18000-0x18200: reserve for RAMRO
         * 0x18200-0x20000: padding
         */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
                              &imem->base.ramro);
        if (ret)
                return ret;

        /* 0x20000-0x21000: reserve for RAMFC
         * 0x21000-0x40000: padding and some unknown crap
         */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
                              &imem->base.ramfc);
        if (ret)
                return ret;

        return 0;
}

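/* Teardown drops the fixed allocations in the reverse order oneinit()
 * created them, then tears down the heap and unmaps the PRAMIN BAR.
 */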
static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
        struct nv40_instmem *imem = nv40_instmem(base);
        nvkm_memory_unref(&imem->base.ramfc);
        nvkm_memory_unref(&imem->base.ramro);
        nvkm_ramht_del(&imem->base.ramht);
        nvkm_memory_unref(&imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        if (imem->iomem)
                iounmap(imem->iomem);
        return imem;
}

static const struct nvkm_instmem_func
nv40_instmem = {
        .dtor = nv40_instmem_dtor,
        .oneinit = nv40_instmem_oneinit,
        .rd32 = nv40_instmem_rd32,
        .wr32 = nv40_instmem_wr32,
        .memory_new = nv40_instobj_new,
};

int
nv40_instmem_new(struct nvkm_device *device, int index,
                 struct nvkm_instmem **pimem)
{
        struct nv40_instmem *imem;
        int bar;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
        *pimem = &imem->base;

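        /* PRAMIN is reachable through BAR2 where the board exposes one,
         * with BAR3 as the fallback.
         */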
        if (device->func->resource_size(device, 2))
                bar = 2;
        else
                bar = 3;

        imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
                                 device->func->resource_size(device, bar));
        if (!imem->iomem) {
                nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
                return -EFAULT;
        }

        return 0;
}