/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/bar.h>
/******************************************************************************
 * instmem object base implementation
 *****************************************************************************/
#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
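
/*
 * Wrapper around a backing nvkm_memory object.  Most operations are
 * forwarded to ->parent; ->suspend holds a shadow copy of the object's
 * contents across an instmem fini/init (suspend/resume) cycle, and ->map
 * caches the kernel mapping once the object has been mapped.
 */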
struct nvkm_instobj {
        struct nvkm_memory memory;
        struct nvkm_memory *parent;
        struct nvkm_instmem *imem;
        struct list_head head;
        u32 *suspend;
        void __iomem *map;
};
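
/* Pass-through queries, answered by the backing memory object. */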
static enum nvkm_memory_target
nvkm_instobj_target(struct nvkm_memory *memory)
{
        memory = nvkm_instobj(memory)->parent;
        return nvkm_memory_target(memory);
}
static u64
nvkm_instobj_addr(struct nvkm_memory *memory)
{
        memory = nvkm_instobj(memory)->parent;
        return nvkm_memory_addr(memory);
}
static u64
nvkm_instobj_size(struct nvkm_memory *memory)
{
        memory = nvkm_instobj(memory)->parent;
        return nvkm_memory_size(memory);
}
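
/*
 * Fast-path accessors, installed once the object has been mapped by
 * nvkm_instobj_acquire_slow(): rd32/wr32 go straight through the kernel
 * mapping, and release flushes the BAR to push out any writes.
 */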
static void
nvkm_instobj_release(struct nvkm_memory *memory)
{
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
        nvkm_bar_flush(iobj->imem->subdev.device->bar);
}
static void __iomem *
nvkm_instobj_acquire(struct nvkm_memory *memory)
{
        return nvkm_instobj(memory)->map;
}
static u32
nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        return ioread32_native(nvkm_instobj(memory)->map + offset);
}
static void
nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        iowrite32_native(data, nvkm_instobj(memory)->map + offset);
}
static void
nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
        memory = nvkm_instobj(memory)->parent;
        nvkm_memory_map(memory, vma, offset);
}
static void *
nvkm_instobj_dtor(struct nvkm_memory *memory)
{
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
        spin_lock(&iobj->imem->lock);
        list_del(&iobj->head);
        spin_unlock(&iobj->imem->lock);
        nvkm_memory_del(&iobj->parent);
        return iobj;
}
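
/* Function table used once the object has a permanent kernel mapping. */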
static const struct nvkm_memory_func
nvkm_instobj_func = {
        .dtor = nvkm_instobj_dtor,
        .target = nvkm_instobj_target,
        .addr = nvkm_instobj_addr,
        .size = nvkm_instobj_size,
        .acquire = nvkm_instobj_acquire,
        .release = nvkm_instobj_release,
        .rd32 = nvkm_instobj_rd32,
        .wr32 = nvkm_instobj_wr32,
        .map = nvkm_instobj_map,
};
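
/*
 * Slow-path implementation, used until the object is first mapped: all
 * accesses are routed through the parent object's own accessors.
 */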
static void
nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
{
        memory = nvkm_instobj(memory)->parent;
        nvkm_memory_boot(memory, vm);
}
static void
nvkm_instobj_release_slow(struct nvkm_memory *memory)
{
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
        nvkm_instobj_release(memory);
        nvkm_done(iobj->parent);
}
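
/*
 * First acquire: map the backing object and, if that succeeds, switch
 * the object over to the fast-path function table so later accesses
 * avoid the indirection through the parent.
 */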
static void __iomem *
nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
{
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
        iobj->map = nvkm_kmap(iobj->parent);
        if (iobj->map)
                memory->func = &nvkm_instobj_func;
        return iobj->map;
}
static u32
nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
        return nvkm_ro32(iobj->parent, offset);
}
static void
nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct nvkm_instobj *iobj = nvkm_instobj(memory);
        return nvkm_wo32(iobj->parent, offset, data);
}
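
/* Initial function table; acquire_slow() swaps it for nvkm_instobj_func. */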
static const struct nvkm_memory_func
nvkm_instobj_func_slow = {
        .dtor = nvkm_instobj_dtor,
        .target = nvkm_instobj_target,
        .addr = nvkm_instobj_addr,
        .size = nvkm_instobj_size,
        .boot = nvkm_instobj_boot,
        .acquire = nvkm_instobj_acquire_slow,
        .release = nvkm_instobj_release_slow,
        .rd32 = nvkm_instobj_rd32_slow,
        .wr32 = nvkm_instobj_wr32_slow,
        .map = nvkm_instobj_map,
};
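
/*
 * Allocate a new instance object.  The backend provides the actual
 * memory; unless the backend guarantees persistent mappings, the result
 * is wrapped in an nvkm_instobj so its contents can be saved/restored
 * across suspend.  If the backend doesn't zero allocations itself, any
 * requested zeroing is done here through the usual accessors.
 */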
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
                 struct nvkm_memory **pmemory)
{
        struct nvkm_memory *memory = NULL;
        struct nvkm_instobj *iobj;
        u32 offset;
        int ret;

        ret = imem->func->memory_new(imem, size, align, zero, &memory);
        if (ret)
                goto done;

        if (!imem->func->persistent) {
                if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
                        ret = -ENOMEM;
                        goto done;
                }

                nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
                iobj->parent = memory;
                iobj->imem = imem;
                spin_lock(&iobj->imem->lock);
                list_add_tail(&iobj->head, &imem->list);
                spin_unlock(&iobj->imem->lock);
                memory = &iobj->memory;
        }

        if (!imem->func->zero && zero) {
                void __iomem *map = nvkm_kmap(memory);
                if (unlikely(!map)) {
                        for (offset = 0; offset < size; offset += 4)
                                nvkm_wo32(memory, offset, 0x00000000);
                } else {
                        memset_io(map, 0x00, size);
                        nvkm_done(memory);
                }
        }

done:
        if (ret)
                nvkm_memory_del(&memory);
        *pmemory = memory;
        return ret;
}
/******************************************************************************
 * instmem subdev base implementation
 *****************************************************************************/
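
/* Direct read/write of the instmem aperture, via the backend. */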
u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
        return imem->func->rd32(imem, addr);
}
void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
        return imem->func->wr32(imem, addr, data);
}
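
/*
 * On suspend, copy each wrapped object's contents into a vmalloc'd
 * shadow buffer so nvkm_instmem_init() can restore them on resume.
 */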
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
        struct nvkm_instobj *iobj;
        int i;

        if (imem->func->fini)
                imem->func->fini(imem);

        if (suspend) {
                list_for_each_entry(iobj, &imem->list, head) {
                        struct nvkm_memory *memory = iobj->parent;
                        u64 size = nvkm_memory_size(memory);

                        iobj->suspend = vmalloc(size);
                        if (!iobj->suspend)
                                return -ENOMEM;

                        for (i = 0; i < size; i += 4)
                                iobj->suspend[i / 4] = nvkm_ro32(memory, i);
                }
        }

        return 0;
}
static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
        if (imem->func->oneinit)
                return imem->func->oneinit(imem);
        return 0;
}
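
/* On resume, write back and free any shadow copies taken at suspend. */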
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
        struct nvkm_instobj *iobj;
        int i;

        list_for_each_entry(iobj, &imem->list, head) {
                if (iobj->suspend) {
                        struct nvkm_memory *memory = iobj->parent;
                        u64 size = nvkm_memory_size(memory);
                        for (i = 0; i < size; i += 4)
                                nvkm_wo32(memory, i, iobj->suspend[i / 4]);
                        vfree(iobj->suspend);
                        iobj->suspend = NULL;
                }
        }

        return 0;
}
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
        if (imem->func->dtor)
                return imem->func->dtor(imem);
        return imem;
}
static const struct nvkm_subdev_func
nvkm_instmem = {
        .dtor = nvkm_instmem_dtor,
        .oneinit = nvkm_instmem_oneinit,
        .init = nvkm_instmem_init,
        .fini = nvkm_instmem_fini,
};
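
/* Common constructor for instmem backends. */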
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
                  struct nvkm_device *device, int index,
                  struct nvkm_instmem *imem)
{
        nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
        imem->func = func;
        spin_lock_init(&imem->lock);
        INIT_LIST_HEAD(&imem->list);
}