/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bar.h>
/******************************************************************************
 * instmem object base implementation
 *****************************************************************************/
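/* Restore an object's contents from its suspend buffer on resume: through
 * a direct mapping when nvkm_kmap() provides one, or word-by-word through
 * the 32-bit accessors otherwise.  The buffer is freed once restored.
 */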
static void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			nvkm_wo32(memory, i, iobj->suspend[i / 4]);
	} else {
		memcpy_toio(map, iobj->suspend, size);
	}
	nvkm_done(memory);

	kvfree(iobj->suspend);
	iobj->suspend = NULL;
}
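/* Snapshot an object's contents into a CPU-side buffer ahead of suspend,
 * reading through a direct mapping when available, or through the 32-bit
 * accessors otherwise.
 */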
static int
nvkm_instobj_save(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	iobj->suspend = kvmalloc(size, GFP_KERNEL);
	if (!iobj->suspend)
		return -ENOMEM;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			iobj->suspend[i / 4] = nvkm_ro32(memory, i);
	} else {
		memcpy_fromio(iobj->suspend, map, size);
	}
	nvkm_done(memory);
	return 0;
}
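/* Unlink a dying object from the instmem tracking list. */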
void
nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	spin_lock(&imem->lock);
	list_del(&iobj->head);
	spin_unlock(&imem->lock);
}
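/* Initialise the common object state and register the object on the
 * instmem tracking list, so it can be saved/restored across suspend.
 */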
void
nvkm_instobj_ctor(const struct nvkm_memory_func *func,
		  struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	nvkm_memory_ctor(func, &iobj->memory);
	iobj->suspend = NULL;
	spin_lock(&imem->lock);
	list_add_tail(&iobj->head, &imem->list);
	spin_unlock(&imem->lock);
}
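/* Allocate a new instance object through the backend, clearing its
 * contents here when requested and the backend doesn't guarantee
 * zeroed allocations itself.
 */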
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nvkm_subdev *subdev = &imem->subdev;
	struct nvkm_memory *memory = NULL;
	u32 offset;
	int ret;

	ret = imem->func->memory_new(imem, size, align, zero, &memory);
	if (ret) {
		nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
		goto done;
	}

	nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
		   zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));

	if (!imem->func->zero && zero) {
		void __iomem *map = nvkm_kmap(memory);
		if (unlikely(!map)) {
			for (offset = 0; offset < size; offset += 4)
				nvkm_wo32(memory, offset, 0x00000000);
		} else {
			memset_io(map, 0x00, size);
		}
		nvkm_done(memory);
	}

done:
	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}
/******************************************************************************
 * instmem subdev base implementation
 *****************************************************************************/
u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
	return imem->func->rd32(imem, addr);
}
void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
	return imem->func->wr32(imem, addr, data);
}
void
nvkm_instmem_boot(struct nvkm_instmem *imem)
{
	/* Separate bootstrapped objects from the normal list, as we need
	 * to make sure they're accessed with the slow path on suspend
	 * and resume.
	 */
	struct nvkm_instobj *iobj, *itmp;
	spin_lock(&imem->lock);
	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
		list_move_tail(&iobj->head, &imem->boot);
	}
	spin_unlock(&imem->lock);
}
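/* On suspend, save regular objects while the BAR2 fastpath is still
 * available, tear BAR2 down, then save the bootstrapped objects, which
 * must use the slow path (see nvkm_instmem_boot()).
 */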
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	if (suspend) {
		list_for_each_entry(iobj, &imem->list, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}

		nvkm_bar_bar2_fini(subdev->device);

		list_for_each_entry(iobj, &imem->boot, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}
	}

	if (imem->func->fini)
		imem->func->fini(imem);

	return 0;
}
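/* On resume, restore the bootstrapped objects through the slow path
 * first, bring BAR2 back up, then restore the remaining objects.
 */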
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	list_for_each_entry(iobj, &imem->boot, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	nvkm_bar_bar2_init(subdev->device);

	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	return 0;
}
static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->oneinit)
		return imem->func->oneinit(imem);
	return 0;
}
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->dtor)
		return imem->func->dtor(imem);
	return NULL;
}
static const struct nvkm_subdev_func
nvkm_instmem = {
	.dtor = nvkm_instmem_dtor,
	.oneinit = nvkm_instmem_oneinit,
	.init = nvkm_instmem_init,
	.fini = nvkm_instmem_fini,
};
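/* Common constructor: hooks the subdev into the device and initialises
 * the lock and object tracking lists.
 */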
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
		  struct nvkm_device *device, int index,
		  struct nvkm_instmem *imem)
{
	nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
	imem->func = func;
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
	INIT_LIST_HEAD(&imem->boot);
}