/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/notify.h>

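/* Event init/fini handlers: enable or disable the interrupt for the fault
 * buffer a notifier has been armed against (one event index per buffer).
 */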
static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], false);
}

static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], true);
}

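/* Validate a notify request against a fault buffer object and select the
 * buffer's id as the event index.
 */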
static int
nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
		     struct nvkm_notify *notify)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	if (argc == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = buffer->id;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.ctor = nvkm_fault_ntfy_ctor,
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};

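/* Top-level fault interrupt: forwarded to the chipset-specific handler. */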
static void
nvkm_fault_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	return fault->func->intr(fault);
}

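/* Suspend/resume hooks simply forward to the implementation, if provided. */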
static int
nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->fini)
		fault->func->fini(fault);
	return 0;
}

static int
nvkm_fault_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->init)
		fault->func->init(fault);
	return 0;
}

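/* Allocate a single fault buffer: query its layout from the implementation,
 * back it with instance memory sized for the reported number of entries,
 * and pin it in BAR2 so the host can read fault entries directly.
 */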
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

	/* Pin fault buffer in BAR2. */
	buffer->addr = fault->func->buffer.pin(buffer);
	if (buffer->addr == ~0ULL)
		return -EFAULT;

	return 0;
}

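/* One-time setup: create every fault buffer the implementation exposes and
 * register the event used to notify clients of pending faults.
 */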
static int
nvkm_fault_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
		if (i < fault->func->buffer.nr) {
			ret = nvkm_fault_oneinit_buffer(fault, i);
			if (ret)
				return ret;
			fault->buffer_nr = i + 1;
		}
	}

	ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
			      &fault->event);
	if (ret)
		return ret;

	if (fault->func->oneinit)
		ret = fault->func->oneinit(fault);
	return ret;
}

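/* Destructor: tear down the notifier and event, then release each buffer's
 * backing memory and the buffer itself.
 */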
static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int i;

	nvkm_notify_fini(&fault->nrpfb);
	nvkm_event_fini(&fault->event);

	for (i = 0; i < fault->buffer_nr; i++) {
		if (fault->buffer[i]) {
			nvkm_memory_unref(&fault->buffer[i]->mem);
			kfree(fault->buffer[i]);
		}
	}

	return fault;
}

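/* Subdev method table hooking the fault unit into common subdev handling. */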
static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};

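/* Common constructor, called by the chipset-specific implementations. */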
int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
		int index, struct nvkm_fault **pfault)
{
	struct nvkm_fault *fault;
	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
	fault->func = func;
	fault->user.ctor = nvkm_ufault_new;
	fault->user.base = func->user.base;
	return 0;
}