/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/anon_inodes.h>
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"
struct kfd_smi_client {
	struct list_head list;	/* entry in dev->smi_clients, RCU-protected */
	struct kfifo fifo;	/* per-client event byte stream */
	wait_queue_head_t wait_queue;	/* poll()/read() waiters */
	/* events enabled: bitmask of KFD_SMI_EVENT_MASK_FROM_INDEX() bits */
	uint64_t events;
	struct kfd_dev *dev;
	spinlock_t lock;	/* protects fifo */
};
#define MAX_KFIFO_SIZE	1024
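/*
 * Each client buffers events in its own MAX_KFIFO_SIZE-byte kfifo; an
 * event that does not fit in the remaining space is dropped for that
 * client rather than blocking the producer (see add_event_to_kfifo()).
 */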
static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *);
static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t kfd_smi_ev_write(struct file *, const char __user *, size_t,
				loff_t *);
static int kfd_smi_ev_release(struct inode *, struct file *);
static const char kfd_smi_name[] = "kfd_smi_ev";
static const struct file_operations kfd_smi_ev_fops = {
	.owner = THIS_MODULE,
	.poll = kfd_smi_ev_poll,
	.read = kfd_smi_ev_read,
	.write = kfd_smi_ev_write,
	.release = kfd_smi_ev_release
};
static __poll_t kfd_smi_ev_poll(struct file *filep,
				struct poll_table_struct *wait)
{
	struct kfd_smi_client *client = filep->private_data;
	__poll_t mask = 0;

	poll_wait(filep, &client->wait_queue, wait);

	spin_lock(&client->lock);
	if (!kfifo_is_empty(&client->fifo))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&client->lock);

	return mask;
}
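/*
 * Note that only EPOLLIN/EPOLLRDNORM are ever reported: writing the
 * event mask never blocks, so there is no need to signal writability.
 */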
static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
			       size_t size, loff_t *offset)
{
	int ret;
	size_t to_copy;
	struct kfd_smi_client *client = filep->private_data;
	unsigned char *buf;

	buf = kmalloc(MAX_KFIFO_SIZE * sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* kfifo_to_user can sleep so we can't use spinlock protection around
	 * it. Instead, we kfifo out as spinlocked then copy them to the user.
	 */
	spin_lock(&client->lock);
	to_copy = kfifo_len(&client->fifo);
	if (!to_copy) {
		spin_unlock(&client->lock);
		ret = -EAGAIN;
		goto ret_err;
	}
	/* Clamp to the allocation size; sizeof(buf) would only be the size
	 * of the pointer and would truncate every read to 8 bytes.
	 */
	to_copy = min3(size, (size_t)MAX_KFIFO_SIZE, to_copy);
	ret = kfifo_out(&client->fifo, buf, to_copy);
	spin_unlock(&client->lock);
	if (ret <= 0) {
		ret = -EAGAIN;
		goto ret_err;
	}

	ret = copy_to_user(user, buf, to_copy);
	if (ret) {
		ret = -EFAULT;
		goto ret_err;
	}

	kfree(buf);
	return to_copy;

ret_err:
	kfree(buf);
	return ret;
}
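/*
 * Events are delivered to userspace as text lines of the form
 * "<hex event id> <payload>\n"; a single read() may return several
 * concatenated lines, so consumers are expected to split on '\n'.
 */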
static ssize_t kfd_smi_ev_write(struct file *filep, const char __user *user,
				size_t size, loff_t *offset)
{
	struct kfd_smi_client *client = filep->private_data;
	uint64_t events;

	if (!access_ok(user, size) || size < sizeof(events))
		return -EFAULT;
	if (copy_from_user(&events, user, sizeof(events)))
		return -EFAULT;

	WRITE_ONCE(client->events, events);

	return sizeof(events);
}
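/*
 * A write() replaces the client's event mask wholesale rather than
 * OR-ing bits in.  A minimal userspace sketch (illustrative only,
 * using the mask macro from uapi/linux/kfd_ioctl.h):
 *
 *	uint64_t mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *	if (write(smi_fd, &mask, sizeof(mask)) != sizeof(mask))
 *		// handle error
 */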
static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
{
	struct kfd_smi_client *client = filep->private_data;
	struct kfd_dev *dev = client->dev;

	spin_lock(&dev->smi_lock);
	list_del_rcu(&client->list);
	spin_unlock(&dev->smi_lock);

	/* Wait for concurrent RCU readers in add_event_to_kfifo() to finish
	 * with this client before freeing the fifo and the client itself.
	 */
	synchronize_rcu();
	kfifo_free(&client->fifo);
	kfree(client);

	return 0;
}
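/*
 * Producers post events from paths (interrupt handling, the GPU reset
 * path) that should not contend with open()/release(), so the client
 * list is walked under RCU below; list mutations in kfd_smi_event_open()
 * and kfd_smi_ev_release() are serialized by dev->smi_lock, and each
 * client's fifo is protected by its own spinlock.
 */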
static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event,
			       char *event_msg, int len)
{
	struct kfd_smi_client *client;

	rcu_read_lock();

	list_for_each_entry_rcu(client, &dev->smi_clients, list) {
		if (!(READ_ONCE(client->events) &
		      KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event)))
			continue;
		spin_lock(&client->lock);
		if (kfifo_avail(&client->fifo) >= len) {
			kfifo_in(&client->fifo, event_msg, len);
			wake_up_all(&client->wait_queue);
		} else {
			pr_debug("smi_event(EventID: %u): no space left\n",
				 smi_event);
		}
		spin_unlock(&client->lock);
	}

	rcu_read_unlock();
}
void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
{
	/*
	 * GpuReset msg = Reset seq number (incremented for
	 * every reset message sent before GPU reset).
	 * 1 byte event + 1 byte space + 8 bytes seq num +
	 * 1 byte \n + 1 byte \0 = 12
	 */
	char fifo_in[12];
	int len;
	unsigned int event;

	if (list_empty(&dev->smi_clients))
		return;

	memset(fifo_in, 0x0, sizeof(fifo_in));

	if (post_reset) {
		event = KFD_SMI_EVENT_GPU_POST_RESET;
	} else {
		event = KFD_SMI_EVENT_GPU_PRE_RESET;
		++(dev->reset_seq_num);
	}

	len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event,
		       dev->reset_seq_num);

	add_event_to_kfifo(dev, event, fifo_in, len);
}
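/*
 * For example, the first pre-reset notification would read roughly
 * "3 1\n" (assuming KFD_SMI_EVENT_GPU_PRE_RESET == 3 in the uapi
 * header); the matching post-reset line carries the same sequence
 * number since it is only bumped before the reset.
 */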
void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
					     uint32_t throttle_bitmask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
	/*
	 * ThermalThrottle msg = throttle_bitmask(8):
	 *			 thermal_interrupt_count(16):
	 * 1 byte event + 1 byte space + 8 byte throttle_bitmask +
	 * 1 byte : + 16 byte thermal_interrupt_counter + 1 byte \n +
	 * 1 byte \0 = 29
	 */
	char fifo_in[29];
	int len;

	if (list_empty(&dev->smi_clients))
		return;

	len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%llx\n",
		       KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
		       atomic64_read(&adev->smu.throttle_int_counter));

	add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
}
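/*
 * The payload pairs the bitmask of active throttler sources with the
 * SMU's cumulative thermal-interrupt count, so a consumer can compare
 * counters across messages to distinguish new throttling events from
 * repeats.
 */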
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
	struct amdgpu_task_info task_info;
	/* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */
	/* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n +
	 * 1 byte \0 = 29
	 */
	char fifo_in[29];
	int len;

	if (list_empty(&dev->smi_clients))
		return;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, pasid, &task_info);

	/* Report VM faults from user applications, not retry from kernel */
	if (!task_info.pid)
		return;

	len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n",
		       KFD_SMI_EVENT_VMFAULT, task_info.pid,
		       task_info.task_name);

	add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len);
}
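/*
 * The emitted line carries the faulting process's PID in hex plus its
 * task name as resolved from the PASID, e.g. "1 3e8:myapp\n"
 * (hypothetical values, assuming KFD_SMI_EVENT_VMFAULT == 1).
 */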
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
{
	struct kfd_smi_client *client;
	int ret;

	client = kzalloc(sizeof(struct kfd_smi_client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	INIT_LIST_HEAD(&client->list);

	ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL);
	if (ret) {
		kfree(client);
		return ret;
	}

	ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
			       O_RDWR);
	if (ret < 0) {
		kfifo_free(&client->fifo);
		kfree(client);
		return ret;
	}
	*fd = ret;

	init_waitqueue_head(&client->wait_queue);
	spin_lock_init(&client->lock);

	client->events = 0;
	client->dev = dev;

	spin_lock(&dev->smi_lock);
	list_add_rcu(&client->list, &dev->smi_clients);
	spin_unlock(&dev->smi_lock);

	return 0;
}
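/*
 * A minimal sketch of the intended userspace flow, assuming the
 * AMDKFD_IOC_SMI_EVENTS ioctl and struct kfd_ioctl_smi_events_args
 * from uapi/linux/kfd_ioctl.h (illustrative, not a verbatim ROCm
 * example):
 *
 *	struct kfd_ioctl_smi_events_args args = { .gpuid = gpuid };
 *	ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
 *	uint64_t mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *	write(args.anon_fd, &mask, sizeof(mask));
 *	struct pollfd pfd = { .fd = args.anon_fd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0) {
 *		char buf[1024];
 *		ssize_t n = read(args.anon_fd, buf, sizeof(buf));
 *		// parse "<hex event> <payload>\n" lines from buf[0..n)
 *	}
 */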