// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
/* For debugging crashes, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
 *
 * to log the cmdstream in a format that is understood by the
 * freedreno/cffdump utility.  By comparing the last successfully completed
 * fence # to the cmdstream for the next fence, you can narrow down which
 * process and submit caused the gpu crash/lockup.
 *
 * Additionally:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
 *
 * will capture just the cmdstream from submits which triggered a GPU hang.
 *
 * This bypasses drm_debugfs_create_files() mainly because we need to use
 * our own fops for a bit more control.  In particular, we don't want to
 * do anything if userspace doesn't have the debugfs file open.
 *
 * The module-param "rd_full", which defaults to false, enables snapshotting
 * all (non-written) buffers in the submit, rather than just cmdstream bo's.
 * This is useful to capture the contents of (for example) vbo's or textures,
 * or shader programs (if not emitted inline in the cmdstream).
 */
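
/* Example workflow (illustrative; the exact fence bookkeeping depends on
 * your tooling): if the GPU locks up after fence 104 completes, search the
 * captured logfile.rd for the "fence=105" RD_CMD marker emitted by
 * msm_rd_dump_submit() below; the sections that follow it belong to the
 * suspect submit.
 */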

#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <drm/drm_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);

#ifdef CONFIG_DEBUG_FS

enum rd_sect_type {
        RD_NONE,
        RD_TEST,           /* ascii text */
        RD_CMD,            /* ascii text */
        RD_GPUADDR,        /* u32 gpuaddr, u32 size */
        RD_CONTEXT,        /* raw dump */
        RD_CMDSTREAM,      /* raw dump */
        RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
        RD_PARAM,          /* u32 param_type, u32 param_val, u32 bitlen */
        RD_FLUSH,          /* empty, clear previous params */
        RD_PROGRAM,        /* shader program, raw dump */
        RD_VERT_SHADER,
        RD_FRAG_SHADER,
        RD_BUFFER_CONTENTS,
        RD_GPU_ID,
};
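
/* These section types mirror the .rd file format consumed by the
 * freedreno/cffdump tooling mentioned above, so the numeric values need to
 * stay in sync with that format.
 */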

#define BUF_SZ 512  /* should be power of 2 */

/* space used: */
#define circ_count(circ) \
        (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
#define circ_count_to_end(circ) \
        (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
/* space available: */
#define circ_space(circ) \
        (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
#define circ_space_to_end(circ) \
        (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
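
/* Worked example (illustrative): with BUF_SZ = 512, head = 8 and tail = 500,
 * circ_count() = (8 - 500) & 511 = 20 bytes are buffered, but
 * circ_count_to_end() = 12, since only 12 bytes are contiguous before the
 * buffer wraps; the remaining 8 need a second copy from offset 0.
 */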

struct msm_rd_state {
        struct drm_device *dev;

        bool open;

        /* current submit to read out: */
        struct msm_gem_submit *submit;

        /* fifo access is synchronized on the producer side by
         * struct_mutex held by submit code (otherwise we could
         * end up w/ cmds logged in a different order than they
         * were executed).  And read_lock synchronizes the reads.
         */
        struct mutex read_lock;

        wait_queue_head_t fifo_event;
        struct circ_buf fifo;

        char buf[BUF_SZ];
};
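
/* Push bytes into the fifo, blocking until the reader drains enough space.
 * If the debugfs file is closed while a writer is blocked, the remaining
 * bytes are silently dropped.
 */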
static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
{
        struct circ_buf *fifo = &rd->fifo;
        const char *ptr = buf;

        while (sz > 0) {
                char *fptr = &fifo->buf[fifo->head];
                int n;

                wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
                if (!rd->open)
                        return;

                /* Note that smp_load_acquire() is not strictly required
                 * as CIRC_SPACE_TO_END() does not access the tail more
                 * than once.
                 */
                n = min(sz, circ_space_to_end(&rd->fifo));
                memcpy(fptr, ptr, n);

                smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
                sz  -= n;
                ptr += n;

                wake_up_all(&rd->fifo_event);
        }
}
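
/* Each section in the stream is framed as a u32 section type (from enum
 * rd_sect_type), a u32 payload size in bytes, and then the payload itself.
 */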
static void rd_write_section(struct msm_rd_state *rd,
                enum rd_sect_type type, const void *buf, int sz)
{
        rd_write(rd, &type, 4);
        rd_write(rd, &sz, 4);
        rd_write(rd, buf, sz);
}
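
/* A minimal userspace reader for that framing might look like the sketch
 * below (illustrative only, not part of this driver; short reads and error
 * handling are omitted for brevity):
 *
 *      uint32_t hdr[2];
 *      while (read(fd, hdr, 8) == 8) {
 *              void *payload = malloc(hdr[1]);
 *              read(fd, payload, hdr[1]);
 *              // dispatch on hdr[0], an enum rd_sect_type value
 *              free(payload);
 *      }
 */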

static ssize_t rd_read(struct file *file, char __user *buf,
                size_t sz, loff_t *ppos)
{
        struct msm_rd_state *rd = file->private_data;
        struct circ_buf *fifo = &rd->fifo;
        const char *fptr = &fifo->buf[fifo->tail];
        int n = 0, ret = 0;

        mutex_lock(&rd->read_lock);

        ret = wait_event_interruptible(rd->fifo_event,
                        circ_count(&rd->fifo) > 0);
        if (ret)
                goto out;

        /* Note that smp_load_acquire() is not strictly required
         * as CIRC_CNT_TO_END() does not access the head more than
         * once.
         */
        n = min_t(int, sz, circ_count_to_end(&rd->fifo));
        if (copy_to_user(buf, fptr, n)) {
                ret = -EFAULT;
                goto out;
        }

        smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
        *ppos += n;

        wake_up_all(&rd->fifo_event);

out:
        mutex_unlock(&rd->read_lock);
        if (ret)
                return ret;
        return n;
}
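
/* Opening the file emits an RD_GPU_ID section first, so the parsing tools
 * know which register database to use when decoding the rest of the stream.
 * Only one opener at a time is allowed, and opening fails if no GPU is
 * present.
 */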
static int rd_open(struct inode *inode, struct file *file)
{
        struct msm_rd_state *rd = inode->i_private;
        struct drm_device *dev = rd->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;
        uint64_t val;
        uint32_t gpu_id;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        if (rd->open || !gpu) {
                ret = -EBUSY;
                goto out;
        }

        file->private_data = rd;
        rd->open = true;

        /* the parsing tools need to know gpu-id to know which
         * register database to load.
         */
        gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
        gpu_id = val;

        rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));

out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
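
/* Clearing 'open' on release also unblocks any writer stuck in rd_write(),
 * which then discards the rest of its data rather than waiting forever.
 */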
static int rd_release(struct inode *inode, struct file *file)
{
        struct msm_rd_state *rd = inode->i_private;

        rd->open = false;
        wake_up_all(&rd->fifo_event);

        return 0;
}

static const struct file_operations rd_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = rd_open,
        .read = rd_read,
        .llseek = no_llseek,
        .release = rd_release,
};
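
/* The same fops serve both the "rd" and "hangrd" nodes; rd_open() tells the
 * instances apart via inode->i_private, which rd_init() points at the
 * per-node msm_rd_state.
 */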
static void rd_cleanup(struct msm_rd_state *rd)
{
        if (!rd)
                return;

        mutex_destroy(&rd->read_lock);
        kfree(rd);
}

static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
        struct msm_rd_state *rd;

        rd = kzalloc(sizeof(*rd), GFP_KERNEL);
        if (!rd)
                return ERR_PTR(-ENOMEM);

        rd->dev = minor->dev;
        rd->fifo.buf = rd->buf;

        mutex_init(&rd->read_lock);

        init_waitqueue_head(&rd->fifo_event);

        debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
                        &rd_debugfs_fops);

        return rd;
}
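
/* Create both debugfs nodes: "rd" streams the cmdstream of every submit
 * while the file is held open, whereas "hangrd" only sees the submits that
 * triggered a GPU hang.
 */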
int msm_rd_debugfs_init(struct drm_minor *minor)
{
        struct msm_drm_private *priv = minor->dev->dev_private;
        struct msm_rd_state *rd;
        int ret;

        /* only create on first minor: */
        if (priv->rd)
                return 0;

        rd = rd_init(minor, "rd");
        if (IS_ERR(rd)) {
                ret = PTR_ERR(rd);
                goto fail;
        }

        priv->rd = rd;

        rd = rd_init(minor, "hangrd");
        if (IS_ERR(rd)) {
                ret = PTR_ERR(rd);
                goto fail;
        }

        priv->hangrd = rd;

        return 0;

fail:
        msm_rd_debugfs_cleanup(priv);
        return ret;
}

void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
        rd_cleanup(priv->rd);
        priv->rd = NULL;

        rd_cleanup(priv->hangrd);
        priv->hangrd = NULL;
}
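
/* Write an RD_GPUADDR section for one bo in the submit, and optionally its
 * contents.  Passing iova == 0 means "snapshot the whole buffer".  Note the
 * packing of the 64-bit iova in the 12-byte payload: low 32 bits first,
 * then size, then the high 32 bits.
 */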
static void snapshot_buf(struct msm_rd_state *rd,
                struct msm_gem_submit *submit, int idx,
                uint64_t iova, uint32_t size, bool full)
{
        struct msm_gem_object *obj = submit->bos[idx].obj;
        unsigned offset = 0;
        const char *buf;

        if (iova) {
                offset = iova - submit->bos[idx].iova;
        } else {
                iova = submit->bos[idx].iova;
                size = obj->base.size;
        }

        /*
         * Always write the GPUADDR header so we can get a complete list of
         * all the buffers in the cmd
         */
        rd_write_section(rd, RD_GPUADDR,
                        (uint32_t[3]){ iova, size, iova >> 32 }, 12);

        if (!full)
                return;

        /* But only dump the contents of buffers marked READ */
        if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
                return;

        buf = msm_gem_get_vaddr_active(&obj->base);
        if (IS_ERR(buf))
                return;

        buf += offset;

        rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);

        msm_gem_put_vaddr_locked(&obj->base);
}
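
/* should_dump(), used below, lives with the GPU helpers (msm_gpu.h in
 * mainline) and returns true when rd_full is set or when userspace flagged
 * the bo with MSM_SUBMIT_BO_DUMP.
 */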

/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
                const char *fmt, ...)
{
        struct drm_device *dev = submit->dev;
        struct task_struct *task;
        char msg[256];
        int i, n;

        if (!rd->open)
                return;

        /* writing into fifo is serialized by caller, and
         * rd->read_lock is used to serialize the reads
         */
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        if (fmt) {
                va_list args;

                va_start(args, fmt);
                n = vscnprintf(msg, sizeof(msg), fmt, args);
                va_end(args);

                rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
        }

        rcu_read_lock();
        task = pid_task(submit->pid, PIDTYPE_PID);
        if (task) {
                n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
                                TASK_COMM_LEN, task->comm,
                                pid_nr(submit->pid), submit->seqno);
        } else {
                n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
                                pid_nr(submit->pid), submit->seqno);
        }
        rcu_read_unlock();

        rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));

        for (i = 0; i < submit->nr_bos; i++)
                snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));

        for (i = 0; i < submit->nr_cmds; i++) {
                uint32_t szd = submit->cmd[i].size; /* in dwords */
                int idx = submit->cmd[i].idx;

                /* snapshot cmdstream bo's (if we haven't already): note
                 * that should_dump() wants the bo index, not the cmd index
                 */
                if (!should_dump(submit, idx)) {
                        snapshot_buf(rd, submit, idx,
                                        submit->cmd[i].iova, szd * 4, true);
                }
        }
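
        /*
         * Finally, emit RD_CMDSTREAM_ADDR sections pointing at each
         * cmdstream buffer so the parser knows where to start decoding;
         * like RD_GPUADDR, the payload packs the 64-bit iova as low word,
         * size (in dwords), high word.
         */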
        for (i = 0; i < submit->nr_cmds; i++) {
                uint64_t iova = submit->cmd[i].iova;
                uint32_t szd = submit->cmd[i].size; /* in dwords */

                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* ignore IB-targets, we've logged the buffer, the
                         * parser tool will follow the IB based on the logged
                         * buffer/gpuaddr, so nothing more to do.
                         */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                case MSM_SUBMIT_CMD_BUF:
                        rd_write_section(rd, RD_CMDSTREAM_ADDR,
                                        (uint32_t[3]){ iova, szd, iova >> 32 }, 12);
                        break;
                }
        }
}
#endif