/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
/* For debugging crashes, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
 *
 * to log the cmdstream in a format that is understood by the
 * freedreno/cffdump utility.  By comparing the last successfully completed
 * fence #, to the cmdstream for the next fence, you can narrow down which
 * process and submit caused the gpu crash/lockup.
 *
 * This bypasses drm_debugfs_create_files() mainly because we need to use
 * our own fops for a bit more control.  In particular, we don't want to
 * do anything if userspace doesn't have the debugfs file open.
 */
32 #ifdef CONFIG_DEBUG_FS
34 #include <linux/kfifo.h>
35 #include <linux/debugfs.h>
36 #include <linux/circ_buf.h>
37 #include <linux/wait.h>
/* Section types in the .rd stream.  Each section is written as:
 * u32 type, u32 size, then 'size' bytes of payload (see rd_write_section()).
 * The values must match what the freedreno cffdump tools expect, so only
 * append new types at the end.
 */
enum rd_sect_type {
	RD_NONE,
	RD_TEST,           /* ascii text */
	RD_CMD,            /* ascii text */
	RD_GPUADDR,        /* u32 gpuaddr, u32 size */
	RD_CONTEXT,        /* raw dump */
	RD_CMDSTREAM,      /* raw dump */
	RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
	RD_PARAM,          /* u32 param_type, u32 param_val, u32 bitlen */
	RD_FLUSH,          /* empty, clear previous params */
	RD_PROGRAM,        /* shader program, raw dump */
	RD_VERT_SHADER,
	RD_FRAG_SHADER,
	RD_BUFFER_CONTENTS, /* used below to dump cmdstream bo contents */
	RD_GPU_ID,          /* written once on open, see rd_open() */
};
/* Size of the fifo backing store.  Must be a power of 2 because the
 * CIRC_* helpers and the head/tail wrap arithmetic below rely on
 * masking with (BUF_SZ - 1).
 */
#define BUF_SZ 512

/* bytes currently queued in the fifo: */
#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
/* contiguous readable bytes before the buffer wraps: */
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
/* space available: */
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
/* contiguous writable bytes before the buffer wraps: */
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
74 struct drm_device
*dev
;
79 struct drm_info_node
*node
;
81 /* current submit to read out: */
82 struct msm_gem_submit
*submit
;
84 /* fifo access is synchronized on the producer side by
85 * struct_mutex held by submit code (otherwise we could
86 * end up w/ cmds logged in different order than they
87 * were executed). And read_lock synchronizes the reads
89 struct mutex read_lock
;
91 wait_queue_head_t fifo_event
;
97 static void rd_write(struct msm_rd_state
*rd
, const void *buf
, int sz
)
99 struct circ_buf
*fifo
= &rd
->fifo
;
100 const char *ptr
= buf
;
103 char *fptr
= &fifo
->buf
[fifo
->head
];
106 wait_event(rd
->fifo_event
, circ_space(&rd
->fifo
) > 0);
108 n
= min(sz
, circ_space_to_end(&rd
->fifo
));
109 memcpy(fptr
, ptr
, n
);
111 fifo
->head
= (fifo
->head
+ n
) & (BUF_SZ
- 1);
115 wake_up_all(&rd
->fifo_event
);
119 static void rd_write_section(struct msm_rd_state
*rd
,
120 enum rd_sect_type type
, const void *buf
, int sz
)
122 rd_write(rd
, &type
, 4);
123 rd_write(rd
, &sz
, 4);
124 rd_write(rd
, buf
, sz
);
127 static ssize_t
rd_read(struct file
*file
, char __user
*buf
,
128 size_t sz
, loff_t
*ppos
)
130 struct msm_rd_state
*rd
= file
->private_data
;
131 struct circ_buf
*fifo
= &rd
->fifo
;
132 const char *fptr
= &fifo
->buf
[fifo
->tail
];
135 mutex_lock(&rd
->read_lock
);
137 ret
= wait_event_interruptible(rd
->fifo_event
,
138 circ_count(&rd
->fifo
) > 0);
142 n
= min_t(int, sz
, circ_count_to_end(&rd
->fifo
));
143 ret
= copy_to_user(buf
, fptr
, n
);
147 fifo
->tail
= (fifo
->tail
+ n
) & (BUF_SZ
- 1);
150 wake_up_all(&rd
->fifo_event
);
153 mutex_unlock(&rd
->read_lock
);
159 static int rd_open(struct inode
*inode
, struct file
*file
)
161 struct msm_rd_state
*rd
= inode
->i_private
;
162 struct drm_device
*dev
= rd
->dev
;
163 struct msm_drm_private
*priv
= dev
->dev_private
;
164 struct msm_gpu
*gpu
= priv
->gpu
;
169 mutex_lock(&dev
->struct_mutex
);
171 if (rd
->open
|| !gpu
) {
176 file
->private_data
= rd
;
179 /* the parsing tools need to know gpu-id to know which
180 * register database to load.
182 gpu
->funcs
->get_param(gpu
, MSM_PARAM_GPU_ID
, &val
);
185 rd_write_section(rd
, RD_GPU_ID
, &gpu_id
, sizeof(gpu_id
));
188 mutex_unlock(&dev
->struct_mutex
);
192 static int rd_release(struct inode
*inode
, struct file
*file
)
194 struct msm_rd_state
*rd
= inode
->i_private
;
200 static const struct file_operations rd_debugfs_fops
= {
201 .owner
= THIS_MODULE
,
205 .release
= rd_release
,
208 int msm_rd_debugfs_init(struct drm_minor
*minor
)
210 struct msm_drm_private
*priv
= minor
->dev
->dev_private
;
211 struct msm_rd_state
*rd
;
213 /* only create on first minor: */
217 rd
= kzalloc(sizeof(*rd
), GFP_KERNEL
);
221 rd
->dev
= minor
->dev
;
222 rd
->fifo
.buf
= rd
->buf
;
224 mutex_init(&rd
->read_lock
);
227 init_waitqueue_head(&rd
->fifo_event
);
229 rd
->node
= kzalloc(sizeof(*rd
->node
), GFP_KERNEL
);
233 rd
->ent
= debugfs_create_file("rd", S_IFREG
| S_IRUGO
,
234 minor
->debugfs_root
, rd
, &rd_debugfs_fops
);
236 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/rd\n",
237 minor
->debugfs_root
->d_name
.name
);
241 rd
->node
->minor
= minor
;
242 rd
->node
->dent
= rd
->ent
;
243 rd
->node
->info_ent
= NULL
;
245 mutex_lock(&minor
->debugfs_lock
);
246 list_add(&rd
->node
->list
, &minor
->debugfs_list
);
247 mutex_unlock(&minor
->debugfs_lock
);
252 msm_rd_debugfs_cleanup(minor
);
256 void msm_rd_debugfs_cleanup(struct drm_minor
*minor
)
258 struct msm_drm_private
*priv
= minor
->dev
->dev_private
;
259 struct msm_rd_state
*rd
= priv
->rd
;
266 debugfs_remove(rd
->ent
);
269 mutex_lock(&minor
->debugfs_lock
);
270 list_del(&rd
->node
->list
);
271 mutex_unlock(&minor
->debugfs_lock
);
275 mutex_destroy(&rd
->read_lock
);
280 /* called under struct_mutex */
281 void msm_rd_dump_submit(struct msm_gem_submit
*submit
)
283 struct drm_device
*dev
= submit
->dev
;
284 struct msm_drm_private
*priv
= dev
->dev_private
;
285 struct msm_rd_state
*rd
= priv
->rd
;
292 /* writing into fifo is serialized by caller, and
293 * rd->read_lock is used to serialize the reads
295 WARN_ON(!mutex_is_locked(&dev
->struct_mutex
));
297 n
= snprintf(msg
, sizeof(msg
), "%.*s/%d: fence=%u",
298 TASK_COMM_LEN
, current
->comm
, task_pid_nr(current
),
301 rd_write_section(rd
, RD_CMD
, msg
, ALIGN(n
, 4));
303 /* could be nice to have an option (module-param?) to snapshot
304 * all the bo's associated with the submit. Handy to see vtx
305 * buffers, etc. For now just the cmdstream bo's is enough.
308 for (i
= 0; i
< submit
->nr_cmds
; i
++) {
309 uint32_t idx
= submit
->cmd
[i
].idx
;
310 uint32_t iova
= submit
->cmd
[i
].iova
;
311 uint32_t szd
= submit
->cmd
[i
].size
; /* in dwords */
312 struct msm_gem_object
*obj
= submit
->bos
[idx
].obj
;
313 const char *buf
= msm_gem_vaddr_locked(&obj
->base
);
315 buf
+= iova
- submit
->bos
[idx
].iova
;
317 rd_write_section(rd
, RD_GPUADDR
,
318 (uint32_t[2]){ iova
, szd
* 4 }, 8);
319 rd_write_section(rd
, RD_BUFFER_CONTENTS
,
322 switch (submit
->cmd
[i
].type
) {
323 case MSM_SUBMIT_CMD_IB_TARGET_BUF
:
324 /* ignore IB-targets, we've logged the buffer, the
325 * parser tool will follow the IB based on the logged
326 * buffer/gpuaddr, so nothing more to do.
329 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF
:
330 case MSM_SUBMIT_CMD_BUF
:
331 rd_write_section(rd
, RD_CMDSTREAM_ADDR
,
332 (uint32_t[2]){ iova
, szd
}, 8);