/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include <linux/ktime.h>
#include <linux/iomap.h>
#include "incore.h"
#include "glock.h"
#include "rgrp.h"
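
/*
 * The helpers below turn numeric state and flag values into readable
 * strings in the trace output: glock_trace_name() prints DLM lock modes,
 * block_state_name() prints GFS2 block states, rs_func_name() prints the
 * reservation operation, and show_glock_flags() prints GLF_* bits as
 * single-letter codes.
 */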
#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x,		\
			    dlm_state_name(IV),		\
			    dlm_state_name(NL),		\
			    dlm_state_name(CR),		\
			    dlm_state_name(CW),		\
			    dlm_state_name(PR),		\
			    dlm_state_name(PW),		\
			    dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x,			\
			    { GFS2_BLKST_FREE, "free" },	\
			    { GFS2_BLKST_USED, "used" },	\
			    { GFS2_BLKST_DINODE, "dinode" },	\
			    { GFS2_BLKST_UNLINKED, "unlinked" })
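
/*
 * TRACE_RS_* identify which reservation operation the gfs2_rs tracepoint
 * is reporting; rs_func_name() below maps these values to the short
 * strings that appear in the trace output.
 */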
#define TRACE_RS_DELETE  0
#define TRACE_RS_TREEDEL 1
#define TRACE_RS_INSERT  2
#define TRACE_RS_CLAIM   3

#define rs_func_name(x) __print_symbolic(x,	\
			 { 0, "del " },		\
			 { 1, "tdel" },		\
			 { 2, "ins " },		\
			 { 3, "clm " })
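
/*
 * show_glock_flags() renders the GLF_* bits of gl_flags as a compact
 * string of single-letter codes, one letter per flag that is set.
 */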
#define show_glock_flags(flags) __print_flags(flags, "",	\
	{(1UL << GLF_LOCK),			"l" },		\
	{(1UL << GLF_DEMOTE),			"D" },		\
	{(1UL << GLF_PENDING_DEMOTE),		"d" },		\
	{(1UL << GLF_DEMOTE_IN_PROGRESS),	"p" },		\
	{(1UL << GLF_DIRTY),			"y" },		\
	{(1UL << GLF_LFLUSH),			"f" },		\
	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
	{(1UL << GLF_HAVE_REPLY),		"r" },		\
	{(1UL << GLF_INITIAL),			"a" },		\
	{(1UL << GLF_HAVE_FROZEN_REPLY),	"F" },		\
	{(1UL << GLF_LRU),			"L" },		\
	{(1UL << GLF_OBJECT),			"o" },		\
	{(1UL << GLF_BLOCKING),			"b" })
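
/*
 * Map the GFS2 lock states (LM_ST_*) onto the matching DLM lock modes so
 * that the tracepoints can print them with glock_trace_name(); anything
 * else (i.e. LM_ST_UNLOCKED) is reported as DLM_LOCK_NL.
 */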
#ifndef NUMPTY
#define NUMPTY
static inline u8 glock_trace_state(unsigned int state)
{
	switch(state) {
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	}
	return DLM_LOCK_NL;
}
#endif
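
/*
 * Usage sketch: these events can be enabled at runtime through tracefs,
 * typically mounted at /sys/kernel/tracing (or /sys/kernel/debug/tracing
 * on older setups), e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/events/gfs2/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * Individual events can be enabled via events/gfs2/<event>/enable.
 */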

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */

/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,

	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),

	TP_ARGS(gl, new_state),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	glnum	)
		__field(	u32,	gltype	)
		__field(	u8,	cur_state	)
		__field(	u8,	new_state	)
		__field(	u8,	dmt_state	)
		__field(	u8,	tgt_state	)
		__field(	unsigned long,	flags	)
	),

	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->new_state = glock_trace_state(new_state);
		__entry->tgt_state = glock_trace_state(gl->gl_target);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->new_state),
		  glock_trace_name(__entry->tgt_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	glnum	)
		__field(	u32,	gltype	)
		__field(	u8,	cur_state	)
		__field(	unsigned long,	flags	)
	),

	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->gltype, (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(DLM_LOCK_IV),
		  show_glock_flags(__entry->flags))
);

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,

	TP_PROTO(const struct gfs2_glock *gl, bool remote),

	TP_ARGS(gl, remote),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	glnum	)
		__field(	u32,	gltype	)
		__field(	u8,	cur_state	)
		__field(	u8,	dmt_state	)
		__field(	unsigned long,	flags	)
		__field(	bool,	remote	)
	),

	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
		__entry->remote = remote;
	),

	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags),
		  __entry->remote ? "remote" : "local")
);

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,

	TP_PROTO(const struct gfs2_holder *gh),

	TP_ARGS(gh),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	glnum	)
		__field(	u32,	gltype	)
		__field(	u8,	state	)
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu promote %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->state))
);

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,

	TP_PROTO(const struct gfs2_holder *gh, int queue),

	TP_ARGS(gh, queue),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	glnum	)
		__field(	u32,	gltype	)
		__field(	int,	queue	)
		__field(	u8,	state	)
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->queue = queue;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu %squeue %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->queue ? "" : "de",
		  glock_trace_name(__entry->state))
);
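
/*
 * The glock statistics reported below come from gl_stats: srtt/srttvar and
 * srttb/srttvarb are the smoothed DLM round-trip time estimates (and
 * variances) for non-blocking and blocking requests respectively,
 * sirt/sirtvar is the smoothed inter-request time, and dcount/qcount count
 * DLM requests and holder queues. See the GFS2_LKS_* definitions in
 * incore.h.
 */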

/* DLM sends a reply to GFS2 */
TRACE_EVENT(gfs2_glock_lock_time,

	TP_PROTO(const struct gfs2_glock *gl, s64 tdiff),

	TP_ARGS(gl, tdiff),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	glnum	)
		__field(	u32,	gltype	)
		__field(	int,	status	)
		__field(	char,	flags	)
		__field(	s64,	tdiff	)
		__field(	u64,	srtt	)
		__field(	u64,	srttvar	)
		__field(	u64,	srttb	)
		__field(	u64,	srttvarb	)
		__field(	u64,	sirt	)
		__field(	u64,	sirtvar	)
		__field(	u64,	dcount	)
		__field(	u64,	qcount	)
	),

	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->status = gl->gl_lksb.sb_status;
		__entry->flags = gl->gl_lksb.sb_flags;
		__entry->tdiff = tdiff;
		__entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
		__entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
		__entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
		__entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
		__entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
		__entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
		__entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
		__entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
	),

	TP_printk("%u,%u glock %d:%lld status:%d flags:%02x tdiff:%lld srtt:%lld/%lld srttb:%lld/%lld sirt:%lld/%lld dcnt:%lld qcnt:%lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->status, __entry->flags,
		  (long long)__entry->tdiff,
		  (long long)__entry->srtt,
		  (long long)__entry->srttvar,
		  (long long)__entry->srttb,
		  (long long)__entry->srttvarb,
		  (long long)__entry->sirt,
		  (long long)__entry->sirtvar,
		  (long long)__entry->dcount,
		  (long long)__entry->qcount)
);

/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */

/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,

	TP_PROTO(const struct gfs2_bufdata *bd, int pin),

	TP_ARGS(bd, pin),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	int,	pin	)
		__field(	u32,	len	)
		__field(	sector_t,	block	)
		__field(	u64,	ino	)
	),

	TP_fast_assign(
		__entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->pin = pin;
		__entry->len = bd->bd_bh->b_size;
		__entry->block = bd->bd_bh->b_blocknr;
		__entry->ino = bd->bd_gl->gl_name.ln_number;
	),

	TP_printk("%u,%u log %s %llu/%lu inode %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->pin ? "pin" : "unpin",
		  (unsigned long long)__entry->block,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->ino)
);

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, int start, u32 flags),

	TP_ARGS(sdp, start, flags),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	int,	start	)
		__field(	u64,	log_seq	)
		__field(	u32,	flags	)
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->log_seq = sdp->sd_log_sequence;
		__entry->flags = flags;
	),

	TP_printk("%u,%u log flush %s %llu %llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->start ? "start" : "end",
		  (unsigned long long)__entry->log_seq,
		  (unsigned long long)__entry->flags)
);

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,

	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),

	TP_ARGS(sdp, blocks),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	int,	blocks	)
		__field(	int,	blks_free	)
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->blocks = blocks;
		__entry->blks_free = atomic_read(&sdp->sd_log_blks_free);
	),

	TP_printk("%u,%u log reserve %d %d", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->blocks, __entry->blks_free)
);

/* Writing back the AIL */
TRACE_EVENT(gfs2_ail_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),

	TP_ARGS(sdp, wbc, start),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	int,	start	)
		__field(	int,	sync_mode	)
		__field(	long,	nr_to_write	)
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->sync_mode = wbc->sync_mode;
		__entry->nr_to_write = wbc->nr_to_write;
	),

	TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->start ? "start" : "end",
		  __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
		  __entry->nr_to_write)
);

/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */

/* Map an extent of blocks, possibly a new allocation */
TRACE_EVENT(gfs2_bmap,

	TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
		 sector_t lblock, int create, int errno),

	TP_ARGS(ip, bh, lblock, create, errno),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	sector_t,	lblock	)
		__field(	sector_t,	pblock	)
		__field(	u64,	inum	)
		__field(	unsigned long,	state	)
		__field(	u32,	len	)
		__field(	int,	create	)
		__field(	int,	errno	)
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->lblock = lblock;
		__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
		__entry->inum = ip->i_no_addr;
		__entry->state = bh->b_state;
		__entry->len = bh->b_size;
		__entry->create = create;
		__entry->errno = errno;
	),

	TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->lblock,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->pblock,
		  __entry->state, __entry->create ? "create " : "nocreate",
		  __entry->errno)
);
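
/*
 * The two iomap tracepoints below bracket an iomap mapping request on an
 * inode: gfs2_iomap_start records the requested position/length and flags,
 * and gfs2_iomap_end records the extent that was mapped together with the
 * return code.
 */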

TRACE_EVENT(gfs2_iomap_start,

	TP_PROTO(const struct gfs2_inode *ip, loff_t pos, ssize_t length,
		 u16 flags),

	TP_ARGS(ip, pos, length, flags),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	inum	)
		__field(	loff_t,	pos	)
		__field(	ssize_t,	length	)
		__field(	u16,	flags	)
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->inum = ip->i_no_addr;
		__entry->pos = pos;
		__entry->length = length;
		__entry->flags = flags;
	),

	TP_printk("%u,%u bmap %llu iomap start %llu/%lu flags:%08x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->pos,
		  (unsigned long)__entry->length, (u16)__entry->flags)
);

TRACE_EVENT(gfs2_iomap_end,

	TP_PROTO(const struct gfs2_inode *ip, struct iomap *iomap, int ret),

	TP_ARGS(ip, iomap, ret),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	inum	)
		__field(	loff_t,	offset	)
		__field(	ssize_t,	length	)
		__field(	sector_t,	pblock	)
		__field(	u16,	flags	)
		__field(	u16,	type	)
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->inum = ip->i_no_addr;
		__entry->offset = iomap->offset;
		__entry->length = iomap->length;
		__entry->pblock = iomap->addr == IOMAP_NULL_ADDR ? 0 :
				  (iomap->addr >> ip->i_inode.i_blkbits);
		__entry->flags = iomap->flags;
		__entry->type = iomap->type;
		__entry->ret = ret;
	),

	TP_printk("%u,%u bmap %llu iomap end %llu/%lu to %llu ty:%d flags:%08x rc:%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->offset,
		  (unsigned long)__entry->length,
		  (long long)__entry->pblock,
		  (u16)__entry->type,
		  (u16)__entry->flags, __entry->ret)
);

/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,

	TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
		 u64 block, unsigned len, u8 block_state),

	TP_ARGS(ip, rgd, block, len, block_state),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	start	)
		__field(	u64,	inum	)
		__field(	u32,	len	)
		__field(	u8,	block_state	)
		__field(	u64,	rd_addr	)
		__field(	u32,	rd_free_clone	)
		__field(	u32,	rd_requested	)
		__field(	u32,	rd_reserved	)
	),

	TP_fast_assign(
		__entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->start = block;
		__entry->inum = ip->i_no_addr;
		__entry->len = len;
		__entry->block_state = block_state;
		__entry->rd_addr = rgd->rd_addr;
		__entry->rd_free_clone = rgd->rd_free_clone;
		__entry->rd_requested = rgd->rd_requested;
		__entry->rd_reserved = rgd->rd_reserved;
	),

	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rq:%u rr:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long)__entry->len,
		  block_state_name(__entry->block_state),
		  (unsigned long long)__entry->rd_addr,
		  __entry->rd_free_clone,
		  __entry->rd_requested,
		  __entry->rd_reserved)
);

/* Keep track of multi-block reservations as they are allocated/freed */
TRACE_EVENT(gfs2_rs,

	TP_PROTO(const struct gfs2_blkreserv *rs, u8 func),

	TP_ARGS(rs, func),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	u64,	rd_addr	)
		__field(	u32,	rd_free_clone	)
		__field(	u32,	rd_requested	)
		__field(	u32,	rd_reserved	)
		__field(	u64,	inum	)
		__field(	u64,	start	)
		__field(	u32,	requested	)
		__field(	u32,	reserved	)
		__field(	u8,	func	)
	),

	TP_fast_assign(
		__entry->dev = rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
		__entry->rd_addr = rs->rs_rgd->rd_addr;
		__entry->rd_free_clone = rs->rs_rgd->rd_free_clone;
		__entry->rd_requested = rs->rs_rgd->rd_requested;
		__entry->rd_reserved = rs->rs_rgd->rd_reserved;
		__entry->inum = container_of(rs, struct gfs2_inode,
					     i_res)->i_no_addr;
		__entry->start = rs->rs_start;
		__entry->requested = rs->rs_requested;
		__entry->reserved = rs->rs_reserved;
		__entry->func = func;
	),

	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%u rq:%u rr:%u %s q:%u r:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long long)__entry->rd_addr,
		  __entry->rd_free_clone,
		  __entry->rd_requested,
		  __entry->rd_reserved,
		  rs_func_name(__entry->func),
		  __entry->requested,
		  __entry->reserved)
);

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>