/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
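
/*
 * parse_sdma_flags() renders the SDMA descriptor flag bits as a string.
 * The wrapper macro hides the trace_seq pointer 'p' that the TP_printk()
 * expansion makes available, so an event definition can simply write,
 * for example (illustrative sketch; hfi1_sdma_descriptor below is the
 * real user):
 *
 *	TP_printk("flags:%s",
 *		  __parse_sdma_flags(__entry->desc0, __entry->desc1))
 */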
#define TRACE_SYSTEM hfi1_tx
TRACE_EVENT(hfi1_piofree,
	    TP_PROTO(struct send_context *sc, int extra),
	    TP_ARGS(sc, extra),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			     __field(u32, sw_index)
			     __field(u32, hw_context)
			     __field(int, extra)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			   __entry->sw_index = sc->sw_index;
			   __entry->hw_context = sc->hw_context;
			   __entry->extra = extra;
	    ),
	    TP_printk("[%s] ctxt %u(%u) extra %d",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->extra
	    )
);
TRACE_EVENT(hfi1_wantpiointr,
	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	    TP_ARGS(sc, needint, credit_ctrl),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			     __field(u32, sw_index)
			     __field(u32, hw_context)
			     __field(u32, needint)
			     __field(u64, credit_ctrl)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			   __entry->sw_index = sc->sw_index;
			   __entry->hw_context = sc->hw_context;
			   __entry->needint = needint;
			   __entry->credit_ctrl = credit_ctrl;
	    ),
	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->needint,
		      (unsigned long long)__entry->credit_ctrl
	    )
);
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
		    TP_PROTO(struct rvt_qp *qp, u32 flags),
		    TP_ARGS(qp, flags),
		    TP_STRUCT__entry(
			DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
			__field(u32, qpn)
			__field(u32, flags)
			__field(u32, s_flags)
			__field(u32, ps_flags)
			__field(unsigned long, iow_flags)
		    ),
		    TP_fast_assign(
			DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
			__entry->flags = flags;
			__entry->qpn = qp->ibqp.qp_num;
			__entry->s_flags = qp->s_flags;
			__entry->ps_flags =
				((struct hfi1_qp_priv *)qp->priv)->s_flags;
			__entry->iow_flags =
				((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
		    ),
		    TP_printk(
			"[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
			__get_str(dev),
			__entry->qpn,
			__entry->flags,
			__entry->s_flags,
			__entry->ps_flags,
			__entry->iow_flags
		    )
);
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));
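
/*
 * DEFINE_EVENT() stamps out trace_hfi1_qpwakeup() and trace_hfi1_qpsleep()
 * from the shared class, so both events record and print the same fields.
 * A sleep/wakeup path could emit one of them as follows (sketch only; the
 * flags argument shown is illustrative):
 *
 *	trace_hfi1_qpsleep(qp, qp->s_flags);
 */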
TRACE_EVENT(hfi1_sdma_descriptor,
	    TP_PROTO(struct sdma_engine *sde,
		     u64 desc0,
		     u64 desc1,
		     u16 e,
		     void *descp),
	    TP_ARGS(sde, desc0, desc1, e, descp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(void *, descp)
			     __field(u64, desc0)
			     __field(u64, desc1)
			     __field(u16, e)
			     __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->desc0 = desc0;
			   __entry->desc1 = desc1;
			   __entry->idx = sde->this_idx;
			   __entry->descp = descp;
			   __entry->e = e;
	    ),
	    TP_printk(
		"[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
		__get_str(dev),
		__entry->idx,
		__parse_sdma_flags(__entry->desc0, __entry->desc1),
		(__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
			SDMA_DESC0_PHY_ADDR_MASK,
		(u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
			SDMA_DESC1_GENERATION_MASK),
		(u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
			SDMA_DESC0_BYTE_COUNT_MASK),
		__entry->desc0,
		__entry->desc1,
		__entry->descp,
		__entry->e
	    )
);
TRACE_EVENT(hfi1_sdma_engine_select,
	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	    TP_ARGS(dd, sel, vl, idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u32, sel)
			     __field(u8, vl)
			     __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->sel = sel;
			   __entry->vl = vl;
			   __entry->idx = idx;
	    ),
	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		      __get_str(dev),
		      __entry->idx,
		      __entry->sel,
		      __entry->vl
	    )
);
TRACE_EVENT(hfi1_sdma_user_free_queues,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	    TP_ARGS(dd, ctxt, subctxt),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
	    ),
	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt
	    )
);
TRACE_EVENT(hfi1_sdma_user_process_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
	    ),
	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx
	    )
);
DECLARE_EVENT_CLASS(/* value */
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		 u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u16, ctxt)
			 __field(u16, subctxt)
			 __field(u16, comp_idx)
			 __field(u32, value)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->ctxt = ctxt;
		       __entry->subctxt = subctxt;
		       __entry->comp_idx = comp_idx;
		       __entry->value = value;
	),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		  __get_str(dev),
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->comp_idx,
		  __entry->value
	)
);
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 tidoffset),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
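
/*
 * Each DEFINE_EVENT() above reuses hfi1_sdma_value_template, so all three
 * events record and print "[dev] SDMA [ctxt:subctxt:comp_idx] value: N".
 * A user SDMA request handler might emit one like this (sketch only; the
 * variable names are illustrative, not taken from this header):
 *
 *	trace_hfi1_sdma_user_data_length(dd, ctxt, subctxt, comp_idx, data_len);
 */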
TRACE_EVENT(hfi1_sdma_user_tid_info,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     __field(u32, tidoffset)
			     __field(u32, units)
			     __field(u8, shift)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   __entry->tidoffset = tidoffset;
			   __entry->units = units;
			   __entry->shift = shift;
	    ),
	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx,
		      __entry->tidoffset,
		      __entry->units,
		      __entry->shift
	    )
);
TRACE_EVENT(hfi1_sdma_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     unsigned long dim),
	    TP_ARGS(dd, ctxt, subctxt, dim),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(unsigned long, dim)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->dim = dim;
	    ),
	    TP_printk("[%s] SDMA from %u:%u (%lu)",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->dim
	    )
);
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
		    TP_PROTO(struct sdma_engine *sde, u64 status),
		    TP_ARGS(sde, status),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(u64, status)
				     __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->status = status;
				   __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) status %llx",
			      __get_str(dev),
			      __entry->idx,
			      (unsigned long long)__entry->status
		    )
);
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status));

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status));
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
		    TP_PROTO(struct sdma_engine *sde, int aidx),
		    TP_ARGS(sde, aidx),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(int, aidx)
				     __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->idx = sde->this_idx;
				   __entry->aidx = aidx;
		    ),
		    TP_printk("[%s] SDE(%u) aidx %d",
			      __get_str(dev),
			      __entry->idx,
			      __entry->aidx
		    )
);
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
	    ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(u64, sn)
			     __field(u16, hwhead)
			     __field(u16, swhead)
			     __field(u16, txnext)
			     __field(u16, tx_tail)
			     __field(u16, tx_head)
			     __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->hwhead = hwhead;
			   __entry->swhead = swhead;
			   __entry->tx_tail = sde->tx_tail;
			   __entry->tx_head = sde->tx_head;
			   __entry->txnext = txp ? txp->next_descq_idx : ~0;
			   __entry->idx = sde->this_idx;
			   __entry->sn = txp ? txp->sn : ~0;
	    ),
	    TP_printk(
		"[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		__get_str(dev),
		__entry->idx,
		__entry->sn,
		__entry->hwhead,
		__entry->swhead,
		__entry->txnext,
		__entry->tx_head,
		__entry->tx_tail
	    )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
	    ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __field(u16, hwhead)
			     __field(u16, swhead)
			     __field(u16, txnext)
			     __field(u16, tx_tail)
			     __field(u16, tx_head)
			     __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __entry->hwhead = hwhead;
			   __entry->swhead = swhead;
			   __entry->tx_tail = sde->tx_tail;
			   __entry->tx_head = sde->tx_head;
			   __entry->txnext = txp ? txp->next_descq_idx : ~0;
			   __entry->idx = sde->this_idx;
	    ),
	    TP_printk(
		"[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		__get_str(dev),
		__entry->idx,
		__entry->hwhead,
		__entry->swhead,
		__entry->txnext,
		__entry->tx_head,
		__entry->tx_tail
	    )
);
#endif /* CONFIG_HFI1_DEBUG_SDMA_ORDER */
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
		    TP_PROTO(struct sdma_engine *sde, u64 sn),
		    TP_ARGS(sde, sn),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
				     __field(u64, sn)
				     __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
				   __entry->sn = sn;
				   __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) sn %llu",
			      __get_str(dev),
			      __entry->idx,
			      __entry->sn
		    )
);
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn));

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn));
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
TRACE_EVENT(hfi1_sdma_user_header,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     struct hfi1_pkt_header *hdr, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u32, pbc0)
		    __field(u32, pbc1)
		    __field(u32, lrh0)
		    __field(u32, lrh1)
		    __field(u32, bth0)
		    __field(u32, bth1)
		    __field(u32, bth2)
		    __field(u32, kdeth0)
		    __field(u32, kdeth1)
		    __field(u32, kdeth2)
		    __field(u32, kdeth3)
		    __field(u32, kdeth4)
		    __field(u32, kdeth5)
		    __field(u32, kdeth6)
		    __field(u32, kdeth7)
		    __field(u32, kdeth8)
		    __field(u32, tidval)
	    ),
	    TP_fast_assign(
		    __le32 *pbc = (__le32 *)hdr->pbc;
		    __be32 *lrh = (__be32 *)hdr->lrh;
		    __be32 *bth = (__be32 *)hdr->bth;
		    __le32 *kdeth = (__le32 *)&hdr->kdeth;

		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->pbc0 = le32_to_cpu(pbc[0]);
		    __entry->pbc1 = le32_to_cpu(pbc[1]);
		    __entry->lrh0 = be32_to_cpu(lrh[0]);
		    __entry->lrh1 = be32_to_cpu(lrh[1]);
		    __entry->bth0 = be32_to_cpu(bth[0]);
		    __entry->bth1 = be32_to_cpu(bth[1]);
		    __entry->bth2 = be32_to_cpu(bth[2]);
		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
		    __entry->tidval = tidval;
	    ),
	    TP_printk(USDMA_HDR_FORMAT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->pbc0,
		      __entry->pbc1,
		      __entry->lrh0,
		      __entry->lrh1,
		      __entry->bth0,
		      __entry->bth1,
		      __entry->bth2,
		      __entry->kdeth0,
		      __entry->kdeth1,
		      __entry->kdeth2,
		      __entry->kdeth3,
		      __entry->kdeth4,
		      __entry->kdeth5,
		      __entry->kdeth6,
		      __entry->kdeth7,
		      __entry->kdeth8,
		      __entry->tidval
	    )
);
#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	    TP_ARGS(dd, ctxt, subctxt, i),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u8, ver_opcode)
		    __field(u8, iovcnt)
		    __field(u16, npkts)
		    __field(u16, fragsize)
		    __field(u16, comp_idx)
	    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->ver_opcode = i[0] & 0xff;
		    __entry->iovcnt = (i[0] >> 8) & 0xff;
		    __entry->npkts = i[1];
		    __entry->fragsize = i[2];
		    __entry->comp_idx = i[3];
	    ),
	    TP_printk(SDMA_UREQ_FMT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->ver_opcode,
		      __entry->iovcnt,
		      __entry->npkts,
		      __entry->fragsize,
		      __entry->comp_idx
	    )
);
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st)			\
	__print_symbolic(st,				\
			 usdma_complete_name(FREE),	\
			 usdma_complete_name(QUEUED),	\
			 usdma_complete_name(COMPLETE),	\
			 usdma_complete_name(ERROR))
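
/*
 * show_usdma_complete_state() feeds a { value, "name" } table to
 * __print_symbolic(), so the completion state is reported as
 * FREE/QUEUED/COMPLETE/ERROR in the trace output rather than as a raw
 * number.  hfi1_sdma_user_completion below uses it as:
 *
 *	show_usdma_complete_state(__entry->state)
 */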
TRACE_EVENT(hfi1_sdma_user_completion,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		     u8 state, int code),
	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, idx)
		    __field(u8, state)
		    __field(int, code)
	    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->idx = idx;
		    __entry->state = state;
		    __entry->code = code;
	    ),
	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		      __get_str(dev), __entry->ctxt, __entry->subctxt,
		      __entry->idx, show_usdma_complete_state(__entry->state),
		      __entry->code
	    )
);
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
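
/*
 * print_u32_array() formats 'len' u32 values from 'arr' into the
 * trace_seq, again through the implicit 'p' available inside TP_printk().
 * hfi1_sdma_user_header_ahg below uses it to dump the AHG words, e.g.:
 *
 *	__print_u32_hex(__entry->ahg, __entry->len)
 */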
TRACE_EVENT(hfi1_sdma_user_header_ahg,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u8, sde)
		    __field(u8, idx)
		    __field(int, len)
		    __field(u32, tidval)
		    __array(u32, ahg, 10)
	    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->sde = sde;
		    __entry->idx = ahgidx;
		    __entry->len = len;
		    __entry->tidval = tidval;
		    memcpy(__entry->ahg, ahg, len * sizeof(u32));
	    ),
	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->sde,
		      __entry->idx,
		      __entry->len - 1,
		      __print_u32_hex(__entry->ahg, __entry->len),
		      __entry->tidval
	    )
);
TRACE_EVENT(hfi1_sdma_state,
	    TP_PROTO(struct sdma_engine *sde,
		     const char *cstate,
		     const char *nstate),
	    TP_ARGS(sde, cstate, nstate),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
			     __string(curstate, cstate)
			     __string(newstate, nstate)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
			   __assign_str(curstate, cstate);
			   __assign_str(newstate, nstate);
	    ),
	    TP_printk("[%s] current state %s new state %s",
		      __get_str(dev),
		      __get_str(curstate),
		      __get_str(newstate)
	    )
);
720 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
724 ((struct buffer_control *)__get_dynamic_array(bct))->field \
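
/*
 * BCT() pulls one field back out of the raw buffer_control snapshot that
 * the template below copies into its dynamic array, so TP_printk() can
 * decode the structure at trace-read time rather than at trace time.
 * Example, taken from the template below: BCT(overall_shared_limit).
 */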
DECLARE_EVENT_CLASS(hfi1_bct_template,
		    TP_PROTO(struct hfi1_devdata *dd,
			     struct buffer_control *bc),
		    TP_ARGS(dd, bc),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
				     __dynamic_array(u8, bct, sizeof(*bc))
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(dd);
				   memcpy(__get_dynamic_array(bct), bc,
					  sizeof(*bc));
		    ),
		    TP_printk(BCT_FORMAT,
			      BCT(overall_shared_limit),
			      BCT(vl[0].dedicated),
			      BCT(vl[0].shared),
			      BCT(vl[1].dedicated),
			      BCT(vl[1].shared),
			      BCT(vl[2].dedicated),
			      BCT(vl[2].shared),
			      BCT(vl[3].dedicated),
			      BCT(vl[3].shared),
			      BCT(vl[4].dedicated),
			      BCT(vl[4].shared),
			      BCT(vl[5].dedicated),
			      BCT(vl[5].shared),
			      BCT(vl[6].dedicated),
			      BCT(vl[6].shared),
			      BCT(vl[7].dedicated),
			      BCT(vl[7].shared),
			      BCT(vl[15].dedicated),
			      BCT(vl[15].shared)
		    )
);
DEFINE_EVENT(hfi1_bct_template, bct_set,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));
TRACE_EVENT(/* print */
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);
DECLARE_EVENT_CLASS(/* do_send */
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(/* print */
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);
DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_tid_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);
#endif /* __HFI1_TRACE_TX_H */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>
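
/*
 * Like any tracepoint header, this file only declares the events; the
 * trace_*() symbols are emitted when one compilation unit in the driver
 * defines CREATE_TRACE_POINTS before pulling the header in, along the
 * lines of (sketch; the including file name is illustrative):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 */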