include/trace/events/rdma_core.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Trace point definitions for core RDMA functions.
 *
 * Author: Chuck Lever <chuck.lever@oracle.com>
 *
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rdma_core

#if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RDMA_CORE_H

#include <linux/tracepoint.h>
#include <rdma/ib_verbs.h>
/*
 * enum ib_poll_context, from include/rdma/ib_verbs.h
 */
#define IB_POLL_CTX_LIST \
	ib_poll_ctx(DIRECT) \
	ib_poll_ctx(SOFTIRQ) \
	ib_poll_ctx(WORKQUEUE) \
	ib_poll_ctx_end(UNBOUND_WORKQUEUE)

#undef ib_poll_ctx
#undef ib_poll_ctx_end

#define ib_poll_ctx(x)		TRACE_DEFINE_ENUM(IB_POLL_##x);
#define ib_poll_ctx_end(x)	TRACE_DEFINE_ENUM(IB_POLL_##x);

IB_POLL_CTX_LIST

#undef ib_poll_ctx
#undef ib_poll_ctx_end

#define ib_poll_ctx(x)		{ IB_POLL_##x, #x },
#define ib_poll_ctx_end(x)	{ IB_POLL_##x, #x }

#define rdma_show_ib_poll_ctx(x) \
		__print_symbolic(x, IB_POLL_CTX_LIST)
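
/*
 * The X-macro list above is expanded twice: once to emit
 * TRACE_DEFINE_ENUM() records so that user-space trace tooling can
 * resolve the enum values, and once to build the { value, "name" }
 * table that __print_symbolic() consults when an event is printed.
 */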
/**
 ** Completion Queue events
 **/
TRACE_EVENT(cq_schedule,
	TP_PROTO(
		struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		cq->timestamp = ktime_get();
		cq->interrupt = true;

		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u", __entry->cq_id)
);
TRACE_EVENT(cq_reschedule,
	TP_PROTO(
		struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		cq->timestamp = ktime_get();
		cq->interrupt = false;

		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u", __entry->cq_id)
);
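
/*
 * Note that cq_schedule and cq_reschedule deliberately have side
 * effects in TP_fast_assign(): they stamp the CQ with the dispatch
 * time and the dispatch source, which cq_process below uses to
 * report the wake-up latency and whether the wake-up came from an
 * interrupt or a rescheduled poll.
 */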
TRACE_EVENT(cq_process,
	TP_PROTO(
		const struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(bool, interrupt)
		__field(s64, latency)
	),

	TP_fast_assign(
		ktime_t latency = ktime_sub(ktime_get(), cq->timestamp);

		__entry->cq_id = cq->res.id;
		__entry->latency = ktime_to_us(latency);
		__entry->interrupt = cq->interrupt;
	),

	TP_printk("cq.id=%u wake-up took %lld [us] from %s",
		__entry->cq_id, __entry->latency,
		__entry->interrupt ? "interrupt" : "reschedule"
	)
);
TRACE_EVENT(cq_poll,
	TP_PROTO(
		const struct ib_cq *cq,
		int requested,
		int rc
	),

	TP_ARGS(cq, requested, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, requested)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
		__entry->requested = requested;
		__entry->rc = rc;
	),

	TP_printk("cq.id=%u requested %d, returned %d",
		__entry->cq_id, __entry->requested, __entry->rc
	)
);
TRACE_EVENT(cq_drain_complete,
	TP_PROTO(
		const struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u",
		__entry->cq_id
	)
);
TRACE_EVENT(cq_modify,
	TP_PROTO(
		const struct ib_cq *cq,
		u16 comps,
		u16 usec
	),

	TP_ARGS(cq, comps, usec),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, comps)
		__field(unsigned int, usec)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
		__entry->comps = comps;
		__entry->usec = usec;
	),

	TP_printk("cq.id=%u comps=%u usec=%u",
		__entry->cq_id, __entry->comps, __entry->usec
	)
);
TRACE_EVENT(cq_alloc,
	TP_PROTO(
		const struct ib_cq *cq,
		int nr_cqe,
		int comp_vector,
		enum ib_poll_context poll_ctx
	),

	TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, nr_cqe)
		__field(int, comp_vector)
		__field(unsigned long, poll_ctx)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
		__entry->nr_cqe = nr_cqe;
		__entry->comp_vector = comp_vector;
		__entry->poll_ctx = poll_ctx;
	),

	TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%s",
		__entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
		rdma_show_ib_poll_ctx(__entry->poll_ctx)
	)
);
TRACE_EVENT(cq_alloc_error,
	TP_PROTO(
		int nr_cqe,
		int comp_vector,
		enum ib_poll_context poll_ctx,
		int rc
	),

	TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, nr_cqe)
		__field(int, comp_vector)
		__field(unsigned long, poll_ctx)
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->nr_cqe = nr_cqe;
		__entry->comp_vector = comp_vector;
		__entry->poll_ctx = poll_ctx;
	),

	TP_printk("nr_cqe=%d comp_vector=%d poll_ctx=%s rc=%d",
		__entry->nr_cqe, __entry->comp_vector,
		rdma_show_ib_poll_ctx(__entry->poll_ctx), __entry->rc
	)
);
TRACE_EVENT(cq_free,
	TP_PROTO(
		const struct ib_cq *cq
	),

	TP_ARGS(cq),

	TP_STRUCT__entry(
		__field(u32, cq_id)
	),

	TP_fast_assign(
		__entry->cq_id = cq->res.id;
	),

	TP_printk("cq.id=%u", __entry->cq_id)
);
/**
 ** Memory Region events
 **/

/*
 * enum ib_mr_type, from include/rdma/ib_verbs.h
 */
#define IB_MR_TYPE_LIST \
	ib_mr_type_item(MEM_REG) \
	ib_mr_type_item(SG_GAPS) \
	ib_mr_type_item(DM) \
	ib_mr_type_item(USER) \
	ib_mr_type_item(DMA) \
	ib_mr_type_end(INTEGRITY)

#undef ib_mr_type_item
#undef ib_mr_type_end

#define ib_mr_type_item(x)	TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
#define ib_mr_type_end(x)	TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);

IB_MR_TYPE_LIST

#undef ib_mr_type_item
#undef ib_mr_type_end

#define ib_mr_type_item(x)	{ IB_MR_TYPE_##x, #x },
#define ib_mr_type_end(x)	{ IB_MR_TYPE_##x, #x }

#define rdma_show_ib_mr_type(x) \
		__print_symbolic(x, IB_MR_TYPE_LIST)
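
/*
 * The MR allocation events below accept the raw result of the
 * registration call: as the IS_ERR() checks in TP_fast_assign()
 * show, a failed allocation is recorded with mr.id=0 and the
 * errno captured in rc.
 */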
TRACE_EVENT(mr_alloc,
	TP_PROTO(
		const struct ib_pd *pd,
		enum ib_mr_type mr_type,
		u32 max_num_sg,
		const struct ib_mr *mr
	),

	TP_ARGS(pd, mr_type, max_num_sg, mr),

	TP_STRUCT__entry(
		__field(u32, pd_id)
		__field(u32, mr_id)
		__field(u32, max_num_sg)
		__field(int, rc)
		__field(unsigned long, mr_type)
	),

	TP_fast_assign(
		__entry->pd_id = pd->res.id;
		if (IS_ERR(mr)) {
			__entry->mr_id = 0;
			__entry->rc = PTR_ERR(mr);
		} else {
			__entry->mr_id = mr->res.id;
			__entry->rc = 0;
		}
		__entry->max_num_sg = max_num_sg;
		__entry->mr_type = mr_type;
	),

	TP_printk("pd.id=%u mr.id=%u type=%s max_num_sg=%u rc=%d",
		__entry->pd_id, __entry->mr_id,
		rdma_show_ib_mr_type(__entry->mr_type),
		__entry->max_num_sg, __entry->rc)
);
TRACE_EVENT(mr_integ_alloc,
	TP_PROTO(
		const struct ib_pd *pd,
		u32 max_num_data_sg,
		u32 max_num_meta_sg,
		const struct ib_mr *mr
	),

	TP_ARGS(pd, max_num_data_sg, max_num_meta_sg, mr),

	TP_STRUCT__entry(
		__field(u32, pd_id)
		__field(u32, mr_id)
		__field(u32, max_num_data_sg)
		__field(u32, max_num_meta_sg)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->pd_id = pd->res.id;
		if (IS_ERR(mr)) {
			__entry->mr_id = 0;
			__entry->rc = PTR_ERR(mr);
		} else {
			__entry->mr_id = mr->res.id;
			__entry->rc = 0;
		}
		__entry->max_num_data_sg = max_num_data_sg;
		__entry->max_num_meta_sg = max_num_meta_sg;
	),

	TP_printk("pd.id=%u mr.id=%u max_num_data_sg=%u max_num_meta_sg=%u rc=%d",
		__entry->pd_id, __entry->mr_id, __entry->max_num_data_sg,
		__entry->max_num_meta_sg, __entry->rc)
);
TRACE_EVENT(mr_dereg,
	TP_PROTO(
		const struct ib_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, id)
	),

	TP_fast_assign(
		__entry->id = mr->res.id;
	),

	TP_printk("mr.id=%u", __entry->id)
);
#endif /* _TRACE_RDMA_CORE_H */

#include <trace/define_trace.h>
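
/*
 * Usage sketch: once the kernel is built with these tracepoints, the
 * events appear under the "rdma_core" system in tracefs and can be
 * enabled at runtime. A minimal example, assuming tracefs is mounted
 * at /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/events/rdma_core/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */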