/*
 * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
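/*
 * Restrack helpers for iw_cxgb4: each fill_res_*_entry() below packs
 * driver-specific state of a tracked resource (QP, CM_ID, CQ or MR) into
 * a nested RDMA_NLDEV_ATTR_DRIVER attribute of an nldev netlink message,
 * using the rdma_nl_put_driver_*() helpers from the RDMA restrack core.
 */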
#include <rdma/rdma_cm.h>

#include "iw_cxgb4.h"
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* WQ+SQ */
	if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* RQ */
	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
		      struct t4_swsqe *sqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
		goto err;
	if (sqe->complete &&
	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
/*
 * Dump the first and last pending sqes.
 */
static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
		       u16 first_idx, struct t4_swsqe *first_sqe,
		       u16 last_idx, struct t4_swsqe *last_sqe)
{
	if (!first_sqe)
		goto out;
	if (fill_swsqe(msg, sq, first_idx, first_sqe))
		goto err;
	if (!last_sqe)
		goto out;
	if (fill_swsqe(msg, sq, last_idx, last_sqe))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}
static int fill_res_qp_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
	struct t4_swsqe *fsp = NULL, *lsp = NULL;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	u16 first_sq_idx = 0, last_sq_idx = 0;
	struct t4_swsqe first_sqe, last_sqe;
	struct nlattr *table_attr;
	struct t4_wq wq;

	/* User qp state is not available, so don't dump user qps */
	if (qhp->ucontext)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&qhp->lock);
	wq = qhp->wq;

	/* If there are any pending sqes, copy the first and last */
	if (wq.sq.cidx != wq.sq.pidx) {
		first_sq_idx = wq.sq.cidx;
		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
		fsp = &first_sqe;
		last_sq_idx = wq.sq.pidx;
		if (last_sq_idx-- == 0)
			last_sq_idx = wq.sq.size - 1;
		if (last_sq_idx != first_sq_idx) {
			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
			lsp = &last_sqe;
		}
	}
	spin_unlock_irq(&qhp->lock);

	if (fill_sq(msg, &wq))
		goto err_cancel_table;

	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
		goto err_cancel_table;

	if (fill_rq(msg, &wq))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
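/*
 * The attributes above surface as driver-specific key/value pairs in nldev
 * resource dumps; with rdmatool from iproute2 they can typically be viewed
 * with something like "rdma resource show qp -dd" (the exact flag spelling
 * depends on the rdmatool version and is given here only as an example).
 */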
union union_ep {
	struct c4iw_listen_ep lep;
	struct c4iw_ep ep;
};
static int fill_res_ep_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct rdma_cm_id *cm_id = rdma_res_to_id(res);
	struct nlattr *table_attr;
	struct c4iw_ep_common *epcp;
	struct c4iw_listen_ep *listen_ep = NULL;
	struct c4iw_ep *ep = NULL;
	struct iw_cm_id *iw_cm_id;
	union union_ep *uep;

	iw_cm_id = rdma_iw_cm_id(cm_id);
	if (!iw_cm_id)
		return 0;
	epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
	if (!epcp)
		return 0;
	uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
	if (!uep)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err_free_uep;

	/* Get a consistent snapshot */
	mutex_lock(&epcp->mutex);
	if (epcp->state == LISTEN) {
		uep->lep = *(struct c4iw_listen_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		listen_ep = &uep->lep;
		epcp = &listen_ep->com;
	} else {
		uep->ep = *(struct c4iw_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		ep = &uep->ep;
		epcp = &ep->com;
	}

	if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
		goto err_cancel_table;

	if (epcp->state == LISTEN) {
		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
			goto err_cancel_table;
	} else {
		if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
			goto err_cancel_table;

		if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
							     ep->atid))
			goto err_cancel_table;
	}
	nla_nest_end(msg, table_attr);
	kfree(uep);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err_free_uep:
	kfree(uep);
	return -EMSGSIZE;
}
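/*
 * Note that fill_res_ep_entry() copies the endpoint into a heap-allocated
 * union while holding epcp->mutex and drops the mutex before any netlink
 * puts, so the skb fill never runs with the endpoint mutex held.
 */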
static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
{
	if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", cq->size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "error", cq->error))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cq->bits_type_ts)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
		    const char *qstr)
{
	if (rdma_nl_put_driver_u32(msg, qstr, idx))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "header",
				       be32_to_cpu(cqe->header)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
				       be32_to_cpu(cqe->u.gen.wrid_hi)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
				       be32_to_cpu(cqe->u.gen.wrid_low)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cqe->bits_type_ts)))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
		goto err;
	idx = cq->cidx;
	if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
		goto err;

	return 0;
err:
	return -EMSGSIZE;
}
static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	if (!cq->sw_in_use)
		return 0;

	idx = cq->sw_cidx;
	if (fill_cqe(msg, cqes, idx, "swcq_idx"))
		goto err;
	if (cq->sw_in_use == 1)
		goto out;
	idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}
static int fill_res_cq_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_cq *ibcq = container_of(res, struct ib_cq, res);
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct nlattr *table_attr;
	struct t4_cqe hwcqes[2];
	struct t4_cqe swcqes[2];
	struct t4_cq cq;
	u16 idx;

	/* User cq state is not available, so don't dump user cqs */
	if (ibcq->uobject)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&chp->lock);

	/* t4_cq struct */
	cq = chp->cq;

	/* get 2 hw cqes: cidx-1, and cidx */
	idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
	hwcqes[0] = chp->cq.queue[idx];

	idx = cq.cidx;
	hwcqes[1] = chp->cq.queue[idx];

	/* get first and last sw cqes */
	if (cq.sw_in_use) {
		swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
		if (cq.sw_in_use > 1) {
			idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
			swcqes[1] = chp->cq.sw_queue[idx];
		}
	}

	spin_unlock_irq(&chp->lock);

	if (fill_cq(msg, &cq))
		goto err_cancel_table;

	if (fill_swcqes(msg, &cq, swcqes))
		goto err_cancel_table;

	if (fill_hwcqes(msg, &cq, hwcqes))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
static int fill_res_mr_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
	struct c4iw_dev *dev = mhp->rhp;
	u32 stag = mhp->attr.stag;
	struct nlattr *table_attr;
	struct fw_ri_tpte tpte;
	int ret;

	if (!stag)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return 0;
	}

	if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "valid",
			FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "state",
			FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "pdid",
			FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "perm",
			FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "ps",
			FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64(msg, "len",
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
			FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP]	= fill_res_qp_entry,
	[RDMA_RESTRACK_CM_ID]	= fill_res_ep_entry,
	[RDMA_RESTRACK_CQ]	= fill_res_cq_entry,
	[RDMA_RESTRACK_MR]	= fill_res_mr_entry,
};