/*
 * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/rdma_cm.h>

#include "iw_cxgb4.h"
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
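
/*
 * Resource-tracking (restrack) fill callbacks for the cxgb4 iWARP driver.
 * Each c4iw_fill_res_*_entry() below nests driver-specific QP, CM_ID, CQ or
 * MR state inside an RDMA_NLDEV_ATTR_DRIVER netlink attribute. Userspace
 * can typically inspect these attributes with iproute2's rdma tool when
 * driver details are requested (e.g. "rdma res show qp -dd"; the exact
 * invocation is given only as an illustration, not taken from this file).
 */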
static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* WQ+SQ */
	if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* RQ */
	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

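/*
 * fill_swsqe() dumps a single software SQ entry: its index, opcode,
 * completion state (including the CQE status once completed), and the
 * signaled/flushed flags.
 */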
static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
		      struct t4_swsqe *sqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
		goto err;
	if (sqe->complete &&
	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

/*
 * Dump the first and last pending sqes.
 */
static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
		       u16 first_idx, struct t4_swsqe *first_sqe,
		       u16 last_idx, struct t4_swsqe *last_sqe)
{
	if (!first_sqe)
		goto out;
	if (fill_swsqe(msg, sq, first_idx, first_sqe))
		goto err;
	if (!last_sqe)
		goto out;
	if (fill_swsqe(msg, sq, last_idx, last_sqe))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}

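/*
 * c4iw_fill_res_qp_entry() snapshots the work queue (and, if any SQ entries
 * are pending, the first and last software SQEs) while holding qhp->lock,
 * then drops the lock before emitting the netlink attributes so that no
 * message construction happens under the spinlock.
 */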
int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct t4_swsqe *fsp = NULL, *lsp = NULL;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	u16 first_sq_idx = 0, last_sq_idx = 0;
	struct t4_swsqe first_sqe, last_sqe;
	struct nlattr *table_attr;
	struct t4_wq wq;

	/* User qp state is not available, so don't dump user qps */
	if (qhp->ucontext)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&qhp->lock);
	wq = qhp->wq;

	/* If there are any pending sqes, copy the first and last */
	if (wq.sq.cidx != wq.sq.pidx) {
		first_sq_idx = wq.sq.cidx;
		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
		fsp = &first_sqe;
		last_sq_idx = wq.sq.pidx;
		if (last_sq_idx-- == 0)
			last_sq_idx = wq.sq.size - 1;
		if (last_sq_idx != first_sq_idx) {
			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
			lsp = &last_sqe;
		}
	}
	spin_unlock_irq(&qhp->lock);

	if (fill_sq(msg, &wq))
		goto err_cancel_table;

	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
		goto err_cancel_table;

	if (fill_rq(msg, &wq))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

union union_ep {
	struct c4iw_listen_ep lep;
	struct c4iw_ep ep;
};

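/*
 * A heap-allocated union_ep is large enough to hold a copy of either a
 * listening or an active endpoint, so the endpoint state can be snapshotted
 * under epcp->mutex and then dumped after the mutex is released.
 */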
int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
			      struct rdma_cm_id *cm_id)
{
	struct nlattr *table_attr;
	struct c4iw_ep_common *epcp;
	struct c4iw_listen_ep *listen_ep = NULL;
	struct c4iw_ep *ep = NULL;
	struct iw_cm_id *iw_cm_id;
	union union_ep *uep;

	iw_cm_id = rdma_iw_cm_id(cm_id);
	if (!iw_cm_id)
		return 0;
	epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
	if (!epcp)
		return 0;
	uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
	if (!uep)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err_free_uep;

	/* Get a consistent snapshot */
	mutex_lock(&epcp->mutex);
	if (epcp->state == LISTEN) {
		uep->lep = *(struct c4iw_listen_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		listen_ep = &uep->lep;
		epcp = &listen_ep->com;
	} else {
		uep->ep = *(struct c4iw_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		ep = &uep->ep;
		epcp = &ep->com;
	}

	if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
		goto err_cancel_table;

	if (epcp->state == LISTEN) {
		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
			goto err_cancel_table;
	} else {
		if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
			goto err_cancel_table;

		if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
							     ep->atid))
			goto err_cancel_table;
	}
	nla_nest_end(msg, table_attr);
	kfree(uep);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err_free_uep:
	kfree(uep);
	return -EMSGSIZE;
}

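/*
 * fill_cq() dumps the software view of a t4_cq: queue identity and sizing,
 * hardware and software producer/consumer indices, interrupt vector,
 * generation bit, error state and the big-endian bits_type_ts word.
 */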
static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
{
	if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", cq->size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "error", cq->error))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cq->bits_type_ts)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}

static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
		    const char *qstr)
{
	if (rdma_nl_put_driver_u32(msg, qstr, idx))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "header",
				       be32_to_cpu(cqe->header)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
				       be32_to_cpu(cqe->u.gen.wrid_hi)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
				       be32_to_cpu(cqe->u.gen.wrid_low)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cqe->bits_type_ts)))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}

static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
		goto err;
	idx = cq->cidx;
	if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
		goto err;

	return 0;
err:
	return -EMSGSIZE;
}

static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	if (!cq->sw_in_use)
		return 0;

	idx = cq->sw_cidx;
	if (fill_cqe(msg, cqes, idx, "swcq_idx"))
		goto err;
	if (cq->sw_in_use == 1)
		goto out;
	idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}

int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct nlattr *table_attr;
	struct t4_cqe hwcqes[2];
	struct t4_cqe swcqes[2];
	struct t4_cq cq;
	u16 idx;

	/* User cq state is not available, so don't dump user cqs */
	if (ibcq->uobject)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&chp->lock);

	/* t4_cq struct */
	cq = chp->cq;

	/* get 2 hw cqes: cidx-1, and cidx */
	idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
	hwcqes[0] = chp->cq.queue[idx];

	idx = cq.cidx;
	hwcqes[1] = chp->cq.queue[idx];

	/* get first and last sw cqes */
	if (cq.sw_in_use) {
		swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
		if (cq.sw_in_use > 1) {
			idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
			swcqes[1] = chp->cq.sw_queue[idx];
		}
	}

	spin_unlock_irq(&chp->lock);

	if (fill_cq(msg, &cq))
		goto err_cancel_table;

	if (fill_swcqes(msg, &cq, swcqes))
		goto err_cancel_table;

	if (fill_hwcqes(msg, &cq, hwcqes))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

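/*
 * c4iw_fill_res_mr_entry() reads the on-chip TPT entry for the MR's stag
 * via cxgb4_read_tpte() and decodes its fields; the stag index is the
 * upper 24 bits (stag >> 8) and the key is the low byte.
 */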
int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
	struct c4iw_dev *dev = mhp->rhp;
	u32 stag = mhp->attr.stag;
	struct nlattr *table_attr;
	struct fw_ri_tpte tpte;
	int ret;

	if (!stag)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return 0;
	}

	if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "valid",
			FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "state",
			FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "pdid",
			FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "perm",
			FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "ps",
			FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64(msg, "len",
			((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
			FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}