// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>

#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
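
/*
 * Restrack helpers: each function below fills driver-specific details of
 * one hns resource into an rdma netlink message. The RDMA core reaches
 * them through the .fill_res_*_entry and .fill_res_*_entry_raw callbacks
 * of ib_device_ops.
 */

/* Nest CQ state (depth, consumer index, CQE size and arm sequence number)
 * under RDMA_NLDEV_ATTR_DRIVER.
 */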
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
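
/* Dump the raw CQ context (CQC) queried from hardware as an opaque
 * RDMA_NLDEV_ATTR_RES_RAW blob.
 */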
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}
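
/* Nest the SQ/RQ/extended-SGE sizing of the QP under
 * RDMA_NLDEV_ATTR_DRIVER.
 */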
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
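
/* Dump the raw QP context (QPC) together with the SCC context (SCCC),
 * when QP flow control is supported, as one RDMA_NLDEV_ATTR_RES_RAW blob.
 */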
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_full_qp_ctx {
		struct hns_roce_v2_qp_context qpc;
		struct hns_roce_v2_scc_context sccc;
	} context = {};
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
	if (ret)
		return -EINVAL;

	/* If SCC is disabled or the query fails, the queried SCCC will
	 * be all 0.
	 */
	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
	    !hr_dev->hw->query_sccc)
		goto out;

	ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
	if (ret)
		ibdev_warn_ratelimited(&hr_dev->ib_dev,
				       "failed to query SCCC, ret = %d.\n",
				       ret);

out:
	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}
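
/* Nest MR translation details (PBL hop count and page shifts) under
 * RDMA_NLDEV_ATTR_DRIVER.
 */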
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
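
/* Dump the raw MPT entry of the MR as an RDMA_NLDEV_ATTR_RES_RAW blob. */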
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}
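
/* Nest SRQ details (SRQN, WQE count, max SGEs and XRC domain) under
 * RDMA_NLDEV_ATTR_DRIVER.
 */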
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
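
/* Dump the raw SRQ context (SRQC) as an RDMA_NLDEV_ATTR_RES_RAW blob. */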
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct hns_roce_srq_context context;
	int ret;

	if (!hr_dev->hw->query_srqc)
		return -EINVAL;

	ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
	if (ret)
		return ret;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}