drivers/infiniband/hw/hns/hns_roce_restrack.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
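
/*
 * restrack resource dump helpers. These functions are registered as the
 * device's ib_device_ops fill_res_*_entry and fill_res_*_entry_raw
 * callbacks, so the RDMA netlink interface (used by the userspace rdma
 * tool) can dump driver-specific attributes and raw hardware context for
 * CQ, QP, MR and SRQ resources.
 */

/*
 * Dump driver-specific CQ attributes (depth, consumer index, CQE size and
 * arm sequence number) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */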
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
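
/*
 * Dump the raw CQ context (CQC) queried from hardware as a single
 * RDMA_NLDEV_ATTR_RES_RAW blob.
 */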
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}
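
/*
 * Dump driver-specific QP attributes (SQ/RQ WQE counts, max SGEs and the
 * extended SGE count) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */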
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
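
/*
 * Dump the raw QP context (QPC) together with the SCC context (SCCC) as a
 * single RDMA_NLDEV_ATTR_RES_RAW blob. The SCCC part stays zeroed when
 * QP flow control is unsupported or its query fails.
 */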
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_full_qp_ctx {
		struct hns_roce_v2_qp_context qpc;
		struct hns_roce_v2_scc_context sccc;
	} context = {};
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
	if (ret)
		return ret;

	/* If SCC is disabled or the query fails, the queried SCCC will
	 * be all 0.
	 */
	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
	    !hr_dev->hw->query_sccc)
		goto out;

	ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
	if (ret)
		ibdev_warn_ratelimited(&hr_dev->ib_dev,
				       "failed to query SCCC, ret = %d.\n",
				       ret);

out:
	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}
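
/*
 * Dump driver-specific MR attributes (PBL hop number, base-address page
 * shift and buffer page shift) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */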
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
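
/*
 * Dump the raw memory protection table (MPT) entry queried from hardware as
 * a single RDMA_NLDEV_ATTR_RES_RAW blob.
 */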
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}
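
/*
 * Dump driver-specific SRQ attributes (SRQ number, WQE count, max SGEs and
 * XRC domain number) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */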
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
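
/*
 * Dump the raw SRQ context (SRQC) queried from hardware as a single
 * RDMA_NLDEV_ATTR_RES_RAW blob.
 */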
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct hns_roce_srq_context context;
	int ret;

	if (!hr_dev->hw->query_srqc)
		return -EINVAL;

	ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
	if (ret)
		return ret;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}