drivers/infiniband/hw/mlx5/srq_cmd.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_ib.h"
#include "srq.h"

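/*
 * Bytes needed for the PAS (page address) array: the receive ring plus the
 * page-offset shift (the offset is expressed in 64ths of a page), rounded up
 * to whole pages, one u64 address per page.  in->log_page_size is encoded
 * relative to 4KB pages, hence the +12.
 */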
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset   = in->page_offset;
	u32 po_quanta     = 1 << (log_page_size - 6);
	u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size     = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

	return rq_num_pas * sizeof(u64);
}

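/*
 * set_wq()/set_srqc() pack the generic mlx5_srq_attr into the device's wq and
 * srqc context layouts for the create commands; get_wq()/get_srqc() below do
 * the reverse translation when a queue is queried.
 */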
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq, wq, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
	MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq, wq, log_wq_sz, in->log_size);
	MLX5_SET(wq, wq, page_offset, in->page_offset);
	MLX5_SET(wq, wq, lwm, in->lwm);
	MLX5_SET(wq, wq, pd, in->pd);
	MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc, srqc, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
	MLX5_SET(srqc, srqc, page_offset, in->page_offset);
	MLX5_SET(srqc, srqc, lwm, in->lwm);
	MLX5_SET(srqc, srqc, pd, in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
	MLX5_SET(srqc, srqc, xrcd, in->xrcd);
	MLX5_SET(srqc, srqc, cqn, in->cqn);
}

static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
	in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
	in->log_size = MLX5_GET(wq, wq, log_wq_sz);
	in->page_offset = MLX5_GET(wq, wq, page_offset);
	in->lwm = MLX5_GET(wq, wq, lwm);
	in->pd = MLX5_GET(wq, wq, pd);
	in->db_record = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
	in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
	in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
	in->page_offset = MLX5_GET(srqc, srqc, page_offset);
	in->lwm = MLX5_GET(srqc, srqc, lwm);
	in->pd = MLX5_GET(srqc, srqc, pd);
	in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}

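/*
 * Look up an SRQ by number.  The reference is taken under the XArray lock,
 * so the SRQ cannot be released while the caller still holds it.
 */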
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *srq;

	xa_lock(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock(&table->array);

	return srq;
}

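/*
 * Legacy (issi == 0) command set: CREATE/DESTROY/ARM/QUERY_SRQ.  The create
 * input buffer is sized for the command layout plus the PAS array appended
 * at the end.
 */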
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_srq_in, create_in, uid, in->uid);
	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

	MLX5_SET(destroy_srq_in, srq_in, opcode,
		 MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
	MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			     sizeof(srq_out));
}

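/* Re-arm the SRQ limit event: ARM_RQ with op_mod selecting SRQ and the new lwm. */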
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};

	MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
	MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			     sizeof(srq_out));
}

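/*
 * Read the SRQ context back from firmware, translate it into mlx5_srq_attr
 * and flag MLX5_SRQ_FLAG_ERR if the queue is no longer in the GOOD state.
 */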
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, srq_in, opcode,
		 MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			    MLX5_ST_SZ_BYTES(query_srq_out));
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

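/* XRC SRQ (MLX5_RES_XSRQ) variants of the same create/destroy/arm/query commands. */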
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	memcpy(pas, in->pas, pas_size);
	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	memset(create_out, 0, sizeof(create_out));
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
	srq->uid = in->uid;
out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;
	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));

	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			    xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

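/*
 * RMP-backed SRQ, used for plain SRQs once dev->mdev->issi is non-zero.
 * The RMP context embeds a wq, so set_wq()/get_wq() are reused here.
 */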
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_out = NULL;
	void *create_in = NULL;
	void *rmpc;
	void *wq;
	int pas_size;
	int outlen;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
	create_in = kvzalloc(inlen, GFP_KERNEL);
	create_out = kvzalloc(outlen, GFP_KERNEL);
	if (!create_in || !create_out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
	if (!err) {
		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
		srq->uid = in->uid;
	}

out:
	kvfree(create_in);
	kvfree(create_out);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

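/*
 * Arming an RMP is a MODIFY_RMP: only the lwm field is updated, selected
 * through the rmp_bitmask, while the RMP stays in the RDY state.
 */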
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *out = NULL;
	void *in = NULL;
	void *rmpc;
	void *wq;
	void *bitmask;
	int outlen;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

	in = kvzalloc(inlen, GFP_KERNEL);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(modify_rmp_in, in, uid, srq->uid);
	MLX5_SET(wq, wq, lwm, lwm);
	MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);

out:
	kvfree(in);
	kvfree(out);
	return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out = NULL;
	u32 *rmp_in = NULL;
	void *rmpc;
	int outlen;
	int inlen;
	int err;

	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

	rmp_out = kvzalloc(outlen, GFP_KERNEL);
	rmp_in = kvzalloc(inlen, GFP_KERNEL);
	if (!rmp_out || !rmp_in) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	kvfree(rmp_in);
	return err;
}

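/*
 * XRQ-backed SRQ, used for tag-matching SRQs (IB_SRQT_TM): the topology is
 * set to tag matching and, when requested, the rendezvous offload is enabled.
 */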
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}

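/*
 * Dispatch helpers: with issi == 0 the legacy SRQ commands are used;
 * otherwise the command set is chosen by the resource type (XRC SRQ, XRQ,
 * or RMP).
 */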
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->mdev->issi)
		return create_srq_cmd(dev, srq, in);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->mdev->issi)
		return destroy_srq_cmd(dev, srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}

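/*
 * Create an SRQ: map the IB SRQ type to the mlx5 resource type, issue the
 * appropriate create command and publish the SRQ in the srq_table XArray so
 * events and lookups can find it.
 */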
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	refcount_set(&srq->common.refcount, 1);
	init_completion(&srq->common.free);

	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
	if (err)
		goto err_destroy_srq_split;

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}

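/*
 * Destroy an SRQ: remove it from the table first so no new references can be
 * taken, issue the destroy command, then drop the initial reference and wait
 * for any remaining users to finish.
 */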
void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	tmp = xa_erase_irq(&table->array, srq->srqn);
	if (!tmp || tmp != srq)
		return;

	err = destroy_srq_split(dev, srq);
	if (err)
		return;

	mlx5_core_res_put(&srq->common);
	wait_for_completion(&srq->common.free);
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out)
{
	if (!dev->mdev->issi)
		return query_srq_cmd(dev, srq, out);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		     u16 lwm, int is_srq)
{
	if (!dev->mdev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}

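/*
 * Notifier for SRQ_CATAS_ERROR and SRQ_RQ_LIMIT events: look up the SRQ by
 * the number carried in the EQE, take a reference, forward the event to the
 * srq->event() callback and drop the reference again.
 */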
static int srq_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_srq_table *table;
	struct mlx5_core_srq *srq;
	struct mlx5_eqe *eqe;
	u32 srqn;

	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
		return NOTIFY_DONE;

	table = container_of(nb, struct mlx5_srq_table, nb);

	eqe = data;
	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

	xa_lock(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock(&table->array);

	if (!srq)
		return NOTIFY_OK;

	srq->event(srq, eqe->type);

	mlx5_core_res_put(&srq->common);

	return NOTIFY_OK;
}

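/* Table init/cleanup: set up the XArray and (un)register the event notifier. */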
int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	memset(table, 0, sizeof(*table));
	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);

	table->nb.notifier_call = srq_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
}