drivers/infiniband/hw/mana/cq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
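
/*
 * mana_ib_create_cq() - uverbs handler that creates a completion queue.
 *
 * Copies the create request from user space, backs the CQ ring with the
 * user buffer described by ucmd.buf_addr, and, for RNIC CQs, also creates
 * the CQ on the device and installs the completion callback. The queue id
 * is returned to user space in the response.
 */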
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct uverbs_attr_bundle *attrs)
{
        struct ib_udata *udata = &attrs->driver_udata;
        struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
        struct mana_ib_create_cq_resp resp = {};
        struct mana_ib_ucontext *mana_ucontext;
        struct ib_device *ibdev = ibcq->device;
        struct mana_ib_create_cq ucmd = {};
        struct mana_ib_dev *mdev;
        bool is_rnic_cq;
        u32 doorbell;
        int err;

        mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

        cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
        cq->cq_handle = INVALID_MANA_HANDLE;

        if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
                return -EINVAL;

        err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
        if (err) {
                ibdev_dbg(ibdev,
                          "Failed to copy from udata for create cq, %d\n", err);
                return err;
        }

        is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);

        if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
                ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
                return -EINVAL;
        }

        cq->cqe = attr->cqe;
        err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
        if (err) {
                ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
                return err;
        }

        mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
                                                  ibucontext);
        doorbell = mana_ucontext->doorbell;
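
        /*
         * A non-RNIC CQ only needs the ring created above; an RNIC CQ is
         * also created on the device and wired into the GDMA CQ table so
         * completions can be dispatched to it.
         */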
        if (is_rnic_cq) {
                err = mana_ib_gd_create_cq(mdev, cq, doorbell);
                if (err) {
                        ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
                        goto err_destroy_queue;
                }

                err = mana_ib_install_cq_cb(mdev, cq);
                if (err) {
                        ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
                        goto err_destroy_rnic_cq;
                }
        }

        resp.cqid = cq->queue.id;
        err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
        if (err) {
                ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
                goto err_remove_cq_cb;
        }

        return 0;

err_remove_cq_cb:
        mana_ib_remove_cq_cb(mdev, cq);
err_destroy_rnic_cq:
        mana_ib_gd_destroy_cq(mdev, cq);
err_destroy_queue:
        mana_ib_destroy_queue(mdev, &cq->queue);

        return err;
}
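
/*
 * mana_ib_destroy_cq() - uverbs handler that tears down a completion queue,
 * undoing mana_ib_create_cq() in reverse order: the callback is removed,
 * the device CQ is destroyed, and the ring buffer is released.
 */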
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
        struct ib_device *ibdev = ibcq->device;
        struct mana_ib_dev *mdev;

        mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

        mana_ib_remove_cq_cb(mdev, cq);

        /* Ignore return code as there is not much we can do about it.
         * The error message is printed inside.
         */
        mana_ib_gd_destroy_cq(mdev, cq);

        mana_ib_destroy_queue(mdev, &cq->queue);

        return 0;
}
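
/*
 * mana_ib_cq_handler() - GDMA completion callback; forwards the event to
 * the consumer's comp_handler, if one was registered on the ibcq.
 */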
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
        struct mana_ib_cq *cq = ctx;

        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
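
/*
 * mana_ib_install_cq_cb() - allocate a shadow gdma_queue entry and place it
 * in the GDMA context's cq_table, so that mana_ib_cq_handler() is invoked
 * when the hardware signals completions for this queue id.
 */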
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct gdma_queue *gdma_cq;

        if (cq->queue.id >= gc->max_num_cqs)
                return -EINVAL;
        /* Create CQ table entry */
        WARN_ON(gc->cq_table[cq->queue.id]);
        gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
        if (!gdma_cq)
                return -ENOMEM;

        gdma_cq->cq.context = cq;
        gdma_cq->type = GDMA_CQ;
        gdma_cq->cq.callback = mana_ib_cq_handler;
        gdma_cq->id = cq->queue.id;
        gc->cq_table[cq->queue.id] = gdma_cq;
        return 0;
}
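
/*
 * mana_ib_remove_cq_cb() - undo mana_ib_install_cq_cb(): free the shadow
 * entry and clear the cq_table slot for this queue id.
 */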
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
        struct gdma_context *gc = mdev_to_gc(mdev);

        if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
                return;

        kfree(gc->cq_table[cq->queue.id]);
        gc->cq_table[cq->queue.id] = NULL;
}