treewide: remove redundant IS_ERR() before error code check
drivers/infiniband/hw/hns/hns_roce_cq.c
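The cleanup named in the title drops IS_ERR() tests that are redundant because the pointer is immediately compared against one specific error code; error codes lie outside the range of valid kernel pointers, so the comparison alone is sufficient. A minimal sketch of the pattern, using a hypothetical clk pointer not taken from this file:

	/* Before: the IS_ERR() check is redundant, because PTR_ERR(clk)
	 * can only equal -EPROBE_DEFER when clk is an error pointer.
	 */
	if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* After: */
	if (PTR_ERR(clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;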
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
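/*
 * Allocate and program a CQ context (CQC) for @hr_cq: look up the MTT
 * entries describing the CQ buffer, reserve a CQN from the bitmap, pin
 * the CQC HEM page, publish the CQ in the cqn->cq xarray, and finally
 * mail the context to hardware with CREATE_CQC. Each step is unwound in
 * reverse order on failure.
 */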
static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_table *mtt_table;
	struct hns_roce_cq_table *cq_table;
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u64 *mtts;
	int ret;

	cq_table = &hr_dev->cq_table;

	/* Get the physical address of cq buf */
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
	else
		mtt_table = &hr_dev->mr_table.mtt_table;

	mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
				   &dma_handle);

	if (!mtts) {
		dev_err(dev, "Failed to find mtt for CQ buf.\n");
		return -EINVAL;
	}

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret) {
		dev_err(dev, "Num of CQ out of range.\n");
		return ret;
	}

	/* Get CQC memory HEM(Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev,
			"Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
			ret, hr_cq->cqn);
		goto err_out;
	}

	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		dev_err(dev, "Failed to xa_store CQ.\n");
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	/* Send mailbox to hw */
	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
				HNS_ROCE_CMD_CREATE_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev,
			"Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
			ret, hr_cq->cqn);
		goto err_xa;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;

	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
	return ret;
}
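/*
 * Tear down a CQ context: destroy the CQC in hardware, unpublish the CQ
 * from the xarray, wait for in-flight event handlers to drop their
 * references, then release the HEM page and the CQN.
 */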
void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
				HNS_ROCE_CMD_DESTROY_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

	/* Wait for any in-flight interrupt handling on this CQ's EQ */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* wait for all interrupts on this CQ to be processed */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
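/*
 * Pin the userspace CQ buffer and translate it for hardware: register the
 * umem, pick the MTT type that matches the multi-hop addressing mode, and
 * write the buffer's page addresses into the MTT.
 */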
static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct hns_roce_ib_create_cq ucmd,
		       struct ib_udata *udata)
{
	struct hns_roce_buf *buf = &hr_cq->buf;
	struct hns_roce_mtt *mtt = &hr_cq->mtt;
	struct ib_umem **umem = &hr_cq->umem;
	u32 npages;
	int ret;

	*umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
			    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt->mtt_type = MTT_TYPE_CQE;
	else
		mtt->mtt_type = MTT_TYPE_WQE;

	npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
			      1 << hr_dev->caps.cqe_buf_pg_sz);
	ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, mtt);

err_buf:
	ib_umem_release(*umem);
	return ret;
}
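/*
 * Kernel-mode counterpart of get_cq_umem(): allocate the CQ buffer in the
 * kernel and write its page addresses into the MTT.
 */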
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_buf *buf = &hr_cq->buf;
	struct hns_roce_mtt *mtt = &hr_cq->mtt;
	int ret;

	ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
				 buf, buf->page_shift);
	if (ret)
		goto out;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt->mtt_type = MTT_TYPE_CQE;
	else
		mtt->mtt_type = MTT_TYPE_WQE;

	ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, mtt);

err_buf:
	hns_roce_buf_free(hr_dev, buf->size, buf);

out:
	return ret;
}
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
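/*
 * Set up a CQ whose buffer lives in userspace: copy in the create command,
 * pin and map the buffer, and optionally map the user record doorbell when
 * both the device and the user's response buffer support it.
 */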
static int create_user_cq(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq,
			  struct ib_udata *udata,
			  struct hns_roce_ib_create_cq_resp *resp)
{
	struct hns_roce_ib_create_cq ucmd;
	struct device *dev = hr_dev->dev;
	int ret;
	struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
				udata, struct hns_roce_ucontext, ibucontext);

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		dev_err(dev, "Failed to copy_from_udata.\n");
		return -EFAULT;
	}

	/* Get user space address, write it into mtt table */
	ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
	if (ret) {
		dev_err(dev, "Failed to get_cq_umem.\n");
		return ret;
	}

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
	    (udata->outlen >= sizeof(*resp))) {
		ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
					   &hr_cq->db);
		if (ret) {
			dev_err(dev, "cq record doorbell map failed!\n");
			goto err_mtt;
		}
		hr_cq->db_en = 1;
		resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
	}

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
	ib_umem_release(hr_cq->umem);

	return ret;
}
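/*
 * Set up a kernel-owned CQ: allocate the record doorbell if the device
 * supports it, allocate and map the CQ buffer, and compute the doorbell
 * register address for this CQ.
 */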
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cq *hr_cq)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
		if (ret)
			return ret;

		hr_cq->set_ci_db = hr_cq->db.db_record;
		*hr_cq->set_ci_db = 0;
		hr_cq->db_en = 1;
	}

	/* Init mtt table and write buff address to mtt table */
	ret = alloc_cq_buf(hr_dev, hr_cq);
	if (ret) {
		dev_err(dev, "Failed to alloc_cq_buf.\n");
		goto err_db;
	}

	hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
			 DB_REG_OFFSET * hr_dev->priv_uar.index;

	return 0;

err_db:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
		hns_roce_free_db(hr_dev, &hr_cq->db);

	return ret;
}
static void destroy_user_cq(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cq *hr_cq,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_cq_resp *resp)
{
	struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
				udata, struct hns_roce_ucontext, ibucontext);

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
	    (udata->outlen >= sizeof(*resp)))
		hns_roce_db_unmap_user(context, &hr_cq->db);

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
	ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cq *hr_cq)
{
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
	free_cq_buf(hr_dev, hr_cq);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
		hns_roce_free_db(hr_dev, &hr_cq->db);
}
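/*
 * Verbs entry point for CQ creation. Validates the requested depth and
 * completion vector, rounds the depth up to a power of two, builds the CQ
 * buffer in user or kernel mode, and then allocates the hardware context.
 */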
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct device *dev = hr_dev->dev;
	int vector = attr->comp_vector;
	u32 cq_entries = attr->cqe;
	int ret;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
			vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;
	hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
	hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);

	if (udata) {
		ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
		if (ret) {
			dev_err(dev, "Create cq failed in user mode!\n");
			goto err_cq;
		}
	} else {
		ret = create_kernel_cq(hr_dev, hr_cq);
		if (ret) {
			dev_err(dev, "Create cq failed in kernel mode!\n");
			goto err_cq;
		}
	}

	ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		dev_err(dev, "Alloc CQ failed(%d).\n", ret);
		goto err_dbmap;
	}

	/*
	 * For a CQ created by kernel space, the tptr value should be
	 * initialized to zero; for a CQ created by user space, zeroing
	 * tptr here would cause synchronization problems, so it is
	 * initialized in user space instead.
	 */
	if (!udata && hr_cq->tptr_addr)
		*hr_cq->tptr_addr = 0;

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_cqc;
	}

	return 0;

err_cqc:
	hns_roce_free_cqc(hr_dev, hr_cq);

err_dbmap:
	if (udata)
		destroy_user_cq(hr_dev, hr_cq, udata, &resp);
	else
		destroy_kernel_cq(hr_dev, hr_cq);

err_cq:
	return ret;
}
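/*
 * Verbs entry point for CQ destruction. A populated hw->destroy_cq hook
 * (apparently the older hw v1 path) takes over completely; otherwise the
 * generic path below frees the context, MTT, buffer and doorbell.
 */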
void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	if (hr_dev->hw->destroy_cq) {
		hr_dev->hw->destroy_cq(ib_cq, udata);
		return;
	}

	hns_roce_free_cqc(hr_dev, hr_cq);
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);

	ib_umem_release(hr_cq->umem);
	if (udata) {
		if (hr_cq->db_en == 1)
			hns_roce_db_unmap_user(rdma_udata_to_drv_context(
						       udata,
						       struct hns_roce_ucontext,
						       ibucontext),
					       &hr_cq->db);
	} else {
		/* Free the kernel-space CQ buffer */
		free_cq_buf(hr_dev, hr_cq);
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}
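/*
 * Dispatch a completion event from the EQ handler to the CQ's
 * comp_handler; arm_sn is bumped so the next arming doorbell carries a
 * fresh sequence number.
 */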
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}
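/*
 * Dispatch an asynchronous error event (invalid CQN, access error or
 * overflow) to the CQ's event handler as IB_EVENT_CQ_ERR. The refcount
 * inc/dec pair keeps the CQ alive while the handler runs.
 */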
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	atomic_inc(&hr_cq->refcount);

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}
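/*
 * CQ table lifetime helpers: the table pairs an xarray (cqn -> CQ) with a
 * bitmap allocator for CQNs, keeping the first caps.reserved_cqs numbers
 * reserved.
 */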
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	xa_init(&cq_table->array);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}