/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
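
/*
 * Allocate and program a CQ context (CQC) for @hr_cq: look up the MTT
 * entries for the CQ buffer, reserve a CQN from the bitmap, get the CQC
 * entry from the HEM table, publish the CQ in the cq_table xarray, and
 * issue the CREATE_CQC mailbox command so the hardware picks up the new
 * context.
 */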
static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
                              struct hns_roce_cq *hr_cq)
{
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_hem_table *mtt_table;
        struct hns_roce_cq_table *cq_table;
        struct device *dev = hr_dev->dev;
        dma_addr_t dma_handle;
        u64 *mtts;
        int ret;

        cq_table = &hr_dev->cq_table;

        /* Get the physical address of the CQ buffer */
        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                mtt_table = &hr_dev->mr_table.mtt_cqe_table;
        else
                mtt_table = &hr_dev->mr_table.mtt_table;

        mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
                                   &dma_handle);
        if (!mtts) {
                dev_err(dev, "Failed to find mtt for CQ buf.\n");
                return -EINVAL;
        }

        ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
        if (ret) {
                dev_err(dev, "Num of CQ out of range.\n");
                return ret;
        }

        /* Get CQC memory from the HEM (Hardware Entry Memory) table */
        ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
        if (ret) {
                dev_err(dev,
                        "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
                        ret, hr_cq->cqn);
                goto err_out;
        }

        ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
        if (ret) {
                dev_err(dev, "Failed to xa_store CQ.\n");
                goto err_put;
        }

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_xa;
        }

        hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

        /* Send the mailbox to hardware */
        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
                                HNS_ROCE_CMD_CREATE_CQC,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        if (ret) {
                dev_err(dev,
                        "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
                        ret, hr_cq->cqn);
                goto err_xa;
        }

        hr_cq->cons_index = 0;
        hr_cq->arm_sn = 1;

        atomic_set(&hr_cq->refcount, 1);
        init_completion(&hr_cq->free);

        return 0;

err_xa:
        xa_erase(&cq_table->array, hr_cq->cqn);

err_put:
        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);

        return ret;
}
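
/*
 * Destroy the hardware CQ context: issue the DESTROY_CQC mailbox command,
 * remove the CQ from the xarray, wait for outstanding event handling and
 * references to drain, then release the HEM entry and the CQN.
 */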
void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = hr_dev->dev;
        int ret;

        ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
                                HNS_ROCE_CMD_DESTROY_CQC,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);
        if (ret)
                dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
                        hr_cq->cqn);

        xa_erase(&cq_table->array, hr_cq->cqn);

        /* Wait until any interrupt handling for this CQ has finished */
        synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

        /* Wait until all outstanding references to the CQ are dropped */
        if (atomic_dec_and_test(&hr_cq->refcount))
                complete(&hr_cq->free);
        wait_for_completion(&hr_cq->free);

        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
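
/*
 * Pin the user-space CQ buffer described by @ucmd, select the CQE or WQE
 * MTT type depending on the multi-hop addressing capability, and write the
 * pinned pages into the MTT so the hardware can address the buffer.
 */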
static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
                       struct hns_roce_ib_create_cq ucmd,
                       struct ib_udata *udata)
{
        struct hns_roce_buf *buf = &hr_cq->buf;
        struct hns_roce_mtt *mtt = &hr_cq->mtt;
        struct ib_umem **umem = &hr_cq->umem;
        u32 npages;
        int ret;

        *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
                            IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                mtt->mtt_type = MTT_TYPE_CQE;
        else
                mtt->mtt_type = MTT_TYPE_WQE;

        npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
                              1 << hr_dev->caps.cqe_buf_pg_sz);
        ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
        if (ret)
                goto err_buf;

        ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, mtt);

err_buf:
        ib_umem_release(*umem);
        return ret;
}
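
/*
 * Allocate a kernel-space CQ buffer and map it through the MTT, the
 * kernel-mode counterpart of get_cq_umem().
 */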
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
        struct hns_roce_buf *buf = &hr_cq->buf;
        struct hns_roce_mtt *mtt = &hr_cq->mtt;
        int ret;

        ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
                                 buf, buf->page_shift);
        if (ret)
                goto out;

        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                mtt->mtt_type = MTT_TYPE_CQE;
        else
                mtt->mtt_type = MTT_TYPE_WQE;

        ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
        if (ret)
                goto err_buf;

        ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, mtt);

err_buf:
        hns_roce_buf_free(hr_dev, buf->size, buf);

out:
        return ret;
}
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
        hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
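
/*
 * Set up a CQ created from user space: copy the creation request, map the
 * user buffer into the MTT and, when the record-doorbell capability is
 * present and the response fits, map the user doorbell page as well.
 */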
static int create_user_cq(struct hns_roce_dev *hr_dev,
                          struct hns_roce_cq *hr_cq,
                          struct ib_udata *udata,
                          struct hns_roce_ib_create_cq_resp *resp)
{
        struct hns_roce_ib_create_cq ucmd;
        struct device *dev = hr_dev->dev;
        int ret;
        struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
                                   udata, struct hns_roce_ucontext, ibucontext);

        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                dev_err(dev, "Failed to copy_from_udata.\n");
                return -EFAULT;
        }

        /* Get the user-space buffer address and write it into the MTT table */
        ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
        if (ret) {
                dev_err(dev, "Failed to get_cq_umem.\n");
                return ret;
        }

        if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
            (udata->outlen >= sizeof(*resp))) {
                ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
                                           &hr_cq->db);
                if (ret) {
                        dev_err(dev, "cq record doorbell map failed!\n");
                        goto err_mtt;
                }
                hr_cq->db_en = 1;
                resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
        }

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
        ib_umem_release(hr_cq->umem);

        return ret;
}
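
/*
 * Set up a CQ created from kernel space: allocate the record doorbell when
 * supported, allocate and map the CQ buffer, and compute the doorbell
 * register address.
 */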
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
                            struct hns_roce_cq *hr_cq)
{
        struct device *dev = hr_dev->dev;
        int ret;

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
                ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
                if (ret)
                        return ret;

                hr_cq->set_ci_db = hr_cq->db.db_record;
                *hr_cq->set_ci_db = 0;
                hr_cq->db_en = 1;
        }

        /* Init the MTT table and write the buffer addresses into it */
        ret = alloc_cq_buf(hr_dev, hr_cq);
        if (ret) {
                dev_err(dev, "Failed to alloc_cq_buf.\n");
                goto err_db;
        }

        hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
                         DB_REG_OFFSET * hr_dev->priv_uar.index;

        return 0;

err_db:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                hns_roce_free_db(hr_dev, &hr_cq->db);

        return ret;
}
static void destroy_user_cq(struct hns_roce_dev *hr_dev,
                            struct hns_roce_cq *hr_cq,
                            struct ib_udata *udata,
                            struct hns_roce_ib_create_cq_resp *resp)
{
        struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
                                   udata, struct hns_roce_ucontext, ibucontext);

        if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
            (udata->outlen >= sizeof(*resp)))
                hns_roce_db_unmap_user(context, &hr_cq->db);

        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
        ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
                              struct hns_roce_cq *hr_cq)
{
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
        free_cq_buf(hr_dev, hr_cq);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                hns_roce_free_db(hr_dev, &hr_cq->db);
}
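
/*
 * Verbs entry point for CQ creation: validate the requested depth and
 * completion vector, size the CQ buffer, build the user- or kernel-mode
 * resources, and finally allocate the hardware CQ context.
 */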
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
                       struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_ib_create_cq_resp resp = {};
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
        struct device *dev = hr_dev->dev;
        int vector = attr->comp_vector;
        u32 cq_entries = attr->cqe;
        int ret;

        if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
                dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
                        cq_entries, hr_dev->caps.max_cqes);
                return -EINVAL;
        }

        if (vector >= hr_dev->caps.num_comp_vectors) {
                dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
                        vector, hr_dev->caps.num_comp_vectors);
                return -EINVAL;
        }

        cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
        cq_entries = roundup_pow_of_two(cq_entries);
        hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
        hr_cq->cq_depth = cq_entries;
        hr_cq->vector = vector;
        hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
        hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
        spin_lock_init(&hr_cq->lock);
        INIT_LIST_HEAD(&hr_cq->sq_list);
        INIT_LIST_HEAD(&hr_cq->rq_list);

        if (udata) {
                ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
                if (ret) {
                        dev_err(dev, "Create cq failed in user mode!\n");
                        goto err_cq;
                }
        } else {
                ret = create_kernel_cq(hr_dev, hr_cq);
                if (ret) {
                        dev_err(dev, "Create cq failed in kernel mode!\n");
                        goto err_cq;
                }
        }

        ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
        if (ret) {
                dev_err(dev, "Alloc CQ failed(%d).\n", ret);
                goto err_dbmap;
        }

        /*
         * For a CQ created by kernel space, the tptr value should be
         * initialized to zero; for a CQ created by user space, setting tptr
         * to zero here would cause synchronization problems, so it is
         * initialized in user space instead.
         */
        if (!udata && hr_cq->tptr_addr)
                *hr_cq->tptr_addr = 0;

        if (udata) {
                resp.cqn = hr_cq->cqn;
                ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (ret)
                        goto err_cqc;
        }

        return 0;

err_cqc:
        hns_roce_free_cqc(hr_dev, hr_cq);

err_dbmap:
        if (udata)
                destroy_user_cq(hr_dev, hr_cq, udata, &resp);
        else
                destroy_kernel_cq(hr_dev, hr_cq);

err_cq:
        return ret;
}
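
/*
 * Verbs entry point for CQ destruction: prefer the hardware-specific
 * destroy_cq hook when one is provided, otherwise tear down the CQC, MTT
 * and buffer/doorbell resources directly.
 */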
void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

        if (hr_dev->hw->destroy_cq) {
                hr_dev->hw->destroy_cq(ib_cq, udata);
                return;
        }

        hns_roce_free_cqc(hr_dev, hr_cq);
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);

        ib_umem_release(hr_cq->umem);
        if (udata) {
                if (hr_cq->db_en == 1)
                        hns_roce_db_unmap_user(rdma_udata_to_drv_context(
                                                       udata,
                                                       struct hns_roce_ucontext,
                                                       ibucontext),
                                               &hr_cq->db);
        } else {
                /* Free the kernel-space CQ buffer */
                free_cq_buf(hr_dev, hr_cq);
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                        hns_roce_free_db(hr_dev, &hr_cq->db);
        }
}
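
/* Dispatch a hardware completion event to the CQ's completion handler. */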
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
        struct hns_roce_cq *hr_cq;
        struct ib_cq *ibcq;

        hr_cq = xa_load(&hr_dev->cq_table.array,
                        cqn & (hr_dev->caps.num_cqs - 1));
        if (!hr_cq) {
                dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
                         cqn);
                return;
        }

        ++hr_cq->arm_sn;
        ibcq = &hr_cq->ib_cq;
        if (ibcq->comp_handler)
                ibcq->comp_handler(ibcq, ibcq->cq_context);
}
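
/*
 * Dispatch an asynchronous CQ error event (invalid CQ ID, access error or
 * overflow) to the consumer's event handler as IB_EVENT_CQ_ERR.
 */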
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_cq *hr_cq;
        struct ib_event event;
        struct ib_cq *ibcq;

        hr_cq = xa_load(&hr_dev->cq_table.array,
                        cqn & (hr_dev->caps.num_cqs - 1));
        if (!hr_cq) {
                dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
                return;
        }

        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
                dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
                        event_type, cqn);
                return;
        }

        atomic_inc(&hr_cq->refcount);

        ibcq = &hr_cq->ib_cq;
        if (ibcq->event_handler) {
                event.device = ibcq->device;
                event.element.cq = ibcq;
                event.event = IB_EVENT_CQ_ERR;
                ibcq->event_handler(&event, ibcq->cq_context);
        }

        if (atomic_dec_and_test(&hr_cq->refcount))
                complete(&hr_cq->free);
}
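
/* Initialize the CQ table: the lookup xarray and the CQN allocation bitmap. */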
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

        xa_init(&cq_table->array);

        return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
                                    hr_dev->caps.num_cqs - 1,
                                    hr_dev->caps.reserved_cqs, 0);
}
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}