/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <rdma/qedr-abi.h>

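/* Doorbell offsets reported by the firmware (DQ_PWM_OFFSET_*) are given in
 * PWM units; DB_ADDR_SHIFT() scales such an offset by DB_PWM_ADDR_OFFSET_SHIFT
 * so it can be added directly to a doorbell BAR / DPI base address below.
 */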
#define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

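/* Translate the Ethernet link speed reported by qed (in Mbps) into the
 * closest IB speed/width pair advertised through ib_port_attr.
 */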
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
	memset(attr, 0, sizeof(*attr));

	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	attr->gid_tbl_len = QEDR_MAX_SGID;
	attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

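/* Each user context keeps a list of (physical address, length) pairs that
 * userspace is allowed to mmap(); qedr_mmap() below only remaps ranges that
 * were registered here first.
 */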
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 mm->key.phy_addr, mm->key.len, uctx, found);

	return found;
}

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_ucontext *uctx = NULL;
	struct qedr_alloc_pd_uresp uresp;
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);

	uresp.pd_id = pd_id;
	pd->pd_id = pd_id;

	if (udata && context) {
		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc)
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
		uctx = get_qedr_ucontext(context);
		uctx->pd = pd;
		pd->uctx = uctx;
	}

	return &pd->ibpd;
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd)
		pr_err("Invalid PD received in dealloc_pd\n");

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

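/* Sizing note: with the minimum firmware PBL page of 4 KB and 8-byte PBEs,
 * a single PBL page holds 512 page-table entries. qedr_prepare_pbl_tbl()
 * below grows the PBL page size (up to MAX_FW_PBL_PAGE_SIZE) and switches to
 * a two-layer layout when more entries are needed, with MAX_PBES_TWO_LAYER
 * as the overall cap.
 */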
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 ( points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

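/* Walk the umem scatterlist and write one little-endian {hi,lo} page address
 * (PBE) per hardware page, advancing to the next PBL page whenever the
 * current one fills up.
 */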
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	int entry;
	u32 addr;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
					      umem->page_size * pg_cnt);
			addr = upper_32_bits(sg_dma_address(sg) +
					     umem->page_size * pg_cnt);
			pbe->hi = cpu_to_le32(addr);

			pbe_cnt++;
			total_num_pbes++;
			pbe++;

			if (total_num_pbes == pbl_info->num_pbes)
				return;

			/* If the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct regpair *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

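/* CQE ownership is tracked with a toggle bit: every time the consumer passes
 * the last element of the ring (toggle_cqe) the expected toggle value is
 * flipped, so stale entries left over from the previous pass are not mistaken
 * for new CQEs.
 */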
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync)
{
	int page_cnt;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	page_cnt = ib_umem_page_count(q->umem);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
	if (rc)
		goto err0;

	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
	if (IS_ERR_OR_NULL(q->pbl_tbl))
		goto err0;

	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);

	return 0;

err0:
	ib_umem_release(q->umem);

	return -ENOMEM;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

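/* The CQ doorbell is a single 64-bit record (icid + aggregation flags +
 * consumer value) written with writeq(); the surrounding barriers order the
 * doorbell write against the CQE accesses that precede it.
 */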
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

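/* CQ creation: for user CQs the CQE ring is pinned user memory described by a
 * PBL (qedr_init_user_queue()), for kernel CQs it is a qed chain allocated
 * here. In both cases the PBL address and page count are handed to the
 * firmware through rdma_create_cq(), and the consumer doorbell is set up last.
 */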
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}
		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl);
		if (rc)
			goto err0;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);

	/* GSIs CQs are handled by driver, so they don't exist in the FW */
	if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	kfree(cq);

	return 0;
}

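/* Resolve the source GID referenced by the AH attributes into the firmware
 * QP parameters: copy sgid/dgid, derive the RoCE mode (v1, v2/IPv4, v2/IPv6)
 * from the GID type, and record the VLAN of the GID's netdev.
 */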
998 static inline int get_gid_info_from_table(struct ib_qp
*ibqp
,
999 struct ib_qp_attr
*attr
,
1001 struct qed_rdma_modify_qp_in_params
1004 enum rdma_network_type nw_type
;
1005 struct ib_gid_attr gid_attr
;
1011 rc
= ib_get_cached_gid(ibqp
->device
, attr
->ah_attr
.port_num
,
1012 attr
->ah_attr
.grh
.sgid_index
, &gid
, &gid_attr
);
1016 if (!memcmp(&gid
, &zgid
, sizeof(gid
)))
1019 if (gid_attr
.ndev
) {
1020 qp_params
->vlan_id
= rdma_vlan_dev_vlan_id(gid_attr
.ndev
);
1022 dev_put(gid_attr
.ndev
);
1023 nw_type
= ib_gid_to_network_type(gid_attr
.gid_type
, &gid
);
1025 case RDMA_NETWORK_IPV6
:
1026 memcpy(&qp_params
->sgid
.bytes
[0], &gid
.raw
[0],
1027 sizeof(qp_params
->sgid
));
1028 memcpy(&qp_params
->dgid
.bytes
[0],
1029 &attr
->ah_attr
.grh
.dgid
,
1030 sizeof(qp_params
->dgid
));
1031 qp_params
->roce_mode
= ROCE_V2_IPV6
;
1032 SET_FIELD(qp_params
->modify_flags
,
1033 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE
, 1);
1035 case RDMA_NETWORK_IB
:
1036 memcpy(&qp_params
->sgid
.bytes
[0], &gid
.raw
[0],
1037 sizeof(qp_params
->sgid
));
1038 memcpy(&qp_params
->dgid
.bytes
[0],
1039 &attr
->ah_attr
.grh
.dgid
,
1040 sizeof(qp_params
->dgid
));
1041 qp_params
->roce_mode
= ROCE_V1
;
1043 case RDMA_NETWORK_IPV4
:
1044 memset(&qp_params
->sgid
, 0, sizeof(qp_params
->sgid
));
1045 memset(&qp_params
->dgid
, 0, sizeof(qp_params
->dgid
));
1046 ipv4_addr
= qedr_get_ipv4_from_gid(gid
.raw
);
1047 qp_params
->sgid
.ipv4_addr
= ipv4_addr
;
1049 qedr_get_ipv4_from_gid(attr
->ah_attr
.grh
.dgid
.raw
);
1050 qp_params
->dgid
.ipv4_addr
= ipv4_addr
;
1051 SET_FIELD(qp_params
->modify_flags
,
1052 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE
, 1);
1053 qp_params
->roce_mode
= ROCE_V2_IPV4
;
1058 for (i
= 0; i
< 4; i
++) {
1059 qp_params
->sgid
.dwords
[i
] = ntohl(qp_params
->sgid
.dwords
[i
]);
1060 qp_params
->dgid
.dwords
[i
] = ntohl(qp_params
->dgid
.dwords
[i
]);
1063 if (qp_params
->vlan_id
>= VLAN_CFI_MASK
)
1064 qp_params
->vlan_id
= 0;
static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
	ib_umem_release(qp->usq.umem);
}

static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
	ib_umem_release(qp->urq.umem);
}

static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);
}

static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

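/* Validate the requested QP sizes (WRs, SGEs, inline data) against the limits
 * reported by qedr_query_device(); only RC and GSI QPs are supported, and
 * only the kernel may create the special GSI QP.
 */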
1093 static int qedr_check_qp_attrs(struct ib_pd
*ibpd
, struct qedr_dev
*dev
,
1094 struct ib_qp_init_attr
*attrs
)
1096 struct qedr_device_attr
*qattr
= &dev
->attr
;
1098 /* QP0... attrs->qp_type == IB_QPT_GSI */
1099 if (attrs
->qp_type
!= IB_QPT_RC
&& attrs
->qp_type
!= IB_QPT_GSI
) {
1100 DP_DEBUG(dev
, QEDR_MSG_QP
,
1101 "create qp: unsupported qp type=0x%x requested\n",
1106 if (attrs
->cap
.max_send_wr
> qattr
->max_sqe
) {
1108 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1109 attrs
->cap
.max_send_wr
, qattr
->max_sqe
);
1113 if (attrs
->cap
.max_inline_data
> qattr
->max_inline
) {
1115 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1116 attrs
->cap
.max_inline_data
, qattr
->max_inline
);
1120 if (attrs
->cap
.max_send_sge
> qattr
->max_sge
) {
1122 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1123 attrs
->cap
.max_send_sge
, qattr
->max_sge
);
1127 if (attrs
->cap
.max_recv_sge
> qattr
->max_sge
) {
1129 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1130 attrs
->cap
.max_recv_sge
, qattr
->max_sge
);
1134 /* Unprivileged user space cannot create special QP */
1135 if (ibpd
->uobject
&& attrs
->qp_type
== IB_QPT_GSI
) {
1137 "create qp: userspace can't create special QPs of type=0x%x\n",
1145 static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp
*uresp
,
1148 uresp
->rq_db_offset
= DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD
);
1149 uresp
->rq_icid
= qp
->icid
;
1152 static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp
*uresp
,
1155 uresp
->sq_db_offset
= DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD
);
1156 uresp
->sq_icid
= qp
->icid
+ 1;
1159 static int qedr_copy_qp_uresp(struct qedr_dev
*dev
,
1160 struct qedr_qp
*qp
, struct ib_udata
*udata
)
1162 struct qedr_create_qp_uresp uresp
;
1165 memset(&uresp
, 0, sizeof(uresp
));
1166 qedr_copy_sq_uresp(&uresp
, qp
);
1167 qedr_copy_rq_uresp(&uresp
, qp
);
1169 uresp
.atomic_supported
= dev
->atomic_cap
!= IB_ATOMIC_NONE
;
1170 uresp
.qp_id
= qp
->qp_id
;
1172 rc
= ib_copy_to_udata(udata
, &uresp
, sizeof(uresp
));
1175 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1181 static void qedr_set_qp_init_params(struct qedr_dev
*dev
,
1184 struct ib_qp_init_attr
*attrs
)
1188 spin_lock_init(&qp
->q_lock
);
1190 qp
->qp_type
= attrs
->qp_type
;
1191 qp
->max_inline_data
= attrs
->cap
.max_inline_data
;
1192 qp
->sq
.max_sges
= attrs
->cap
.max_send_sge
;
1193 qp
->state
= QED_ROCE_QP_STATE_RESET
;
1194 qp
->signaled
= (attrs
->sq_sig_type
== IB_SIGNAL_ALL_WR
) ? true : false;
1195 qp
->sq_cq
= get_qedr_cq(attrs
->send_cq
);
1196 qp
->rq_cq
= get_qedr_cq(attrs
->recv_cq
);
1199 DP_DEBUG(dev
, QEDR_MSG_QP
,
1200 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1201 pd
->pd_id
, qp
->qp_type
, qp
->max_inline_data
,
1202 qp
->state
, qp
->signaled
, (attrs
->srq
) ? 1 : 0);
1203 DP_DEBUG(dev
, QEDR_MSG_QP
,
1204 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1205 qp
->sq
.max_sges
, qp
->sq_cq
->icid
);
1206 qp
->rq
.max_sges
= attrs
->cap
.max_recv_sge
;
1207 DP_DEBUG(dev
, QEDR_MSG_QP
,
1208 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1209 qp
->rq
.max_sges
, qp
->rq_cq
->icid
);
1213 qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params
*params
,
1214 struct qedr_create_qp_ureq
*ureq
)
1216 /* QP handle to be written in CQE */
1217 params
->qp_handle_lo
= ureq
->qp_handle_lo
;
1218 params
->qp_handle_hi
= ureq
->qp_handle_hi
;
1222 qedr_init_qp_kernel_doorbell_sq(struct qedr_dev
*dev
, struct qedr_qp
*qp
)
1224 qp
->sq
.db
= dev
->db_addr
+
1225 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD
);
1226 qp
->sq
.db_data
.data
.icid
= qp
->icid
+ 1;
1230 qedr_init_qp_kernel_doorbell_rq(struct qedr_dev
*dev
, struct qedr_qp
*qp
)
1232 qp
->rq
.db
= dev
->db_addr
+
1233 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD
);
1234 qp
->rq
.db_data
.data
.icid
= qp
->icid
;
1238 qedr_init_qp_kernel_params_rq(struct qedr_dev
*dev
,
1239 struct qedr_qp
*qp
, struct ib_qp_init_attr
*attrs
)
1241 /* Allocate driver internal RQ array */
1242 qp
->rqe_wr_id
= kcalloc(qp
->rq
.max_wr
, sizeof(*qp
->rqe_wr_id
),
1247 DP_DEBUG(dev
, QEDR_MSG_QP
, "RQ max_wr set to %d.\n", qp
->rq
.max_wr
);
1253 qedr_init_qp_kernel_params_sq(struct qedr_dev
*dev
,
1255 struct ib_qp_init_attr
*attrs
,
1256 struct qed_rdma_create_qp_in_params
*params
)
1260 /* Allocate driver internal SQ array */
1261 temp_max_wr
= attrs
->cap
.max_send_wr
* dev
->wq_multiplier
;
1262 temp_max_wr
= min_t(u32
, temp_max_wr
, dev
->attr
.max_sqe
);
1264 /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
1265 qp
->sq
.max_wr
= (u16
)temp_max_wr
;
1266 qp
->wqe_wr_id
= kcalloc(qp
->sq
.max_wr
, sizeof(*qp
->wqe_wr_id
),
1271 DP_DEBUG(dev
, QEDR_MSG_QP
, "SQ max_wr set to %d.\n", qp
->sq
.max_wr
);
1273 /* QP handle to be written in CQE */
1274 params
->qp_handle_lo
= lower_32_bits((uintptr_t)qp
);
1275 params
->qp_handle_hi
= upper_32_bits((uintptr_t)qp
);
1280 static inline int qedr_init_qp_kernel_sq(struct qedr_dev
*dev
,
1282 struct ib_qp_init_attr
*attrs
)
1284 u32 n_sq_elems
, n_sq_entries
;
1287 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1288 * the ring. The ring should allow at least a single WR, even if the
1289 * user requested none, due to allocation issues.
1291 n_sq_entries
= attrs
->cap
.max_send_wr
;
1292 n_sq_entries
= min_t(u32
, n_sq_entries
, dev
->attr
.max_sqe
);
1293 n_sq_entries
= max_t(u32
, n_sq_entries
, 1);
1294 n_sq_elems
= n_sq_entries
* QEDR_MAX_SQE_ELEMENTS_PER_SQE
;
1295 rc
= dev
->ops
->common
->chain_alloc(dev
->cdev
,
1296 QED_CHAIN_USE_TO_PRODUCE
,
1298 QED_CHAIN_CNT_TYPE_U32
,
1300 QEDR_SQE_ELEMENT_SIZE
,
1303 DP_ERR(dev
, "failed to allocate QP %p SQ\n", qp
);
1307 DP_DEBUG(dev
, QEDR_MSG_SQ
,
1308 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1309 qed_chain_get_pbl_phys(&qp
->sq
.pbl
), attrs
->cap
.max_send_wr
,
1310 n_sq_entries
, qed_chain_get_capacity(&qp
->sq
.pbl
), rc
);
1314 static inline int qedr_init_qp_kernel_rq(struct qedr_dev
*dev
,
1316 struct ib_qp_init_attr
*attrs
)
1318 u32 n_rq_elems
, n_rq_entries
;
	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
1325 n_rq_entries
= max_t(u32
, attrs
->cap
.max_recv_wr
, 1);
1326 n_rq_elems
= n_rq_entries
* QEDR_MAX_RQE_ELEMENTS_PER_RQE
;
1327 rc
= dev
->ops
->common
->chain_alloc(dev
->cdev
,
1328 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1330 QED_CHAIN_CNT_TYPE_U32
,
1332 QEDR_RQE_ELEMENT_SIZE
,
1336 DP_ERR(dev
, "failed to allocate memory for QP %p RQ\n", qp
);
1340 DP_DEBUG(dev
, QEDR_MSG_RQ
,
1341 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1342 qed_chain_get_pbl_phys(&qp
->rq
.pbl
), attrs
->cap
.max_recv_wr
,
1343 n_rq_entries
, qed_chain_get_capacity(&qp
->rq
.pbl
), rc
);
1345 /* n_rq_entries < u16 so the casting is safe */
1346 qp
->rq
.max_wr
= (u16
)n_rq_entries
;
1352 qedr_init_qp_in_params_sq(struct qedr_dev
*dev
,
1355 struct ib_qp_init_attr
*attrs
,
1356 struct ib_udata
*udata
,
1357 struct qed_rdma_create_qp_in_params
*params
)
1359 /* QP handle to be written in an async event */
1360 params
->qp_handle_async_lo
= lower_32_bits((uintptr_t)qp
);
1361 params
->qp_handle_async_hi
= upper_32_bits((uintptr_t)qp
);
1363 params
->signal_all
= (attrs
->sq_sig_type
== IB_SIGNAL_ALL_WR
);
1364 params
->fmr_and_reserved_lkey
= !udata
;
1365 params
->pd
= pd
->pd_id
;
1366 params
->dpi
= pd
->uctx
? pd
->uctx
->dpi
: dev
->dpi
;
1367 params
->sq_cq_id
= get_qedr_cq(attrs
->send_cq
)->icid
;
1368 params
->max_sq_sges
= 0;
1369 params
->stats_queue
= 0;
1372 params
->sq_num_pages
= qp
->usq
.pbl_info
.num_pbes
;
1373 params
->sq_pbl_ptr
= qp
->usq
.pbl_tbl
->pa
;
1375 params
->sq_num_pages
= qed_chain_get_page_cnt(&qp
->sq
.pbl
);
1376 params
->sq_pbl_ptr
= qed_chain_get_pbl_phys(&qp
->sq
.pbl
);
1381 qedr_init_qp_in_params_rq(struct qedr_qp
*qp
,
1382 struct ib_qp_init_attr
*attrs
,
1383 struct ib_udata
*udata
,
1384 struct qed_rdma_create_qp_in_params
*params
)
1386 params
->rq_cq_id
= get_qedr_cq(attrs
->recv_cq
)->icid
;
1388 params
->use_srq
= false;
1391 params
->rq_num_pages
= qp
->urq
.pbl_info
.num_pbes
;
1392 params
->rq_pbl_ptr
= qp
->urq
.pbl_tbl
->pa
;
1394 params
->rq_num_pages
= qed_chain_get_page_cnt(&qp
->rq
.pbl
);
1395 params
->rq_pbl_ptr
= qed_chain_get_pbl_phys(&qp
->rq
.pbl
);
1399 static inline void qedr_qp_user_print(struct qedr_dev
*dev
, struct qedr_qp
*qp
)
1401 DP_DEBUG(dev
, QEDR_MSG_QP
,
1402 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1403 qp
, qp
->usq
.buf_addr
, qp
->usq
.buf_len
, qp
->urq
.buf_addr
,
1407 static inline int qedr_init_user_qp(struct ib_ucontext
*ib_ctx
,
1408 struct qedr_dev
*dev
,
1410 struct qedr_create_qp_ureq
*ureq
)
1414 /* SQ - read access only (0), dma sync not required (0) */
1415 rc
= qedr_init_user_queue(ib_ctx
, dev
, &qp
->usq
, ureq
->sq_addr
,
1416 ureq
->sq_len
, 0, 0);
1420 /* RQ - read access only (0), dma sync not required (0) */
1421 rc
= qedr_init_user_queue(ib_ctx
, dev
, &qp
->urq
, ureq
->rq_addr
,
1422 ureq
->rq_len
, 0, 0);
1425 qedr_cleanup_user_sq(dev
, qp
);
1430 qedr_init_kernel_qp(struct qedr_dev
*dev
,
1432 struct ib_qp_init_attr
*attrs
,
1433 struct qed_rdma_create_qp_in_params
*params
)
1437 rc
= qedr_init_qp_kernel_sq(dev
, qp
, attrs
);
1439 DP_ERR(dev
, "failed to init kernel QP %p SQ\n", qp
);
1443 rc
= qedr_init_qp_kernel_params_sq(dev
, qp
, attrs
, params
);
1445 dev
->ops
->common
->chain_free(dev
->cdev
, &qp
->sq
.pbl
);
1446 DP_ERR(dev
, "failed to init kernel QP %p SQ params\n", qp
);
1450 rc
= qedr_init_qp_kernel_rq(dev
, qp
, attrs
);
1452 qedr_cleanup_kernel_sq(dev
, qp
);
1453 DP_ERR(dev
, "failed to init kernel QP %p RQ\n", qp
);
1457 rc
= qedr_init_qp_kernel_params_rq(dev
, qp
, attrs
);
1459 DP_ERR(dev
, "failed to init kernel QP %p RQ params\n", qp
);
1460 qedr_cleanup_kernel_sq(dev
, qp
);
1461 dev
->ops
->common
->chain_free(dev
->cdev
, &qp
->rq
.pbl
);
1468 struct ib_qp
*qedr_create_qp(struct ib_pd
*ibpd
,
1469 struct ib_qp_init_attr
*attrs
,
1470 struct ib_udata
*udata
)
1472 struct qedr_dev
*dev
= get_qedr_dev(ibpd
->device
);
1473 struct qed_rdma_create_qp_out_params out_params
;
1474 struct qed_rdma_create_qp_in_params in_params
;
1475 struct qedr_pd
*pd
= get_qedr_pd(ibpd
);
1476 struct ib_ucontext
*ib_ctx
= NULL
;
1477 struct qedr_ucontext
*ctx
= NULL
;
1478 struct qedr_create_qp_ureq ureq
;
1482 DP_DEBUG(dev
, QEDR_MSG_QP
, "create qp: called from %s, pd=%p\n",
1483 udata
? "user library" : "kernel", pd
);
1485 rc
= qedr_check_qp_attrs(ibpd
, dev
, attrs
);
1489 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1491 return ERR_PTR(-ENOMEM
);
1494 return ERR_PTR(-EINVAL
);
1496 DP_DEBUG(dev
, QEDR_MSG_QP
,
1497 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1498 get_qedr_cq(attrs
->send_cq
),
1499 get_qedr_cq(attrs
->send_cq
)->icid
,
1500 get_qedr_cq(attrs
->recv_cq
),
1501 get_qedr_cq(attrs
->recv_cq
)->icid
);
1503 qedr_set_qp_init_params(dev
, qp
, pd
, attrs
);
1505 if (attrs
->qp_type
== IB_QPT_GSI
) {
1508 "create qp: unexpected udata when creating GSI QP\n");
1511 return qedr_create_gsi_qp(dev
, attrs
, qp
);
1514 memset(&in_params
, 0, sizeof(in_params
));
1517 if (!(udata
&& ibpd
->uobject
&& ibpd
->uobject
->context
))
1520 ib_ctx
= ibpd
->uobject
->context
;
1521 ctx
= get_qedr_ucontext(ib_ctx
);
1523 memset(&ureq
, 0, sizeof(ureq
));
1524 if (ib_copy_from_udata(&ureq
, udata
, sizeof(ureq
))) {
1526 "create qp: problem copying data from user space\n");
1530 rc
= qedr_init_user_qp(ib_ctx
, dev
, qp
, &ureq
);
1534 qedr_init_qp_user_params(&in_params
, &ureq
);
1536 rc
= qedr_init_kernel_qp(dev
, qp
, attrs
, &in_params
);
1541 qedr_init_qp_in_params_sq(dev
, pd
, qp
, attrs
, udata
, &in_params
);
1542 qedr_init_qp_in_params_rq(qp
, attrs
, udata
, &in_params
);
1544 qp
->qed_qp
= dev
->ops
->rdma_create_qp(dev
->rdma_ctx
,
1545 &in_params
, &out_params
);
1550 qp
->qp_id
= out_params
.qp_id
;
1551 qp
->icid
= out_params
.icid
;
1552 qp
->ibqp
.qp_num
= qp
->qp_id
;
1555 rc
= qedr_copy_qp_uresp(dev
, qp
, udata
);
1559 qedr_qp_user_print(dev
, qp
);
1561 qedr_init_qp_kernel_doorbell_sq(dev
, qp
);
1562 qedr_init_qp_kernel_doorbell_rq(dev
, qp
);
1565 DP_DEBUG(dev
, QEDR_MSG_QP
, "created %s space QP %p\n",
1566 udata
? "user" : "kernel", qp
);
1571 rc
= dev
->ops
->rdma_destroy_qp(dev
->rdma_ctx
, qp
->qed_qp
);
1573 DP_ERR(dev
, "create qp: fatal fault. rc=%d", rc
);
1576 qedr_cleanup_user_sq(dev
, qp
);
1577 qedr_cleanup_user_rq(dev
, qp
);
1579 qedr_cleanup_kernel_sq(dev
, qp
);
1580 qedr_cleanup_kernel_rq(dev
, qp
);
1586 return ERR_PTR(-EFAULT
);
1589 enum ib_qp_state
qedr_get_ibqp_state(enum qed_roce_qp_state qp_state
)
1592 case QED_ROCE_QP_STATE_RESET
:
1593 return IB_QPS_RESET
;
1594 case QED_ROCE_QP_STATE_INIT
:
1596 case QED_ROCE_QP_STATE_RTR
:
1598 case QED_ROCE_QP_STATE_RTS
:
1600 case QED_ROCE_QP_STATE_SQD
:
1602 case QED_ROCE_QP_STATE_ERR
:
1604 case QED_ROCE_QP_STATE_SQE
:
1610 enum qed_roce_qp_state
qedr_get_state_from_ibqp(enum ib_qp_state qp_state
)
1614 return QED_ROCE_QP_STATE_RESET
;
1616 return QED_ROCE_QP_STATE_INIT
;
1618 return QED_ROCE_QP_STATE_RTR
;
1620 return QED_ROCE_QP_STATE_RTS
;
1622 return QED_ROCE_QP_STATE_SQD
;
1624 return QED_ROCE_QP_STATE_ERR
;
1626 return QED_ROCE_QP_STATE_ERR
;
1630 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info
*qph
)
1632 qed_chain_reset(&qph
->pbl
);
1636 qph
->db_data
.data
.value
= cpu_to_le16(0);
1639 static int qedr_update_qp_state(struct qedr_dev
*dev
,
1641 enum qed_roce_qp_state new_state
)
1645 if (new_state
== qp
->state
)
1648 switch (qp
->state
) {
1649 case QED_ROCE_QP_STATE_RESET
:
1650 switch (new_state
) {
1651 case QED_ROCE_QP_STATE_INIT
:
1652 qp
->prev_wqe_size
= 0;
1653 qedr_reset_qp_hwq_info(&qp
->sq
);
1654 qedr_reset_qp_hwq_info(&qp
->rq
);
1661 case QED_ROCE_QP_STATE_INIT
:
1662 switch (new_state
) {
1663 case QED_ROCE_QP_STATE_RTR
:
1664 /* Update doorbell (in case post_recv was
1665 * done before move to RTR)
1668 writel(qp
->rq
.db_data
.raw
, qp
->rq
.db
);
1669 /* Make sure write takes effect */
1672 case QED_ROCE_QP_STATE_ERR
:
1675 /* Invalid state change. */
1680 case QED_ROCE_QP_STATE_RTR
:
1682 switch (new_state
) {
1683 case QED_ROCE_QP_STATE_RTS
:
1685 case QED_ROCE_QP_STATE_ERR
:
1688 /* Invalid state change. */
1693 case QED_ROCE_QP_STATE_RTS
:
1695 switch (new_state
) {
1696 case QED_ROCE_QP_STATE_SQD
:
1698 case QED_ROCE_QP_STATE_ERR
:
1701 /* Invalid state change. */
1706 case QED_ROCE_QP_STATE_SQD
:
1708 switch (new_state
) {
1709 case QED_ROCE_QP_STATE_RTS
:
1710 case QED_ROCE_QP_STATE_ERR
:
1713 /* Invalid state change. */
1718 case QED_ROCE_QP_STATE_ERR
:
1720 switch (new_state
) {
1721 case QED_ROCE_QP_STATE_RESET
:
1736 int qedr_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1737 int attr_mask
, struct ib_udata
*udata
)
1739 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
1740 struct qed_rdma_modify_qp_in_params qp_params
= { 0 };
1741 struct qedr_dev
*dev
= get_qedr_dev(&qp
->dev
->ibdev
);
1742 enum ib_qp_state old_qp_state
, new_qp_state
;
1745 DP_DEBUG(dev
, QEDR_MSG_QP
,
1746 "modify qp: qp %p attr_mask=0x%x, state=%d", qp
, attr_mask
,
1749 old_qp_state
= qedr_get_ibqp_state(qp
->state
);
1750 if (attr_mask
& IB_QP_STATE
)
1751 new_qp_state
= attr
->qp_state
;
1753 new_qp_state
= old_qp_state
;
1755 if (!ib_modify_qp_is_ok
1756 (old_qp_state
, new_qp_state
, ibqp
->qp_type
, attr_mask
,
1757 IB_LINK_LAYER_ETHERNET
)) {
1759 "modify qp: invalid attribute mask=0x%x specified for\n"
1760 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1761 attr_mask
, qp
->qp_id
, ibqp
->qp_type
, old_qp_state
,
1767 /* Translate the masks... */
1768 if (attr_mask
& IB_QP_STATE
) {
1769 SET_FIELD(qp_params
.modify_flags
,
1770 QED_RDMA_MODIFY_QP_VALID_NEW_STATE
, 1);
1771 qp_params
.new_state
= qedr_get_state_from_ibqp(attr
->qp_state
);
1774 if (attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
)
1775 qp_params
.sqd_async
= true;
1777 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1778 SET_FIELD(qp_params
.modify_flags
,
1779 QED_ROCE_MODIFY_QP_VALID_PKEY
, 1);
1780 if (attr
->pkey_index
>= QEDR_ROCE_PKEY_TABLE_LEN
) {
1785 qp_params
.pkey
= QEDR_ROCE_PKEY_DEFAULT
;
1788 if (attr_mask
& IB_QP_QKEY
)
1789 qp
->qkey
= attr
->qkey
;
1791 if (attr_mask
& IB_QP_ACCESS_FLAGS
) {
1792 SET_FIELD(qp_params
.modify_flags
,
1793 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN
, 1);
1794 qp_params
.incoming_rdma_read_en
= attr
->qp_access_flags
&
1795 IB_ACCESS_REMOTE_READ
;
1796 qp_params
.incoming_rdma_write_en
= attr
->qp_access_flags
&
1797 IB_ACCESS_REMOTE_WRITE
;
1798 qp_params
.incoming_atomic_en
= attr
->qp_access_flags
&
1799 IB_ACCESS_REMOTE_ATOMIC
;
1802 if (attr_mask
& (IB_QP_AV
| IB_QP_PATH_MTU
)) {
1803 if (attr_mask
& IB_QP_PATH_MTU
) {
1804 if (attr
->path_mtu
< IB_MTU_256
||
1805 attr
->path_mtu
> IB_MTU_4096
) {
1806 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1810 qp
->mtu
= min(ib_mtu_enum_to_int(attr
->path_mtu
),
1811 ib_mtu_enum_to_int(iboe_get_mtu
1817 ib_mtu_enum_to_int(iboe_get_mtu(dev
->ndev
->mtu
));
1818 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp
->mtu
);
1821 SET_FIELD(qp_params
.modify_flags
,
1822 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR
, 1);
1824 qp_params
.traffic_class_tos
= attr
->ah_attr
.grh
.traffic_class
;
1825 qp_params
.flow_label
= attr
->ah_attr
.grh
.flow_label
;
1826 qp_params
.hop_limit_ttl
= attr
->ah_attr
.grh
.hop_limit
;
1828 qp
->sgid_idx
= attr
->ah_attr
.grh
.sgid_index
;
1830 rc
= get_gid_info_from_table(ibqp
, attr
, attr_mask
, &qp_params
);
1833 "modify qp: problems with GID index %d (rc=%d)\n",
1834 attr
->ah_attr
.grh
.sgid_index
, rc
);
1838 rc
= qedr_get_dmac(dev
, &attr
->ah_attr
,
1839 qp_params
.remote_mac_addr
);
1843 qp_params
.use_local_mac
= true;
1844 ether_addr_copy(qp_params
.local_mac_addr
, dev
->ndev
->dev_addr
);
1846 DP_DEBUG(dev
, QEDR_MSG_QP
, "dgid=%x:%x:%x:%x\n",
1847 qp_params
.dgid
.dwords
[0], qp_params
.dgid
.dwords
[1],
1848 qp_params
.dgid
.dwords
[2], qp_params
.dgid
.dwords
[3]);
1849 DP_DEBUG(dev
, QEDR_MSG_QP
, "sgid=%x:%x:%x:%x\n",
1850 qp_params
.sgid
.dwords
[0], qp_params
.sgid
.dwords
[1],
1851 qp_params
.sgid
.dwords
[2], qp_params
.sgid
.dwords
[3]);
1852 DP_DEBUG(dev
, QEDR_MSG_QP
, "remote_mac=[%pM]\n",
1853 qp_params
.remote_mac_addr
);
1856 qp_params
.mtu
= qp
->mtu
;
1857 qp_params
.lb_indication
= false;
1860 if (!qp_params
.mtu
) {
1861 /* Stay with current MTU */
1863 qp_params
.mtu
= qp
->mtu
;
1866 ib_mtu_enum_to_int(iboe_get_mtu(dev
->ndev
->mtu
));
1869 if (attr_mask
& IB_QP_TIMEOUT
) {
1870 SET_FIELD(qp_params
.modify_flags
,
1871 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT
, 1);
1873 qp_params
.ack_timeout
= attr
->timeout
;
1874 if (attr
->timeout
) {
1877 temp
= 4096 * (1UL << attr
->timeout
) / 1000 / 1000;
1878 /* FW requires [msec] */
1879 qp_params
.ack_timeout
= temp
;
1882 qp_params
.ack_timeout
= 0;
1885 if (attr_mask
& IB_QP_RETRY_CNT
) {
1886 SET_FIELD(qp_params
.modify_flags
,
1887 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT
, 1);
1888 qp_params
.retry_cnt
= attr
->retry_cnt
;
1891 if (attr_mask
& IB_QP_RNR_RETRY
) {
1892 SET_FIELD(qp_params
.modify_flags
,
1893 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT
, 1);
1894 qp_params
.rnr_retry_cnt
= attr
->rnr_retry
;
1897 if (attr_mask
& IB_QP_RQ_PSN
) {
1898 SET_FIELD(qp_params
.modify_flags
,
1899 QED_ROCE_MODIFY_QP_VALID_RQ_PSN
, 1);
1900 qp_params
.rq_psn
= attr
->rq_psn
;
1901 qp
->rq_psn
= attr
->rq_psn
;
1904 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1905 if (attr
->max_rd_atomic
> dev
->attr
.max_qp_req_rd_atomic_resc
) {
1908 "unsupported max_rd_atomic=%d, supported=%d\n",
1909 attr
->max_rd_atomic
,
1910 dev
->attr
.max_qp_req_rd_atomic_resc
);
1914 SET_FIELD(qp_params
.modify_flags
,
1915 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ
, 1);
1916 qp_params
.max_rd_atomic_req
= attr
->max_rd_atomic
;
1919 if (attr_mask
& IB_QP_MIN_RNR_TIMER
) {
1920 SET_FIELD(qp_params
.modify_flags
,
1921 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER
, 1);
1922 qp_params
.min_rnr_nak_timer
= attr
->min_rnr_timer
;
1925 if (attr_mask
& IB_QP_SQ_PSN
) {
1926 SET_FIELD(qp_params
.modify_flags
,
1927 QED_ROCE_MODIFY_QP_VALID_SQ_PSN
, 1);
1928 qp_params
.sq_psn
= attr
->sq_psn
;
1929 qp
->sq_psn
= attr
->sq_psn
;
1932 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1933 if (attr
->max_dest_rd_atomic
>
1934 dev
->attr
.max_qp_resp_rd_atomic_resc
) {
1936 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1937 attr
->max_dest_rd_atomic
,
1938 dev
->attr
.max_qp_resp_rd_atomic_resc
);
1944 SET_FIELD(qp_params
.modify_flags
,
1945 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP
, 1);
1946 qp_params
.max_rd_atomic_resp
= attr
->max_dest_rd_atomic
;
1949 if (attr_mask
& IB_QP_DEST_QPN
) {
1950 SET_FIELD(qp_params
.modify_flags
,
1951 QED_ROCE_MODIFY_QP_VALID_DEST_QP
, 1);
1953 qp_params
.dest_qp
= attr
->dest_qp_num
;
1954 qp
->dest_qp_num
= attr
->dest_qp_num
;
1957 if (qp
->qp_type
!= IB_QPT_GSI
)
1958 rc
= dev
->ops
->rdma_modify_qp(dev
->rdma_ctx
,
1959 qp
->qed_qp
, &qp_params
);
1961 if (attr_mask
& IB_QP_STATE
) {
1962 if ((qp
->qp_type
!= IB_QPT_GSI
) && (!udata
))
1963 qedr_update_qp_state(dev
, qp
, qp_params
.new_state
);
1964 qp
->state
= qp_params
.new_state
;
1971 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params
*params
)
1973 int ib_qp_acc_flags
= 0;
1975 if (params
->incoming_rdma_write_en
)
1976 ib_qp_acc_flags
|= IB_ACCESS_REMOTE_WRITE
;
1977 if (params
->incoming_rdma_read_en
)
1978 ib_qp_acc_flags
|= IB_ACCESS_REMOTE_READ
;
1979 if (params
->incoming_atomic_en
)
1980 ib_qp_acc_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
1981 ib_qp_acc_flags
|= IB_ACCESS_LOCAL_WRITE
;
1982 return ib_qp_acc_flags
;
1985 int qedr_query_qp(struct ib_qp
*ibqp
,
1986 struct ib_qp_attr
*qp_attr
,
1987 int attr_mask
, struct ib_qp_init_attr
*qp_init_attr
)
1989 struct qed_rdma_query_qp_out_params params
;
1990 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
1991 struct qedr_dev
*dev
= qp
->dev
;
1994 memset(¶ms
, 0, sizeof(params
));
1996 rc
= dev
->ops
->rdma_query_qp(dev
->rdma_ctx
, qp
->qed_qp
, ¶ms
);
2000 memset(qp_attr
, 0, sizeof(*qp_attr
));
2001 memset(qp_init_attr
, 0, sizeof(*qp_init_attr
));
2003 qp_attr
->qp_state
= qedr_get_ibqp_state(params
.state
);
2004 qp_attr
->cur_qp_state
= qedr_get_ibqp_state(params
.state
);
2005 qp_attr
->path_mtu
= iboe_get_mtu(params
.mtu
);
2006 qp_attr
->path_mig_state
= IB_MIG_MIGRATED
;
2007 qp_attr
->rq_psn
= params
.rq_psn
;
2008 qp_attr
->sq_psn
= params
.sq_psn
;
2009 qp_attr
->dest_qp_num
= params
.dest_qp
;
2011 qp_attr
->qp_access_flags
= qedr_to_ib_qp_acc_flags(¶ms
);
2013 qp_attr
->cap
.max_send_wr
= qp
->sq
.max_wr
;
2014 qp_attr
->cap
.max_recv_wr
= qp
->rq
.max_wr
;
2015 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_sges
;
2016 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_sges
;
2017 qp_attr
->cap
.max_inline_data
= qp
->max_inline_data
;
2018 qp_init_attr
->cap
= qp_attr
->cap
;
2020 memcpy(&qp_attr
->ah_attr
.grh
.dgid
.raw
[0], ¶ms
.dgid
.bytes
[0],
2021 sizeof(qp_attr
->ah_attr
.grh
.dgid
.raw
));
2023 qp_attr
->ah_attr
.grh
.flow_label
= params
.flow_label
;
2024 qp_attr
->ah_attr
.grh
.sgid_index
= qp
->sgid_idx
;
2025 qp_attr
->ah_attr
.grh
.hop_limit
= params
.hop_limit_ttl
;
2026 qp_attr
->ah_attr
.grh
.traffic_class
= params
.traffic_class_tos
;
2028 qp_attr
->ah_attr
.ah_flags
= IB_AH_GRH
;
2029 qp_attr
->ah_attr
.port_num
= 1;
2030 qp_attr
->ah_attr
.sl
= 0;
2031 qp_attr
->timeout
= params
.timeout
;
2032 qp_attr
->rnr_retry
= params
.rnr_retry
;
2033 qp_attr
->retry_cnt
= params
.retry_cnt
;
2034 qp_attr
->min_rnr_timer
= params
.min_rnr_nak_timer
;
2035 qp_attr
->pkey_index
= params
.pkey_index
;
2036 qp_attr
->port_num
= 1;
2037 qp_attr
->ah_attr
.src_path_bits
= 0;
2038 qp_attr
->ah_attr
.static_rate
= 0;
2039 qp_attr
->alt_pkey_index
= 0;
2040 qp_attr
->alt_port_num
= 0;
2041 qp_attr
->alt_timeout
= 0;
2042 memset(&qp_attr
->alt_ah_attr
, 0, sizeof(qp_attr
->alt_ah_attr
));
2044 qp_attr
->sq_draining
= (params
.state
== QED_ROCE_QP_STATE_SQD
) ? 1 : 0;
2045 qp_attr
->max_dest_rd_atomic
= params
.max_dest_rd_atomic
;
2046 qp_attr
->max_rd_atomic
= params
.max_rd_atomic
;
2047 qp_attr
->en_sqd_async_notify
= (params
.sqd_async
) ? 1 : 0;
2049 DP_DEBUG(dev
, QEDR_MSG_QP
, "QEDR_QUERY_QP: max_inline_data=%d\n",
2050 qp_attr
->cap
.max_inline_data
);
2056 int qedr_destroy_qp(struct ib_qp
*ibqp
)
2058 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
2059 struct qedr_dev
*dev
= qp
->dev
;
2060 struct ib_qp_attr attr
;
2064 DP_DEBUG(dev
, QEDR_MSG_QP
, "destroy qp: destroying %p, qp type=%d\n",
2067 if (qp
->state
!= (QED_ROCE_QP_STATE_RESET
| QED_ROCE_QP_STATE_ERR
|
2068 QED_ROCE_QP_STATE_INIT
)) {
2069 attr
.qp_state
= IB_QPS_ERR
;
2070 attr_mask
|= IB_QP_STATE
;
2072 /* Change the QP state to ERROR */
2073 qedr_modify_qp(ibqp
, &attr
, attr_mask
, NULL
);
2076 if (qp
->qp_type
!= IB_QPT_GSI
) {
2077 rc
= dev
->ops
->rdma_destroy_qp(dev
->rdma_ctx
, qp
->qed_qp
);
2081 qedr_destroy_gsi_qp(dev
);
2084 if (ibqp
->uobject
&& ibqp
->uobject
->context
) {
2085 qedr_cleanup_user_sq(dev
, qp
);
2086 qedr_cleanup_user_rq(dev
, qp
);
2088 qedr_cleanup_kernel_sq(dev
, qp
);
2089 qedr_cleanup_kernel_rq(dev
, qp
);
struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	struct qedr_ah *ah;

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->attr = *attr;

	return &ah->ibah;
}

int qedr_destroy_ah(struct ib_ah *ibah)
{
	struct qedr_ah *ah = get_qedr_ah(ibah);

	kfree(ah);
	return 0;
}

2118 static void free_mr_info(struct qedr_dev
*dev
, struct mr_info
*info
)
2120 struct qedr_pbl
*pbl
, *tmp
;
2122 if (info
->pbl_table
)
2123 list_add_tail(&info
->pbl_table
->list_entry
,
2124 &info
->free_pbl_list
);
2126 if (!list_empty(&info
->inuse_pbl_list
))
2127 list_splice(&info
->inuse_pbl_list
, &info
->free_pbl_list
);
2129 list_for_each_entry_safe(pbl
, tmp
, &info
->free_pbl_list
, list_entry
) {
2130 list_del(&pbl
->list_entry
);
2131 qedr_free_pbl(dev
, &info
->pbl_info
, pbl
);
2135 static int init_mr_info(struct qedr_dev
*dev
, struct mr_info
*info
,
2136 size_t page_list_len
, bool two_layered
)
2138 struct qedr_pbl
*tmp
;
2141 INIT_LIST_HEAD(&info
->free_pbl_list
);
2142 INIT_LIST_HEAD(&info
->inuse_pbl_list
);
2144 rc
= qedr_prepare_pbl_tbl(dev
, &info
->pbl_info
,
2145 page_list_len
, two_layered
);
2149 info
->pbl_table
= qedr_alloc_pbl_tbl(dev
, &info
->pbl_info
, GFP_KERNEL
);
2150 if (!info
->pbl_table
) {
2155 DP_DEBUG(dev
, QEDR_MSG_MR
, "pbl_table_pa = %pa\n",
2156 &info
->pbl_table
->pa
);
2158 /* in usual case we use 2 PBLs, so we add one to free
2159 * list and allocating another one
2161 tmp
= qedr_alloc_pbl_tbl(dev
, &info
->pbl_info
, GFP_KERNEL
);
2163 DP_DEBUG(dev
, QEDR_MSG_MR
, "Extra PBL is not allocated\n");
2167 list_add_tail(&tmp
->list_entry
, &info
->free_pbl_list
);
2169 DP_DEBUG(dev
, QEDR_MSG_MR
, "extra pbl_table_pa = %pa\n", &tmp
->pa
);
2173 free_mr_info(dev
, info
);
2178 struct ib_mr
*qedr_reg_user_mr(struct ib_pd
*ibpd
, u64 start
, u64 len
,
2179 u64 usr_addr
, int acc
, struct ib_udata
*udata
)
2181 struct qedr_dev
*dev
= get_qedr_dev(ibpd
->device
);
2186 pd
= get_qedr_pd(ibpd
);
2187 DP_DEBUG(dev
, QEDR_MSG_MR
,
2188 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2189 pd
->pd_id
, start
, len
, usr_addr
, acc
);
2191 if (acc
& IB_ACCESS_REMOTE_WRITE
&& !(acc
& IB_ACCESS_LOCAL_WRITE
))
2192 return ERR_PTR(-EINVAL
);
2194 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
2198 mr
->type
= QEDR_MR_USER
;
2200 mr
->umem
= ib_umem_get(ibpd
->uobject
->context
, start
, len
, acc
, 0);
2201 if (IS_ERR(mr
->umem
)) {
2206 rc
= init_mr_info(dev
, &mr
->info
, ib_umem_page_count(mr
->umem
), 1);
2210 qedr_populate_pbls(dev
, mr
->umem
, mr
->info
.pbl_table
,
2211 &mr
->info
.pbl_info
);
2213 rc
= dev
->ops
->rdma_alloc_tid(dev
->rdma_ctx
, &mr
->hw_mr
.itid
);
2215 DP_ERR(dev
, "roce alloc tid returned an error %d\n", rc
);
2219 /* Index only, 18 bit long, lkey = itid << 8 | key */
2220 mr
->hw_mr
.tid_type
= QED_RDMA_TID_REGISTERED_MR
;
2222 mr
->hw_mr
.pd
= pd
->pd_id
;
2223 mr
->hw_mr
.local_read
= 1;
2224 mr
->hw_mr
.local_write
= (acc
& IB_ACCESS_LOCAL_WRITE
) ? 1 : 0;
2225 mr
->hw_mr
.remote_read
= (acc
& IB_ACCESS_REMOTE_READ
) ? 1 : 0;
2226 mr
->hw_mr
.remote_write
= (acc
& IB_ACCESS_REMOTE_WRITE
) ? 1 : 0;
2227 mr
->hw_mr
.remote_atomic
= (acc
& IB_ACCESS_REMOTE_ATOMIC
) ? 1 : 0;
2228 mr
->hw_mr
.mw_bind
= false;
2229 mr
->hw_mr
.pbl_ptr
= mr
->info
.pbl_table
[0].pa
;
2230 mr
->hw_mr
.pbl_two_level
= mr
->info
.pbl_info
.two_layered
;
2231 mr
->hw_mr
.pbl_page_size_log
= ilog2(mr
->info
.pbl_info
.pbl_size
);
2232 mr
->hw_mr
.page_size_log
= ilog2(mr
->umem
->page_size
);
2233 mr
->hw_mr
.fbo
= ib_umem_offset(mr
->umem
);
2234 mr
->hw_mr
.length
= len
;
2235 mr
->hw_mr
.vaddr
= usr_addr
;
2236 mr
->hw_mr
.zbva
= false;
2237 mr
->hw_mr
.phy_mr
= false;
2238 mr
->hw_mr
.dma_mr
= false;
2240 rc
= dev
->ops
->rdma_register_tid(dev
->rdma_ctx
, &mr
->hw_mr
);
2242 DP_ERR(dev
, "roce register tid returned an error %d\n", rc
);
2246 mr
->ibmr
.lkey
= mr
->hw_mr
.itid
<< 8 | mr
->hw_mr
.key
;
2247 if (mr
->hw_mr
.remote_write
|| mr
->hw_mr
.remote_read
||
2248 mr
->hw_mr
.remote_atomic
)
2249 mr
->ibmr
.rkey
= mr
->hw_mr
.itid
<< 8 | mr
->hw_mr
.key
;
2251 DP_DEBUG(dev
, QEDR_MSG_MR
, "register user mr lkey: %x\n",
2256 dev
->ops
->rdma_free_tid(dev
->rdma_ctx
, mr
->hw_mr
.itid
);
2258 qedr_free_pbl(dev
, &mr
->info
.pbl_info
, mr
->info
.pbl_table
);
int qedr_dereg_mr(struct ib_mr *ib_mr)
{
	struct qedr_mr *mr = get_qedr_mr(ib_mr);
	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
	int rc = 0;

	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
	if (rc)
		return rc;

	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);

	if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
		qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return rc;
}
struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
{
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_mr *mr;
	int rc = -ENOMEM;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
		 max_page_list_len);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(rc);

	mr->dev = dev;
	mr->type = QEDR_MR_FRMR;

	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
	if (rc)
		goto err0;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
		goto err0;
	}

	/* Index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
	mr->hw_mr.key = 0;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = 0;
	mr->hw_mr.remote_read = 0;
	mr->hw_mr.remote_write = 0;
	mr->hw_mr.remote_atomic = 0;
	mr->hw_mr.mw_bind = false;
	mr->hw_mr.pbl_ptr = 0;
	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
	mr->hw_mr.fbo = 0;
	mr->hw_mr.length = 0;
	mr->hw_mr.vaddr = 0;
	mr->hw_mr.zbva = false;
	mr->hw_mr.phy_mr = true;
	mr->hw_mr.dma_mr = false;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err1;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	mr->ibmr.rkey = mr->ibmr.lkey;

	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
	return mr;

err1:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err0:
	kfree(mr);
	return ERR_PTR(rc);
}
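/* A fast-registration MR is created "empty": it is registered with the HW
 * as a physical MR (phy_mr = true) with zero length/vaddr and no PBL
 * pointer. The real page list, length and access rights are supplied later,
 * at post-send time, through an IB_WR_REG_MR work request
 * (see qedr_prepare_reg()).
 */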
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
			    enum ib_mr_type mr_type, u32 max_num_sg)
{
	struct qedr_dev *dev;
	struct qedr_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __qedr_alloc_mr(ibpd, max_num_sg);

	if (IS_ERR(mr))
		return ERR_PTR(-EINVAL);

	return &mr->ibmr;
}
static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);
	struct qedr_pbl *pbl_table;
	struct regpair *pbe;
	u32 pbes_in_page;

	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
		return -ENOMEM;
	}

	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
		 mr->npages, addr);

	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
	pbe = (struct regpair *)pbl_table->va;
	pbe += mr->npages % pbes_in_page;
	pbe->lo = cpu_to_le32((u32)addr);
	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));

	mr->npages++;

	return 0;
}
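/* qedr_set_page() is the per-page callback passed to ib_sg_to_pages(): page
 * number 'npages' is written to PBL table (npages / pbes_in_page), entry
 * (npages % pbes_in_page). With a 4KB PBL (512 8-byte PBEs), page index 513
 * therefore lands in entry 1 of the second PBL.
 */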
static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
{
	int work = info->completed - info->completed_handled - 1;

	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
		struct qedr_pbl *pbl;

		/* Free all the page list that are possible to be freed
		 * (all the ones that were invalidated), under the assumption
		 * that if an FMR was completed successfully that means that
		 * if there was an invalidate operation before it also ended
		 */
		pbl = list_first_entry(&info->inuse_pbl_list,
				       struct qedr_pbl, list_entry);
		list_del(&pbl->list_entry);
		list_add_tail(&pbl->list_entry, &info->free_pbl_list);
		info->completed_handled++;
	}
}
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);

	mr->npages = 0;

	handle_completed_mrs(mr->dev, &mr->info);
	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
}
struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = QEDR_MR_DMA;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
		goto err1;
	}

	/* index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.dma_mr = true;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err2;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
	return &mr->ibmr;

err2:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
	kfree(mr);
	return ERR_PTR(rc);
}
static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
{
	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
}
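/* The work queue is a ring of max_wr entries: it is reported full when
 * advancing the producer would make it collide with the consumer, e.g.
 * with max_wr = 10, prod = 9, cons = 0 -> (9 + 1) % 10 == cons -> full.
 */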
static int sge_data_len(struct ib_sge *sg_list, int num_sge)
{
	int i, len = 0;

	for (i = 0; i < num_sge; i++)
		len += sg_list[i].length;

	return len;
}
static void swap_wqe_data64(u64 *p)
{
	int i;

	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
		*p = cpu_to_be64(cpu_to_le64(*p));
}
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
				       struct qedr_qp *qp, u8 *wqe_size,
				       struct ib_send_wr *wr,
				       struct ib_send_wr **bad_wr, u8 *bits,
				       u8 bit)
{
	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
	char *seg_prt, *wqe;
	int i, seg_siz;

	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
		*bad_wr = wr;
		return 0;
	}

	if (!data_size)
		return data_size;

	*bits |= bit;

	seg_prt = NULL;
	wqe = NULL;
	seg_siz = 0;

	/* Copy data inline */
	for (i = 0; i < wr->num_sge; i++) {
		u32 len = wr->sg_list[i].length;
		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;

		while (len > 0) {
			u32 cur;

			/* New segment required */
			if (!seg_siz) {
				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
				seg_prt = wqe;
				seg_siz = sizeof(struct rdma_sq_common_wqe);
				(*wqe_size)++;
			}

			/* Calculate currently allowed length */
			cur = min_t(u32, len, seg_siz);
			memcpy(seg_prt, src, cur);

			/* Update segment variables */
			seg_prt += cur;
			seg_siz -= cur;

			/* Update sge variables */
			src += cur;
			len -= cur;

			/* Swap fully-completed segments */
			if (!seg_siz)
				swap_wqe_data64((u64 *)wqe);
		}
	}

	/* swap last not completed segment */
	if (seg_siz)
		swap_wqe_data64((u64 *)wqe);

	return data_size;
}
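/* Inline data is copied straight into SQ chain elements: a new element is
 * taken whenever the current one is exhausted (bumping *wqe_size), and each
 * element that is fully written - plus a possibly partial last one - is run
 * through swap_wqe_data64() so the FW sees it in the expected byte order.
 */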
#define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->flags = cpu_to_le32(vflags);		\
	} while (0)

#define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
	do {							\
		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
		(hdr)->num_sges = num_sge;			\
	} while (0)

#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->l_key = cpu_to_le32(vlkey);		\
	} while (0)
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
				struct ib_send_wr *wr)
{
	u32 data_size = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);

		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
		sge->length = cpu_to_le32(wr->sg_list[i].length);
		data_size += wr->sg_list[i].length;
	}

	if (wqe_size)
		*wqe_size += wr->num_sge;

	return data_size;
}
static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_rdma_wqe_1st *rwqe,
				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);

	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
						   bad_wr, &rwqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
}
static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_send_wqe_1st *swqe,
				     struct rdma_sq_send_wqe_2st *swqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	memset(swqe2, 0, sizeof(*swqe2));
	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
						   bad_wr, &swqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
}
static int qedr_prepare_reg(struct qedr_qp *qp,
			    struct rdma_sq_fmr_wqe_1st *fwqe1,
			    struct ib_reg_wr *wr)
{
	struct qedr_mr *mr = get_qedr_mr(wr->mr);
	struct rdma_sq_fmr_wqe_2nd *fwqe2;

	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
	fwqe1->l_key = wr->key;

	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
		   !!(wr->access & IB_ACCESS_REMOTE_READ));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
	fwqe2->fmr_ctrl = 0;

	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
		   ilog2(mr->ibmr.page_size) - 12);

	fwqe2->length_hi = 0;
	fwqe2->length_lo = mr->ibmr.length;
	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);

	qp->wqe_wr_id[qp->sq.prod].mr = mr;

	return 0;
}
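/* PAGE_SIZE_LOG is programmed relative to 4KB, hence the ilog2() - 12 above:
 * a 2MB MR page size, for example, is encoded as 21 - 12 = 9.
 */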
enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_INV:
		return IB_WC_SEND;
	case IB_WR_RDMA_READ:
		return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_WC_FETCH_ADD;
	case IB_WR_REG_MR:
		return IB_WC_REG_MR;
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	default:
		return IB_WC_SEND;
	}
}
inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
	int wq_is_full, err_wr, pbl_is_full;
	struct qedr_dev *dev = qp->dev;

	/* prevent SQ overflow and/or processing of a bad WR */
	err_wr = wr->num_sge > qp->sq.max_sges;
	wq_is_full = qedr_wq_is_full(&qp->sq);
	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	if (wq_is_full || err_wr || pbl_is_full) {
		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
			DP_ERR(dev,
			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
		}

		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
			DP_ERR(dev,
			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
		}

		if (pbl_is_full &&
		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
			DP_ERR(dev,
			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
		}
		return false;
	}

	return true;
}
int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct rdma_sq_atomic_wqe_1st *awqe1;
	struct rdma_sq_atomic_wqe_2nd *awqe2;
	struct rdma_sq_atomic_wqe_3rd *awqe3;
	struct rdma_sq_send_wqe_2st *swqe2;
	struct rdma_sq_local_inv_wqe *iwqe;
	struct rdma_sq_rdma_wqe_2nd *rwqe2;
	struct rdma_sq_send_wqe_1st *swqe;
	struct rdma_sq_rdma_wqe_1st *rwqe;
	struct rdma_sq_fmr_wqe_1st *fwqe1;
	struct rdma_sq_common_wqe *wqe;
	u32 length;
	int rc = 0;
	bool comp;

	if (!qedr_can_post_send(qp, wr)) {
		*bad_wr = wr;
		return -ENOMEM;
	}

	wqe = qed_chain_produce(&qp->sq.pbl);
	qp->wqe_wr_id[qp->sq.prod].signaled =
		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;

	wqe->flags = 0;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
		   !!(wr->send_flags & IB_SEND_SOLICITED));
	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
		   !!(wr->send_flags & IB_SEND_FENCE));
	wqe->prev_wqe_size = qp->prev_wqe_size;

	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);

		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;

		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		swqe->wqe_size = 2;
		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_WRITE:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_READ_WITH_INV:
		DP_ERR(dev,
		       "RDMA READ WITH INVALIDATE not supported\n");
		*bad_wr = wr;
		rc = -EINVAL;
		break;

	case IB_WR_RDMA_READ:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
		awqe1->wqe_size = 4;

		awqe2 = qed_chain_produce(&qp->sq.pbl);
		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);

		awqe3 = qed_chain_produce(&qp->sq.pbl);

		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->compare_add);
		} else {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->swap);
			DMA_REGPAIR_LE(awqe3->cmp_data,
				       atomic_wr(wr)->compare_add);
		}

		qedr_prepare_sq_sges(qp, NULL, wr);

		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
		qp->prev_wqe_size = awqe1->wqe_size;
		break;

	case IB_WR_LOCAL_INV:
		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
		iwqe->wqe_size = 1;

		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
		iwqe->inv_l_key = wr->ex.invalidate_rkey;
		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
		qp->prev_wqe_size = iwqe->wqe_size;
		break;
	case IB_WR_REG_MR:
		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
		fwqe1->wqe_size = 2;

		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
		if (rc) {
			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
			*bad_wr = wr;
			break;
		}

		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
		qp->prev_wqe_size = fwqe1->wqe_size;
		break;
	default:
		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
		rc = -EINVAL;
		*bad_wr = wr;
		break;
	}

	if (*bad_wr) {
		u16 value;

		/* Restore prod to its position before
		 * this WR was processed
		 */
		value = le16_to_cpu(qp->sq.db_data.data.value);
		qed_chain_set_prod(&qp->sq.pbl, value, wqe);

		/* Restore prev_wqe_size */
		qp->prev_wqe_size = wqe->prev_wqe_size;
		rc = -EINVAL;
		DP_ERR(dev, "POST SEND FAILED\n");
	}

	return rc;
}
int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	*bad_wr = NULL;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_send(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		DP_DEBUG(dev, QEDR_MSG_CQ,
			 "QP in wrong state! QP icid=0x%x state %d\n",
			 qp->icid, qp->state);
		return -EINVAL;
	}

	if (!wr) {
		DP_ERR(dev, "Got an empty post send.\n");
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return -EINVAL;
	}

	while (wr) {
		rc = __qedr_post_send(ibqp, wr, bad_wr);
		if (rc)
			break;

		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->sq);

		qp->sq.db_data.data.value++;

		wr = wr->next;
	}

	/* Trigger doorbell
	 * If there was a failure in the first WR then it will be triggered in
	 * vain. However this is not harmful (as long as the producer value is
	 * unchanged). For performance reasons we avoid checking for this
	 * redundant doorbell.
	 */
	wmb();
	writel(qp->sq.db_data.raw, qp->sq.db);

	/* Make sure write sticks */
	mmiowb();

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
}
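/* sq.db_data.data.value mirrors the SW producer index seen by the HW: it is
 * incremented once per posted WR, and the whole db_data.raw word is written
 * to the doorbell in a single writel(), with the preceding wmb() ensuring
 * the WQEs themselves are visible to the device first.
 */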
int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		   struct ib_recv_wr **bad_wr)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int status = 0;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		int i;

		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
		    wr->num_sge > qp->rq.max_sges) {
			DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
			       qp->rq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		for (i = 0; i < wr->num_sge; i++) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			if (!i)
				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
					  wr->num_sge);

			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
				  wr->sg_list[i].lkey);

			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
				   wr->sg_list[i].length, flags);
		}

		/* Special case of no sges. FW requires between 1-4 sges...
		 * in this case we need to post 1 sge with length zero. this is
		 * because rdma write with immediate consumes an RQ.
		 */
		if (!wr->num_sge) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);

			RQ_SGE_SET(rqe, 0, 0, flags);
			i = 1;
		}

		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;

		qedr_inc_sw_prod(&qp->rq);

		/* Flush all the writes before signalling doorbell */
		wmb();

		qp->rq.db_data.data.value++;

		writel(qp->rq.db_data.raw, qp->rq.db);

		/* Make sure write sticks */
		mmiowb();

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
}
static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
		cq->pbl_toggle;
}

static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;
	struct qedr_qp *qp;

	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
						   resp_cqe->qp_handle.lo,
						   u64);
	return qp;
}

static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
}

/* Return latest CQE (needs processing) */
static union rdma_cqe *get_cqe(struct qedr_cq *cq)
{
	return cq->latest_cqe;
}
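/* CQE ownership is tracked with a toggle bit: an entry is considered valid
 * only while the toggle bit in its flags matches the CQ's current
 * pbl_toggle, so stale entries from the previous lap around the ring are
 * ignored by the polling loop below.
 */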
/* In FMR we need to increase the number of FMR completed counter for the FMR
 * algorithm determining whether we can free a PBL or not.
 * We need to perform this whether the work request was signaled or not. For
 * this purpose we call this function from the condition that checks if a WR
 * should be skipped, to make sure we don't miss it (possibly this FMR
 * operation was not signaled).
 */
static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
{
	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
}
static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		       struct qedr_cq *cq, int num_entries,
		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
		       int force)
{
	u16 cnt = 0;

	while (num_entries && qp->sq.wqe_cons != hw_cons) {
		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
			qedr_chk_if_fmr(qp);
			/* skip WC */
			goto next_cqe;
		}

		/* fill WC */
		wc->status = status;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;

		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;

		switch (wc->opcode) {
		case IB_WC_RDMA_WRITE:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		case IB_WC_COMP_SWAP:
		case IB_WC_FETCH_ADD:
			wc->byte_len = 8;
			break;
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		default:
			break;
		}

		num_entries--;
		wc++;
		cnt++;
next_cqe:
		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
			qed_chain_consume(&qp->sq.pbl);
		qedr_inc_sw_cons(&qp->sq);
	}

	return cnt;
}
static int qedr_poll_cq_req(struct qedr_dev *dev,
			    struct qedr_qp *qp, struct qedr_cq *cq,
			    int num_entries, struct ib_wc *wc,
			    struct rdma_cqe_requester *req)
{
	int cnt = 0;

	switch (req->status) {
	case RDMA_CQE_REQ_STS_OK:
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_SUCCESS, 0);
		break;
	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
		DP_ERR(dev,
		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
		       cq->icid, qp->icid);
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_WR_FLUSH_ERR, 0);
		break;
	default:
		/* process all WQE before the consumer */
		qp->state = QED_ROCE_QP_STATE_ERR;
		cnt = process_req(dev, qp, cq, num_entries, wc,
				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
		wc += cnt;
		/* if we have extra WC fill it with actual error info */
		if (cnt < num_entries) {
			enum ib_wc_status wc_status;

			switch (req->status) {
			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_BAD_RESP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_LEN_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_QP_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_PROT_ERR;
				break;
			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_MW_BIND_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
				break;
			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RETRY_EXC_ERR;
				break;
			default:
				DP_ERR(dev,
				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_GENERAL_ERR;
			}
			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
					   wc_status, 1);
		}
	}

	return cnt;
}
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			       struct qedr_cq *cq, struct ib_wc *wc,
			       struct rdma_cqe_responder *resp, u64 wr_id)
{
	enum ib_wc_status wc_status = IB_WC_SUCCESS;
	u8 flags;

	wc->opcode = IB_WC_RECV;
	wc->wc_flags = 0;

	switch (resp->status) {
	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
		wc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
		wc_status = IB_WC_LOC_LEN_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
		wc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
		wc_status = IB_WC_LOC_PROT_ERR;
		break;
	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
		wc_status = IB_WC_MW_BIND_ERR;
		break;
	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
		wc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case RDMA_CQE_RESP_STS_OK:
		wc_status = IB_WC_SUCCESS;
		wc->byte_len = le32_to_cpu(resp->length);

		flags = resp->flags & QEDR_RESP_RDMA_IMM;

		if (flags == QEDR_RESP_RDMA_IMM)
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;

		if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
			wc->ex.imm_data =
				le32_to_cpu(resp->imm_data_or_inv_r_Key);
			wc->wc_flags |= IB_WC_WITH_IMM;
		}
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		DP_ERR(dev, "Invalid CQE status detected\n");
	}

	/* fill WC */
	wc->status = wc_status;
	wc->src_qp = qp->id;
	wc->qp = &qp->ibqp;
	wc->wr_id = wr_id;
}
*qp
,
3371 struct qedr_cq
*cq
, struct ib_wc
*wc
,
3372 struct rdma_cqe_responder
*resp
)
3374 u64 wr_id
= qp
->rqe_wr_id
[qp
->rq
.cons
].wr_id
;
3376 __process_resp_one(dev
, qp
, cq
, wc
, resp
, wr_id
);
3378 while (qp
->rqe_wr_id
[qp
->rq
.cons
].wqe_size
--)
3379 qed_chain_consume(&qp
->rq
.pbl
);
3380 qedr_inc_sw_cons(&qp
->rq
);
static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
			      int num_entries, struct ib_wc *wc, u16 hw_cons)
{
	u16 cnt = 0;

	while (num_entries && qp->rq.wqe_cons != hw_cons) {
		/* fill WC */
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->byte_len = 0;
		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc->qp = &qp->ibqp;
		num_entries--;
		wc++;
		cnt++;
		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
			qed_chain_consume(&qp->rq.pbl);
		qedr_inc_sw_cons(&qp->rq);
	}

	return cnt;
}
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				 struct rdma_cqe_responder *resp, int *update)
{
	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
			     struct qedr_cq *cq, int num_entries,
			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
			     int *update)
{
	int cnt;

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		cnt = process_resp_flush(qp, cq, num_entries, wc,
					 resp->rq_cons);
		try_consume_resp_cqe(cq, qp, resp, update);
	} else {
		cnt = process_resp_one(dev, qp, cq, wc, resp);
		consume_cqe(cq);
		*update |= 1;
	}

	return cnt;
}

static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				struct rdma_cqe_requester *req, int *update)
{
	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	union rdma_cqe *cqe = cq->latest_cqe;
	u32 old_cons, new_cons;
	unsigned long flags;
	int update = 0;
	int done = 0;

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return qedr_gsi_poll_cq(ibcq, num_entries, wc);

	spin_lock_irqsave(&cq->cq_lock, flags);
	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	while (num_entries && is_valid_cqe(cq, cqe)) {
		struct qedr_qp *qp;
		int cnt = 0;

		/* prevent speculative reads of any field of CQE */
		rmb();

		qp = cqe_get_qp(cqe);
		if (!qp) {
			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
			break;
		}

		wc->qp = &qp->ibqp;

		switch (cqe_get_type(cqe)) {
		case RDMA_CQE_TYPE_REQUESTER:
			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
					       &cqe->req);
			try_consume_req_cqe(cq, qp, &cqe->req, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_RQ:
			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
						&cqe->resp, &update);
			break;
		case RDMA_CQE_TYPE_INVALID:
		default:
			DP_ERR(dev, "Error: invalid CQE type = %d\n",
			       cqe_get_type(cqe));
		}
		num_entries -= cnt;
		wc += cnt;
		done += cnt;

		cqe = get_cqe(cq);
	}
	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

	cq->cq_cons += new_cons - old_cons;

	if (update)
		/* doorbell notifies about latest VALID entry,
		 * but chain already point to the next INVALID one
		 */
		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return done;
}
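/* The CQ software consumer advances by however many chain elements this poll
 * consumed (new_cons - old_cons), and the doorbell is rung only when at
 * least one CQE was actually consumed (update != 0).
 */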
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
		     u8 port_num,
		     const struct ib_wc *in_wc,
		     const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *mad_hdr,
		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
		 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
		 mad_hdr->class_specific, mad_hdr->class_version,
		 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
	return IB_MAD_RESULT_SUCCESS;
}
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
			struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;