/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
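/* The 32-bit firmware version copied from the qed core in
 * qedr_set_device_attr() packs four version fields, one per byte;
 * print them most-significant byte first.
 */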
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
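/* Port-immutable attributes: the RoCE variant reports the real pkey/gid
 * table sizes and advertises both RoCE v1 and RoCE v2 (UDP encap), while
 * the iWARP variant below exposes single-entry tables and no MAD support.
 */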
static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}
/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
			 dev->pdev->device,
			 rdma_protocol_iwarp(&dev->ibdev, 1) ?
			 "iWARP" : "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};
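/* Transport-specific ib_device_ops: the iWARP table wires up the iw_cm
 * callbacks declared in qedr_iw_cm.h, the RoCE table only overrides
 * get_port_immutable. Verbs common to both transports live in
 * qedr_dev_ops further down.
 */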
static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};
static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}
static const struct ib_device_ops qedr_roce_dev_ops = {
	.get_port_immutable = qedr_roce_port_immutable,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}
static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_pkey = qedr_query_pkey,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
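/* Register with the RDMA core: fill in node info and the uverbs command
 * mask, apply the transport-specific ops, bind the netdev and finally
 * expose the device under the "qedr%d" naming scheme.
 */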
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(CREATE_SRQ) |
				     QEDR_UVERBS(DESTROY_SRQ) |
				     QEDR_UVERBS(QUERY_SRQ) |
				     QEDR_UVERBS(MODIFY_SRQ) |
				     QEDR_UVERBS(POST_SRQ_RECV) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	return ib_register_device(&dev->ibdev, "qedr%d");
}
/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}
static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}
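/* Allocate the software resources needed before HW init: the SGID table,
 * one status block per CNQ and a PBL-backed chain per CNQ from which the
 * interrupt handler consumes CQ handles.
 */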
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev))
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}
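/* IB atomic operations are only advertised when PCIe AtomicOp requests
 * to the root port can be enabled for 64-bit completers.
 */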
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}
static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
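/* CNQ interrupt handler: with the status block interrupt masked, walk the
 * CNQ chain from the last software consumer up to the hardware consumer
 * index, invoke the completion handler of every CQ handle found, then
 * report the new consumer to the FW and re-enable the interrupt.
 */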
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);
			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}
static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}
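/* Async event callbacks registered with the qed core in qedr_init_hw().
 * Affiliated events carry a regpair handle identifying the CQ, QP or SRQ
 * the event refers to.
 */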
static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}
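/* Bring up the RDMA engine: hand the per-CNQ PBLs and the event callbacks
 * to the qed core, add a user context to obtain the doorbell (DPI) range
 * and finally cache the reported device attributes.
 */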
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}
static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}
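/* Probe path, invoked by the qede Ethernet driver for each RDMA-capable
 * port: allocate the ib_device, query the qed core, then set up resources,
 * HW, interrupts and finally register with the RDMA stack.
 */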
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100g device Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to allocate register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}
static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}
static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}
static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}
static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}
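/* Refresh GID 0 after a MAC change: rebuild the link-local GID from an
 * EUI-64 derived from the new MAC (U/L bit flipped, 0xFFFE inserted in
 * the middle) and update the GSI LL2 MAC filter to match.
 */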
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}
/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}
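/* Entry points handed to the qede driver; qedr is attached and detached
 * through qede's RDMA registration interface rather than as a PCI driver
 * of its own.
 */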
static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);