/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)
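
/* Build an ib_event for the given port and hand it to the IB core, which
 * forwards it to all registered consumers (CM, user verbs, ULPs).
 */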
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
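
/* The get_port_immutable() callbacks report attributes the IB core may cache
 * for the lifetime of the port: the RoCE flavour advertises RoCE v1/v2 core
 * capabilities, the pkey/gid table sizes and MAD support, while the iWARP
 * flavour exposes a single GID and no MAD support.
 */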
static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}
/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
			  rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
			  "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};
static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};
static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}
static const struct ib_device_ops qedr_roce_dev_ops = {
	.alloc_xrcd = qedr_alloc_xrcd,
	.dealloc_xrcd = qedr_dealloc_xrcd,
	.get_port_immutable = qedr_roce_port_immutable,
	.query_pkey = qedr_query_pkey,
};
static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}
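
/* Verbs entry points common to both the RoCE and iWARP personalities; the
 * protocol-specific tables above are layered on top via ib_set_device_ops().
 */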
static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
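
/* Register the qedr device with the RDMA core: pick the RoCE or iWARP
 * personality, wire up the verbs ops and the sysfs group, bind the netdev
 * and finally call ib_register_device(), which makes the device visible to
 * user space and kernel ULPs.
 */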
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}
/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}
static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}
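
/* Allocate the per-device fast-path resources: the software GID table, one
 * status block plus one CNQ (completion notification queue) PBL chain per
 * vector, and the iWARP work queue. The CNQ is what the firmware uses to
 * tell the driver which CQs have new completions.
 */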
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev))
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;

err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
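
/* CNQ interrupt handler: ack and disable the status block, then walk the CNQ
 * chain between the software and hardware consumer indices. Each element
 * carries the handle (address) of a CQ with new completions; its completion
 * handler is invoked unless the CQ is being destroyed. Finally the new
 * consumer index is reported back to the firmware and the interrupt is
 * re-enabled.
 */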
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);
			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that the
		 * handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
			break;
		}
		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
			 dev->cnq_array[i].name, i,
			 &dev->cnq_array[i]);
		dev->int_info.used_cnt++;
	}

	return rc;
}
static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}
static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}
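
/* Firmware "affiliated" async events carry a handle identifying the CQ, QP
 * or SRQ they relate to. Translate the firmware event code into an ib_event
 * and deliver it through the object's event_handler, if one is registered.
 */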
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
			       roce_handle64);
		}
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}
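
/* Program the qed RDMA engine: describe each CNQ PBL to the firmware,
 * register the async event callbacks, start the RDMA function and obtain a
 * doorbell/DPI range for this user, then snapshot the device attributes.
 */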
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}
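
/* Probe entry point, invoked through the qede Ethernet driver for each RDMA
 * capable port: allocate the ib_device, query the qed core, allocate
 * resources, initialize the hardware, request interrupts and register with
 * the RDMA core. On success the port is reported ACTIVE.
 */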
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed RoCE operations\n");
		goto init_err;
	}
	dev->ops = qed_ops;

	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it, run: devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all the NIC-specific
 * initialization is done before the RoCE driver notifies the event to the
 * stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	case QEDE_CHANGE_MTU:
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			if (dev->ndev->mtu != dev->iwarp_max_mtu)
				DP_NOTICE(dev,
					  "Mtu was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
					  dev->iwarp_max_mtu, dev->ndev->mtu);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);