/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)
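
/* Dispatch an asynchronous IB event (e.g. PORT_ACTIVE or PORT_ERR) on the
 * given port of this qedr device to the RDMA core.
 */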
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
			  rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
								"RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.alloc_xrcd = qedr_alloc_xrcd,
	.dealloc_xrcd = qedr_dealloc_xrcd,
	.get_port_immutable = qedr_roce_port_immutable,
	.query_pkey = qedr_query_pkey,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.device_group = &qedr_attr_group,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
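
/* Register the qedr device with the RDMA core: select the iWARP or RoCE
 * personality, install the verbs ops tables, bind the backing netdev and
 * finally call ib_register_device().
 */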
static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}
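
/* Allocate the per-device software resources: the SGID table, the iWARP
 * work queue (iWARP only) and, per CNQ, a status block plus a PBL-backed
 * chain of completion notification elements.
 */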
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev)) {
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
		if (!dev->iwarp_wq) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err_destroy_wq;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;

err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err_destroy_wq:
	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}
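
/* Advertise global atomic support only when the PCIe root complex accepts
 * 64-bit AtomicOp completion requests from this device.
 */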
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
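
/* CNQ interrupt handler: drain completion notifications from the CNQ chain,
 * invoke the completion handler of each referenced CQ, then report the new
 * consumer index via rdma_cnq_prod_update() and re-enable the status block
 * interrupt.
 */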
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);

			break;
		}

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
			break;
		}
		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
			 dev->cnq_array[i].name, i,
			 &dev->cnq_array[i]);
		dev->int_info.used_cnt++;
	}

	return rc;
}
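
/* Negotiate the RDMA interrupt configuration with the qed core and request
 * one MSI-X vector per CNQ.
 */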
static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}
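
/* Firmware affiliated-event handler: translate a RoCE or iWARP asynchronous
 * event code into an ib_event and deliver it to the CQ, QP or SRQ that the
 * firmware handle refers to.
 */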
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}
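
/* Bring up the RDMA engine in the qed core: describe the CNQ PBLs, register
 * the asynchronous event callbacks, add a user context (doorbell/DPI area)
 * and finally cache the device attributes.
 */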
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}
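
/* Probe path, invoked by qede when RDMA comes up on a port: allocate the
 * ib_device, bind to the qed RDMA ops, allocate resources, start the
 * hardware, request IRQs and register with the RDMA core.
 */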
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev,
			       "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}
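
/* Removal path, invoked by qede when the port goes away or on module unload. */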
static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}
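
/* The default GID is derived from the netdev MAC address (EUI-64 style), so
 * a MAC change rebuilds sgid_tbl[0], refreshes the LL2 MAC filter used for
 * GSI traffic and reports IB_EVENT_GID_CHANGE.
 */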
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 MAC filter */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the event to the
 * stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	case QEDE_CHANGE_MTU:
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			if (dev->ndev->mtu != dev->iwarp_max_mtu)
				DP_NOTICE(dev,
					  "MTU was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
					  dev->iwarp_max_mtu, dev->ndev->mtu);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);