/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/addrconf.h>
#include <linux/qed/qede_roce.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QEDR_MODULE_VERSION);

#define QEDR_WQ_MULTIPLIER_DFT	(3)

void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

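/* The 32-bit firmware version is unpacked byte by byte below (presumably
 * major/minor/revision/engineering fields) and printed as four
 * dot-separated decimal values.
 */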
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
				size_t str_len)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, str_len, "%d. %d. %d. %d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
{
	struct qedr_dev *qdev;

	qdev = get_qedr_dev(dev);
	dev_hold(qdev->ndev);

	/* The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	return qdev->ndev;
}

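/* Fill in the ib_device fields: name template, node GUID and description,
 * user-verbs ABI version and command mask, and the qedr_* verb callbacks,
 * then register the device with the IB core.
 */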
static int qedr_register_device(struct qedr_dev *dev)
{
	strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	dev->ibdev.query_device = qedr_query_device;
	dev->ibdev.query_port = qedr_query_port;
	dev->ibdev.modify_port = qedr_modify_port;

	dev->ibdev.query_gid = qedr_query_gid;
	dev->ibdev.add_gid = qedr_add_gid;
	dev->ibdev.del_gid = qedr_del_gid;

	dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
	dev->ibdev.mmap = qedr_mmap;

	dev->ibdev.alloc_pd = qedr_alloc_pd;
	dev->ibdev.dealloc_pd = qedr_dealloc_pd;

	dev->ibdev.create_cq = qedr_create_cq;
	dev->ibdev.destroy_cq = qedr_destroy_cq;
	dev->ibdev.resize_cq = qedr_resize_cq;
	dev->ibdev.req_notify_cq = qedr_arm_cq;

	dev->ibdev.create_qp = qedr_create_qp;
	dev->ibdev.modify_qp = qedr_modify_qp;
	dev->ibdev.query_qp = qedr_query_qp;
	dev->ibdev.destroy_qp = qedr_destroy_qp;

	dev->ibdev.query_pkey = qedr_query_pkey;

	dev->ibdev.create_ah = qedr_create_ah;
	dev->ibdev.destroy_ah = qedr_destroy_ah;

	dev->ibdev.get_dma_mr = qedr_get_dma_mr;
	dev->ibdev.dereg_mr = qedr_dereg_mr;
	dev->ibdev.reg_user_mr = qedr_reg_user_mr;
	dev->ibdev.alloc_mr = qedr_alloc_mr;
	dev->ibdev.map_mr_sg = qedr_map_mr_sg;

	dev->ibdev.poll_cq = qedr_poll_cq;
	dev->ibdev.post_send = qedr_post_send;
	dev->ibdev.post_recv = qedr_post_recv;

	dev->ibdev.process_mad = qedr_process_mad;
	dev->ibdev.get_port_immutable = qedr_port_immutable;
	dev->ibdev.get_netdev = qedr_get_netdev;

	dev->ibdev.dma_device = &dev->pdev->dev;

	dev->ibdev.get_link_layer = qedr_link_layer;
	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

	return ib_register_device(&dev->ibdev, NULL);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

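/* Per-device resources: the SGID table plus, for every CNQ, one HW status
 * block and one PBL-backed chain. cnq->hw_cons_ptr is pointed at the RoCE
 * protocol index within the status block, which is where the IRQ handler
 * reads the completion producer maintained by the device.
 */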
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
				QEDR_MAX_SGID, GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qedr_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}

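/* PCIe AtomicOps setup: every bridge between the adapter and the root
 * complex must advertise AtomicOp routing in PCI_EXP_DEVCAP2; the AtomicOp
 * requester enable is then set only if the upstream port also reports
 * 64-bit AtomicOp completion, in which case IB_ATOMIC_GLOB is exposed.
 */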
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	u32 val;

	dev->atomic_cap = IB_ATOMIC_NONE;

	bridge = pdev->bus->self;
	if (!bridge)
		return;

	/* Check whether we are connected directly or via a switch */
	while (bridge && bridge->bus->parent) {
		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
			 bridge->bus->number, bridge->bus->primary);
		/* Need to check Atomic Op Routing Supported all the way to
		 * root complex.
		 */
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
		if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
			pcie_capability_clear_word(pdev,
						   PCI_EXP_DEVCTL2,
						   PCI_EXP_DEVCTL2_ATOMIC_REQ);
			return;
		}
		bridge = bridge->bus->parent->self;
	}
	bridge = pdev->bus->self;

	/* according to bridge capability */
	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
	if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ATOMIC_REQ);
		dev->atomic_cap = IB_ATOMIC_GLOB;
	} else {
		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ATOMIC_REQ);
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

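/* CNQ interrupt path: for each completed CQ the device posts that CQ's
 * 64-bit driver handle (as a hi/lo regpair) on the CNQ chain. The handler
 * consumes entries until the chain's consumer index reaches the producer
 * value read from the status block, invokes each CQ's completion handler,
 * then writes the new consumer back via rdma_cnq_prod_update and re-arms
 * the interrupt line through the IGU.
 */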
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);

			break;
		}

		if (cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

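/* One MSI-X vector is requested per CNQ; vectors are taken from the msix
 * table with a stride of dev->num_hwfns and the qedr_cnq itself serves as
 * the IRQ cookie.
 */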
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
			break;
		}
		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
			 dev->cnq_array[i].name, i,
			 &dev->cnq_array[i]);
		dev->int_info.used_cnt++;
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

void qedr_unaffiliated_event(void *context,
			     u8 e_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

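/* Affiliated asynchronous events: firmware echoes back the 64-bit handle
 * (the driver's CQ/QP pointer) split into hi/lo. The event code is mapped
 * to an ib_event_type and, if the consumer registered an event handler,
 * delivered against the matching ibcq/ibqp.
 */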
void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
	struct qedr_dev *dev = (struct qedr_dev *)context;
	union event_ring_data *data = fw_handle;
	u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
			    data->roce_handle.lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;

	switch (e_code) {
	case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
		event.event = IB_EVENT_CQ_ERR;
		event_type = EVENT_TYPE_CQ;
		break;
	case ROCE_ASYNC_EVENT_SQ_DRAINED:
		event.event = IB_EVENT_SQ_DRAINED;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
		event.event = IB_EVENT_QP_FATAL;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
		event.event = IB_EVENT_QP_REQ_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	default:
		DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
		       roce_handle64);
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	default:
		break;
	}
}

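/* HW bring-up: pass each CNQ's PBL address and page count to the qed core,
 * register the event callbacks, start the RDMA engine and obtain a
 * doorbell (DPI) window through rdma_add_user before caching the device
 * attributes.
 */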
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

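/* Probe path, invoked through the qede RoCE driver registration: allocate
 * the ib_device, query the qed core, allocate resources, initialize the
 * HW, request IRQs, register with the IB core and create the sysfs files.
 * Failures unwind in reverse order via the error labels below.
 */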
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "not enough CNQ resources.\n");
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto sysfs_err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");

	return dev;

sysfs_err:
	ib_unregister_device(&dev->ibdev);
reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	qedr_remove_sysfiles(dev);
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);
	ib_dealloc_device(&dev->ibdev);
}

static int qedr_close(struct qedr_dev *dev)
{
	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);

	return 0;
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

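/* On a MAC address change, GID[0] is rebuilt as a link-local (fe80::/64)
 * address with a MAC-derived EUI-64 interface ID: the universal/local bit
 * of the first MAC byte is flipped and ff:fe is inserted between the two
 * halves of the MAC. The GSI LL2 MAC filter is updated as well and a
 * GID_CHANGE event is dispatched to consumers.
 */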
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID[0] with the new MAC-derived GUID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update the LL2 MAC filter used for GSI traffic */
	rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
					       dev->gsi_ll2_mac_address,
					       dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the stack of
 * the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_roce_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_roce_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);