/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 * Costa Mesa, CA 92626
 *******************************************************************/
#include <linux/module.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"
MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
static LIST_HEAD(ocrdma_dev_list);
static DEFINE_SPINLOCK(ocrdma_devlist_lock);
static DEFINE_IDR(ocrdma_dev_id);

static union ib_gid ocrdma_zero_sgid;
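/*
 * Hand out a unique instance number per device from the ocrdma_dev_id
 * IDR.  This uses the old two-step IDR API: idr_pre_get() reserves
 * memory up front and idr_get_new() then allocates an id.
 */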
static int ocrdma_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
		return -1;
	if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
		return -1;
	return instance;
}
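/*
 * Build the 8-byte node GUID from the 6-byte adapter MAC address using
 * the usual MAC-48 to EUI-64 mapping: flip the universal/local bit of
 * the first octet and insert 0xff, 0xfe in the middle, e.g.
 * 00:11:22:33:44:55 becomes 02:11:22:ff:fe:33:44:55.
 */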
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
	u8 mac_addr[6];

	memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
}
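/*
 * A RoCE SGID has the layout of an IPv6 link-local address: the
 * fe80::/64 prefix in bytes 0-7 and an EUI-64 style interface id in
 * bytes 8-15.  For VLAN interfaces the VLAN id replaces the 0xff, 0xfe
 * filler bytes so each VLAN gets a distinct GID; e.g. MAC
 * 00:11:22:33:44:55 on VLAN 5 yields fe80::211:2200:533:4455.
 */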
static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
				  bool is_vlan, u16 vlan_id)
{
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	sgid->raw[8] = mac_addr[0] ^ 2;
	sgid->raw[9] = mac_addr[1];
	sgid->raw[10] = mac_addr[2];
	if (is_vlan) {
		sgid->raw[11] = vlan_id >> 8;
		sgid->raw[12] = vlan_id & 0xff;
	} else {
		sgid->raw[11] = 0xff;
		sgid->raw[12] = 0xfe;
	}
	sgid->raw[13] = mac_addr[3];
	sgid->raw[14] = mac_addr[4];
	sgid->raw[15] = mac_addr[5];
}
static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
			    bool is_vlan, u16 vlan_id)
{
	int i;
	union ib_gid new_sgid;
	unsigned long flags;

	memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));

	ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);

	spin_lock_irqsave(&dev->sgid_lock, flags);
	for (i = 0; i < OCRDMA_MAX_SGID; i++) {
		if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
			    sizeof(union ib_gid))) {
			/* found free entry */
			memcpy(&dev->sgid_tbl[i], &new_sgid,
			       sizeof(union ib_gid));
			spin_unlock_irqrestore(&dev->sgid_lock, flags);
			return true;
		} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
				   sizeof(union ib_gid))) {
			/* entry already present, no addition is required. */
			spin_unlock_irqrestore(&dev->sgid_lock, flags);
			return false;
		}
	}
	spin_unlock_irqrestore(&dev->sgid_lock, flags);
	return false;
}
static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
			    bool is_vlan, u16 vlan_id)
{
	bool found = false;
	int i;
	union ib_gid sgid;
	unsigned long flags;

	ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);

	spin_lock_irqsave(&dev->sgid_lock, flags);
	/* first is default sgid, which cannot be deleted. */
	for (i = 1; i < OCRDMA_MAX_SGID; i++) {
		if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
			/* found matching entry */
			memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->sgid_lock, flags);
	return found;
}
static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
{
	/* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
	union ib_gid *sgid = &dev->sgid_tbl[0];

	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	ocrdma_get_guid(dev, &sgid->raw[8]);
}
#if IS_ENABLED(CONFIG_VLAN_8021Q)
static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
	struct net_device *netdev, *tmp;
	u16 vlan_id;
	bool is_vlan;

	netdev = dev->nic_info.netdev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
			if (!netif_running(tmp) || !netif_oper_up(tmp))
				continue;
			if (netdev != tmp) {
				vlan_id = vlan_dev_vlan_id(tmp);
				is_vlan = true;
			} else {
				is_vlan = false;
				vlan_id = 0;
			}
			ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
		}
	}
	rcu_read_unlock();
}
#else
static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
}
#endif /* CONFIG_VLAN_8021Q */
static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
{
	ocrdma_add_default_sgid(dev);
	ocrdma_add_vlan_sgids(dev);
	return 0;
}
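/*
 * RoCE SGIDs mirror the IPv6 link-local addresses of the underlying
 * net_device, so an inet6 address notifier keeps the SGID table in
 * sync: NETDEV_UP adds an entry, NETDEV_DOWN removes it, and any
 * change is announced to consumers as an IB_EVENT_GID_CHANGE event.
 */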
#if IS_ENABLED(CONFIG_IPV6)

static int ocrdma_inet6addr_event(struct notifier_block *notifier,
				  unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *netdev = ifa->idev->dev;
	struct ib_event gid_event;
	struct ocrdma_dev *dev;
	bool found = false;
	bool updated = false;
	bool is_vlan = false;
	u16 vid = 0;

	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
	if (is_vlan) {
		vid = vlan_dev_vlan_id(netdev);
		netdev = vlan_dev_real_dev(netdev);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
		if (dev->nic_info.netdev == netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	if (!found)
		return NOTIFY_DONE;
	if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
		return NOTIFY_DONE;

	mutex_lock(&dev->dev_lock);
	switch (event) {
	case NETDEV_UP:
		updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
		break;
	case NETDEV_DOWN:
		updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
		break;
	default:
		break;
	}
	if (updated) {
		/* GID table updated, notify the consumers about it */
		gid_event.device = &dev->ibdev;
		gid_event.element.port_num = 1;
		gid_event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&gid_event);
	}
	mutex_unlock(&dev->dev_lock);
	return NOTIFY_OK;
}

static struct notifier_block ocrdma_inet6addr_notifier = {
	.notifier_call = ocrdma_inet6addr_event
};

#endif /* IPV6 and VLAN */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
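/*
 * Populate the ib_device verbs table and register with the IB core.
 * The mandatory verbs are always wired up; SRQ verbs are exposed only
 * on GEN2-family adapters, gated by the dev_family check below.
 */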
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
	strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
	ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
	memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
	       sizeof(OCRDMA_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_cmd_mask =
	    OCRDMA_UVERBS(GET_CONTEXT) |
	    OCRDMA_UVERBS(QUERY_DEVICE) |
	    OCRDMA_UVERBS(QUERY_PORT) |
	    OCRDMA_UVERBS(ALLOC_PD) |
	    OCRDMA_UVERBS(DEALLOC_PD) |
	    OCRDMA_UVERBS(REG_MR) |
	    OCRDMA_UVERBS(DEREG_MR) |
	    OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
	    OCRDMA_UVERBS(CREATE_CQ) |
	    OCRDMA_UVERBS(RESIZE_CQ) |
	    OCRDMA_UVERBS(DESTROY_CQ) |
	    OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
	    OCRDMA_UVERBS(CREATE_QP) |
	    OCRDMA_UVERBS(MODIFY_QP) |
	    OCRDMA_UVERBS(QUERY_QP) |
	    OCRDMA_UVERBS(DESTROY_QP) |
	    OCRDMA_UVERBS(POLL_CQ) |
	    OCRDMA_UVERBS(POST_SEND) |
	    OCRDMA_UVERBS(POST_RECV);

	dev->ibdev.uverbs_cmd_mask |=
	    OCRDMA_UVERBS(CREATE_AH) |
	    OCRDMA_UVERBS(MODIFY_AH) |
	    OCRDMA_UVERBS(QUERY_AH) |
	    OCRDMA_UVERBS(DESTROY_AH);

	dev->ibdev.node_type = RDMA_NODE_IB_CA;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;

	/* mandatory verbs. */
	dev->ibdev.query_device = ocrdma_query_device;
	dev->ibdev.query_port = ocrdma_query_port;
	dev->ibdev.modify_port = ocrdma_modify_port;
	dev->ibdev.query_gid = ocrdma_query_gid;
	dev->ibdev.get_link_layer = ocrdma_link_layer;
	dev->ibdev.alloc_pd = ocrdma_alloc_pd;
	dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;

	dev->ibdev.create_cq = ocrdma_create_cq;
	dev->ibdev.destroy_cq = ocrdma_destroy_cq;
	dev->ibdev.resize_cq = ocrdma_resize_cq;

	dev->ibdev.create_qp = ocrdma_create_qp;
	dev->ibdev.modify_qp = ocrdma_modify_qp;
	dev->ibdev.query_qp = ocrdma_query_qp;
	dev->ibdev.destroy_qp = ocrdma_destroy_qp;

	dev->ibdev.query_pkey = ocrdma_query_pkey;
	dev->ibdev.create_ah = ocrdma_create_ah;
	dev->ibdev.destroy_ah = ocrdma_destroy_ah;
	dev->ibdev.query_ah = ocrdma_query_ah;
	dev->ibdev.modify_ah = ocrdma_modify_ah;

	dev->ibdev.poll_cq = ocrdma_poll_cq;
	dev->ibdev.post_send = ocrdma_post_send;
	dev->ibdev.post_recv = ocrdma_post_recv;
	dev->ibdev.req_notify_cq = ocrdma_arm_cq;

	dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
	dev->ibdev.dereg_mr = ocrdma_dereg_mr;
	dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;

	/* mandatory to support user space verbs consumer. */
	dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
	dev->ibdev.mmap = ocrdma_mmap;
	dev->ibdev.dma_device = &dev->nic_info.pdev->dev;

	dev->ibdev.process_mad = ocrdma_process_mad;

	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		dev->ibdev.uverbs_cmd_mask |=
		    OCRDMA_UVERBS(CREATE_SRQ) |
		    OCRDMA_UVERBS(MODIFY_SRQ) |
		    OCRDMA_UVERBS(QUERY_SRQ) |
		    OCRDMA_UVERBS(DESTROY_SRQ) |
		    OCRDMA_UVERBS(POST_SRQ_RECV);

		dev->ibdev.create_srq = ocrdma_create_srq;
		dev->ibdev.modify_srq = ocrdma_modify_srq;
		dev->ibdev.query_srq = ocrdma_query_srq;
		dev->ibdev.destroy_srq = ocrdma_destroy_srq;
		dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
	}
	return ib_register_device(&dev->ibdev, NULL);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
{
	mutex_init(&dev->dev_lock);
	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
				OCRDMA_MAX_SGID, GFP_KERNEL);
	if (!dev->sgid_tbl)
		goto alloc_err;
	spin_lock_init(&dev->sgid_lock);

	dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
			      OCRDMA_MAX_CQ, GFP_KERNEL);
	if (!dev->cq_tbl)
		goto alloc_err;

	if (dev->attr.max_qp) {
		dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *
				      OCRDMA_MAX_QP, GFP_KERNEL);
		if (!dev->qp_tbl)
			goto alloc_err;
	}
	spin_lock_init(&dev->av_tbl.lock);
	spin_lock_init(&dev->flush_q_lock);
	return 0;
alloc_err:
	ocrdma_err("%s(%d) error.\n", __func__, dev->id);
	return -ENOMEM;
}
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
	kfree(dev->qp_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->sgid_tbl);
}
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
	int status = 0;
	struct ocrdma_dev *dev;

	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
	if (!dev) {
		ocrdma_err("Unable to allocate ib device\n");
		return NULL;
	}
	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
	if (!dev->mbx_cmd)
		goto idr_err;

	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
	dev->id = ocrdma_get_instance();
	if (dev->id < 0)
		goto idr_err;

	status = ocrdma_init_hw(dev);
	if (status)
		goto init_err;

	status = ocrdma_alloc_resources(dev);
	if (status)
		goto alloc_err;

	status = ocrdma_build_sgid_tbl(dev);
	if (status)
		goto alloc_err;

	status = ocrdma_register_device(dev);
	if (status)
		goto alloc_err;

	spin_lock(&ocrdma_devlist_lock);
	list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
	spin_unlock(&ocrdma_devlist_lock);
	return dev;

alloc_err:
	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);
init_err:
	idr_remove(&ocrdma_dev_id, dev->id);
idr_err:
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
	ocrdma_err("%s() leaving. ret=%d\n", __func__, status);
	return NULL;
}
static void ocrdma_remove_free(struct rcu_head *rcu)
{
	struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);

	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);

	idr_remove(&ocrdma_dev_id, dev->id);
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
}
static void ocrdma_remove(struct ocrdma_dev *dev)
{
	/* first unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	spin_lock(&ocrdma_devlist_lock);
	list_del_rcu(&dev->entry);
	spin_unlock(&ocrdma_devlist_lock);
	call_rcu(&dev->rcu, ocrdma_remove_free);
}
static int ocrdma_open(struct ocrdma_dev *dev)
{
	struct ib_event port_event;

	port_event.event = IB_EVENT_PORT_ACTIVE;
	port_event.element.port_num = 1;
	port_event.device = &dev->ibdev;
	ib_dispatch_event(&port_event);
	return 0;
}
static int ocrdma_close(struct ocrdma_dev *dev)
{
	int i;
	struct ocrdma_qp *qp, **cur_qp;
	struct ib_event err_event;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;

	attrs.qp_state = IB_QPS_ERR;
	mutex_lock(&dev->dev_lock);
	if (dev->qp_tbl) {
		cur_qp = dev->qp_tbl;
		for (i = 0; i < OCRDMA_MAX_QP; i++) {
			qp = cur_qp[i];
			if (qp) {
				/* change the QP state to ERROR */
				_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);

				err_event.event = IB_EVENT_QP_FATAL;
				err_event.element.qp = &qp->ibqp;
				err_event.device = &dev->ibdev;
				ib_dispatch_event(&err_event);
			}
		}
	}
	mutex_unlock(&dev->dev_lock);

	err_event.event = IB_EVENT_PORT_ERR;
	err_event.element.port_num = 1;
	err_event.device = &dev->ibdev;
	ib_dispatch_event(&err_event);
	return 0;
}
/* event handling via NIC driver ensures that all the NIC specific
 * initialization done before RoCE driver notifies
 * event to stack.
 */
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{
	switch (event) {
	case BE_DEV_UP:
		ocrdma_open(dev);
		break;
	case BE_DEV_DOWN:
		ocrdma_close(dev);
		break;
	}
}

static struct ocrdma_driver ocrdma_drv = {
	.name			= "ocrdma_driver",
	.add			= ocrdma_add,
	.remove			= ocrdma_remove,
	.state_change_handler	= ocrdma_event_handler,
};
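/*
 * ocrdma does not probe PCI devices itself: be_roce_register_driver()
 * plugs the callbacks above into the be2net NIC driver, which invokes
 * add()/remove() as adapters come and go and state_change_handler()
 * on port up/down transitions.
 */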
static void ocrdma_unregister_inet6addr_notifier(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
#endif
}
static int __init ocrdma_init_module(void)
{
	int status;

#if IS_ENABLED(CONFIG_IPV6)
	status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
	if (status)
		return status;
#endif

	status = be_roce_register_driver(&ocrdma_drv);
	if (status)
		ocrdma_unregister_inet6addr_notifier();

	return status;
}
static void __exit ocrdma_exit_module(void)
{
	be_roce_unregister_driver(&ocrdma_drv);
	ocrdma_unregister_inet6addr_notifier();
}

module_init(ocrdma_init_module);
module_exit(ocrdma_exit_module);