/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */
#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"
unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);
/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];

	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}
/* Start of netdev section */
static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
{
	const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
		"NETDEV_REBOOT", "NETDEV_CHANGE",
		"NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
		"NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
		"NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
		"NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
		"NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
		"NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
	};

	if (event >= ARRAY_SIZE(event2str))
		return "UNKNOWN_NETDEV_EVENT";
	else
		return event2str[event];
}
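
/*
 * Walk every ucontext on the device and force any QP group currently in
 * INIT, RTR, or RTS into the error state.  Called with usdev_lock held
 * whenever a link, MAC, MTU, or address change invalidates active QPs.
 */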
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}
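
/*
 * Translate a netdev notifier event on the underlying ENIC interface into
 * the matching RDMA port event: a PF reset or carrier loss moves active
 * QPs to the error state and reports IB_EVENT_PORT_ERR, carrier up reports
 * IB_EVENT_PORT_ACTIVE, and MAC/MTU changes are pushed down to the
 * forwarding device.
 */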
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					usnic_ib_netdev_event_to_string(event),
					us_ibdev->ib_dev.name);
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
					us_ibdev->ib_dev.name);
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}
		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
					us_ibdev->ib_dev.name);
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}
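
/*
 * Netdev notifier callback: find the usNIC IB device backed by the netdev
 * that generated the event and let it handle the event under its own
 * device lock.
 */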
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
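/*
 * IPv4 address changes map to GID changes for usNIC: dropping the address
 * tears down active QPs, and a new address is programmed into the
 * forwarding device before IB_EVENT_GID_CHANGE is dispatched.
 */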
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				usnic_ib_netdev_event_to_string(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				usnic_ib_netdev_event_to_string(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */
static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

/* Start of PF discovery section */
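/*
 * Allocate and register one IB device per usNIC physical function: wire up
 * the verbs entry points, seed the forwarding state (MAC, MTU, carrier,
 * primary IPv4 address) from the paired netdev, and derive the node GUID
 * from the permanent MAC and the IP address.
 */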
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_netdev = usnic_get_netdev;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
	us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str;

	if (ib_register_device(&us_ibdev->ib_dev, NULL))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	ind = in_dev_get(netdev);
	if (ind->ifa_list)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
				     ind->ifa_list->ifa_address);
	in_dev_put(ind);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
			us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			usnic_ib_device_remove(us_ibdev);
			found = true;
			break;
		}
	}

	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));

	mutex_unlock(&usnic_ib_ibdev_list_lock);
}
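
/*
 * Each VF holds a reference (vf_cnt) on the IB device of its parent
 * physical function: the first VF probed on a PF creates and registers the
 * device here, and the last reference dropped releases it via
 * usnic_ib_undiscover_pf().
 */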
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */
/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};
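
/*
 * Probe is called once per usNIC-capable VF: enable the PCI function and
 * claim its BARs, allocate its vnic resources, attach it to (or create)
 * the IB device of its parent PF, and record the per-resource-type
 * maximums on the PF.
 */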
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write
	 * than to say "if (!set) { set_values(); set=1; }")
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}
/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
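/*
 * Module init brings up the userspace memory (uiom) layer, registers the
 * PCI driver and the netdev/inet notifiers, then the transport and
 * debugfs; module exit tears everything down in the reverse order.
 */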
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	usnic_uiom_fini();

	return err;
}
static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */