2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
16 * Copyright(c) 2012 Intel Corporation. All rights reserved.
17 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
23 * * Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 * * Redistributions in binary form must reproduce the above copy
26 * notice, this list of conditions and the following disclaimer in
27 * the documentation and/or other materials provided with the
29 * * Neither the name of Intel Corporation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 * PCIe NTB Network Linux driver
47 * Contact Information:
48 * Jon Mason <jon.mason@intel.com>
50 #include <linux/etherdevice.h>
51 #include <linux/ethtool.h>
52 #include <linux/module.h>
53 #include <linux/pci.h>
54 #include <linux/ntb.h>
55 #include <linux/ntb_transport.h>
57 #define NTB_NETDEV_VER "0.7"
59 MODULE_DESCRIPTION(KBUILD_MODNAME
);
60 MODULE_VERSION(NTB_NETDEV_VER
);
61 MODULE_LICENSE("Dual BSD/GPL");
62 MODULE_AUTHOR("Intel Corporation");
/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5;
75 struct net_device
*ndev
;
76 struct ntb_transport_qp
*qp
;
77 struct timer_list tx_timer
;
80 #define NTB_TX_TIMEOUT_MS 1000
81 #define NTB_RXQ_SIZE 100
83 static void ntb_netdev_event_handler(void *data
, int link_is_up
)
85 struct net_device
*ndev
= data
;
86 struct ntb_netdev
*dev
= netdev_priv(ndev
);
88 netdev_dbg(ndev
, "Event %x, Link %x\n", link_is_up
,
89 ntb_transport_link_query(dev
->qp
));
92 if (ntb_transport_link_query(dev
->qp
))
93 netif_carrier_on(ndev
);
95 netif_carrier_off(ndev
);
99 static void ntb_netdev_rx_handler(struct ntb_transport_qp
*qp
, void *qp_data
,
102 struct net_device
*ndev
= qp_data
;
110 netdev_dbg(ndev
, "%s: %d byte payload received\n", __func__
, len
);
113 ndev
->stats
.rx_errors
++;
114 ndev
->stats
.rx_length_errors
++;
119 skb
->protocol
= eth_type_trans(skb
, ndev
);
120 skb
->ip_summed
= CHECKSUM_NONE
;
122 if (netif_rx(skb
) == NET_RX_DROP
) {
123 ndev
->stats
.rx_errors
++;
124 ndev
->stats
.rx_dropped
++;
126 ndev
->stats
.rx_packets
++;
127 ndev
->stats
.rx_bytes
+= len
;
130 skb
= netdev_alloc_skb(ndev
, ndev
->mtu
+ ETH_HLEN
);
132 ndev
->stats
.rx_errors
++;
133 ndev
->stats
.rx_frame_errors
++;
138 rc
= ntb_transport_rx_enqueue(qp
, skb
, skb
->data
, ndev
->mtu
+ ETH_HLEN
);
141 ndev
->stats
.rx_errors
++;
142 ndev
->stats
.rx_fifo_errors
++;
146 static int __ntb_netdev_maybe_stop_tx(struct net_device
*netdev
,
147 struct ntb_transport_qp
*qp
, int size
)
149 struct ntb_netdev
*dev
= netdev_priv(netdev
);
151 netif_stop_queue(netdev
);
152 /* Make sure to see the latest value of ntb_transport_tx_free_entry()
153 * since the queue was last started.
157 if (likely(ntb_transport_tx_free_entry(qp
) < size
)) {
158 mod_timer(&dev
->tx_timer
, jiffies
+ usecs_to_jiffies(tx_time
));
162 netif_start_queue(netdev
);
/*
 * Fast path of tx flow control: nothing to do when the queue is already
 * stopped or there are still at least @size free tx entries; otherwise fall
 * through to the stop/recheck slow path.
 */
static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}
176 static void ntb_netdev_tx_handler(struct ntb_transport_qp
*qp
, void *qp_data
,
179 struct net_device
*ndev
= qp_data
;
181 struct ntb_netdev
*dev
= netdev_priv(ndev
);
188 ndev
->stats
.tx_packets
++;
189 ndev
->stats
.tx_bytes
+= skb
->len
;
191 ndev
->stats
.tx_errors
++;
192 ndev
->stats
.tx_aborted_errors
++;
197 if (ntb_transport_tx_free_entry(dev
->qp
) >= tx_start
) {
198 /* Make sure anybody stopping the queue after this sees the new
199 * value of ntb_transport_tx_free_entry()
202 if (netif_queue_stopped(ndev
))
203 netif_wake_queue(ndev
);
207 static netdev_tx_t
ntb_netdev_start_xmit(struct sk_buff
*skb
,
208 struct net_device
*ndev
)
210 struct ntb_netdev
*dev
= netdev_priv(ndev
);
213 ntb_netdev_maybe_stop_tx(ndev
, dev
->qp
, tx_stop
);
215 rc
= ntb_transport_tx_enqueue(dev
->qp
, skb
, skb
->data
, skb
->len
);
219 /* check for next submit */
220 ntb_netdev_maybe_stop_tx(ndev
, dev
->qp
, tx_stop
);
225 ndev
->stats
.tx_dropped
++;
226 ndev
->stats
.tx_errors
++;
227 return NETDEV_TX_BUSY
;
230 static void ntb_netdev_tx_timer(struct timer_list
*t
)
232 struct ntb_netdev
*dev
= from_timer(dev
, t
, tx_timer
);
233 struct net_device
*ndev
= dev
->ndev
;
235 if (ntb_transport_tx_free_entry(dev
->qp
) < tx_stop
) {
236 mod_timer(&dev
->tx_timer
, jiffies
+ usecs_to_jiffies(tx_time
));
238 /* Make sure anybody stopping the queue after this sees the new
239 * value of ntb_transport_tx_free_entry()
242 if (netif_queue_stopped(ndev
))
243 netif_wake_queue(ndev
);
247 static int ntb_netdev_open(struct net_device
*ndev
)
249 struct ntb_netdev
*dev
= netdev_priv(ndev
);
253 /* Add some empty rx bufs */
254 for (i
= 0; i
< NTB_RXQ_SIZE
; i
++) {
255 skb
= netdev_alloc_skb(ndev
, ndev
->mtu
+ ETH_HLEN
);
261 rc
= ntb_transport_rx_enqueue(dev
->qp
, skb
, skb
->data
,
262 ndev
->mtu
+ ETH_HLEN
);
269 timer_setup(&dev
->tx_timer
, ntb_netdev_tx_timer
, 0);
271 netif_carrier_off(ndev
);
272 ntb_transport_link_up(dev
->qp
);
273 netif_start_queue(ndev
);
278 while ((skb
= ntb_transport_rx_remove(dev
->qp
, &len
)))
283 static int ntb_netdev_close(struct net_device
*ndev
)
285 struct ntb_netdev
*dev
= netdev_priv(ndev
);
289 ntb_transport_link_down(dev
->qp
);
291 while ((skb
= ntb_transport_rx_remove(dev
->qp
, &len
)))
294 del_timer_sync(&dev
->tx_timer
);
299 static int ntb_netdev_change_mtu(struct net_device
*ndev
, int new_mtu
)
301 struct ntb_netdev
*dev
= netdev_priv(ndev
);
305 if (new_mtu
> ntb_transport_max_size(dev
->qp
) - ETH_HLEN
)
308 if (!netif_running(ndev
)) {
313 /* Bring down the link and dispose of posted rx entries */
314 ntb_transport_link_down(dev
->qp
);
316 if (ndev
->mtu
< new_mtu
) {
319 for (i
= 0; (skb
= ntb_transport_rx_remove(dev
->qp
, &len
)); i
++)
323 skb
= netdev_alloc_skb(ndev
, new_mtu
+ ETH_HLEN
);
329 rc
= ntb_transport_rx_enqueue(dev
->qp
, skb
, skb
->data
,
340 ntb_transport_link_up(dev
->qp
);
345 ntb_transport_link_down(dev
->qp
);
347 while ((skb
= ntb_transport_rx_remove(dev
->qp
, &len
)))
350 netdev_err(ndev
, "Error changing MTU, device inoperable\n");
354 static const struct net_device_ops ntb_netdev_ops
= {
355 .ndo_open
= ntb_netdev_open
,
356 .ndo_stop
= ntb_netdev_close
,
357 .ndo_start_xmit
= ntb_netdev_start_xmit
,
358 .ndo_change_mtu
= ntb_netdev_change_mtu
,
359 .ndo_set_mac_address
= eth_mac_addr
,
362 static void ntb_get_drvinfo(struct net_device
*ndev
,
363 struct ethtool_drvinfo
*info
)
365 struct ntb_netdev
*dev
= netdev_priv(ndev
);
367 strlcpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
368 strlcpy(info
->version
, NTB_NETDEV_VER
, sizeof(info
->version
));
369 strlcpy(info
->bus_info
, pci_name(dev
->pdev
), sizeof(info
->bus_info
));
372 static int ntb_get_link_ksettings(struct net_device
*dev
,
373 struct ethtool_link_ksettings
*cmd
)
375 ethtool_link_ksettings_zero_link_mode(cmd
, supported
);
376 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
377 ethtool_link_ksettings_zero_link_mode(cmd
, advertising
);
378 ethtool_link_ksettings_add_link_mode(cmd
, advertising
, Backplane
);
380 cmd
->base
.speed
= SPEED_UNKNOWN
;
381 cmd
->base
.duplex
= DUPLEX_FULL
;
382 cmd
->base
.port
= PORT_OTHER
;
383 cmd
->base
.phy_address
= 0;
384 cmd
->base
.autoneg
= AUTONEG_ENABLE
;
389 static const struct ethtool_ops ntb_ethtool_ops
= {
390 .get_drvinfo
= ntb_get_drvinfo
,
391 .get_link
= ethtool_op_get_link
,
392 .get_link_ksettings
= ntb_get_link_ksettings
,
395 static const struct ntb_queue_handlers ntb_netdev_handlers
= {
396 .tx_handler
= ntb_netdev_tx_handler
,
397 .rx_handler
= ntb_netdev_rx_handler
,
398 .event_handler
= ntb_netdev_event_handler
,
401 static int ntb_netdev_probe(struct device
*client_dev
)
404 struct net_device
*ndev
;
405 struct pci_dev
*pdev
;
406 struct ntb_netdev
*dev
;
409 ntb
= dev_ntb(client_dev
->parent
);
414 ndev
= alloc_etherdev(sizeof(*dev
));
418 SET_NETDEV_DEV(ndev
, client_dev
);
420 dev
= netdev_priv(ndev
);
423 ndev
->features
= NETIF_F_HIGHDMA
;
425 ndev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
427 ndev
->hw_features
= ndev
->features
;
428 ndev
->watchdog_timeo
= msecs_to_jiffies(NTB_TX_TIMEOUT_MS
);
430 eth_random_addr(ndev
->perm_addr
);
431 memcpy(ndev
->dev_addr
, ndev
->perm_addr
, ndev
->addr_len
);
433 ndev
->netdev_ops
= &ntb_netdev_ops
;
434 ndev
->ethtool_ops
= &ntb_ethtool_ops
;
437 ndev
->max_mtu
= ETH_MAX_MTU
;
439 dev
->qp
= ntb_transport_create_queue(ndev
, client_dev
,
440 &ntb_netdev_handlers
);
446 ndev
->mtu
= ntb_transport_max_size(dev
->qp
) - ETH_HLEN
;
448 rc
= register_netdev(ndev
);
452 dev_set_drvdata(client_dev
, ndev
);
453 dev_info(&pdev
->dev
, "%s created\n", ndev
->name
);
457 ntb_transport_free_queue(dev
->qp
);
463 static void ntb_netdev_remove(struct device
*client_dev
)
465 struct net_device
*ndev
= dev_get_drvdata(client_dev
);
466 struct ntb_netdev
*dev
= netdev_priv(ndev
);
468 unregister_netdev(ndev
);
469 ntb_transport_free_queue(dev
->qp
);
473 static struct ntb_transport_client ntb_netdev_client
= {
474 .driver
.name
= KBUILD_MODNAME
,
475 .driver
.owner
= THIS_MODULE
,
476 .probe
= ntb_netdev_probe
,
477 .remove
= ntb_netdev_remove
,
480 static int __init
ntb_netdev_init_module(void)
484 rc
= ntb_transport_register_client_dev(KBUILD_MODNAME
);
487 return ntb_transport_register_client(&ntb_netdev_client
);
489 module_init(ntb_netdev_init_module
);
491 static void __exit
ntb_netdev_exit_module(void)
493 ntb_transport_unregister_client(&ntb_netdev_client
);
494 ntb_transport_unregister_client_dev(KBUILD_MODNAME
);
496 module_exit(ntb_netdev_exit_module
);