net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
		sizeof(struct iphdr) + sizeof(struct ethhdr))
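/*
 * For reference: with Ethernet framing and no IP options, MAX_SKB_SIZE
 * works out to 1460 + 8 + 20 + 14 = 1502 bytes per pooled skb.
 */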
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
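/*
 * queue_process() drains the deferred-transmit queue. If the device is
 * gone, the skb is dropped; if the tx queue is frozen/stopped or the
 * driver refuses the skb, it is requeued at the head and the work is
 * retried HZ/10 jiffies (~100ms) later.
 */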
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
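/*
 * checksum_udp() returns 0 when the UDP checksum is absent, has already
 * been verified, or verifies here; any non-zero result means the packet
 * should be dropped.
 */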
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
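/*
 * The 16-packet budget below is shared across all of the device's NAPI
 * instances; once poll_one_napi() has consumed it, the remaining
 * instances are skipped until the next poll.
 */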
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}
static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
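/*
 * find_skb() tries a fresh atomic allocation first and falls back to the
 * emergency pool; if both fail it polls the device (up to ten attempts)
 * to reclaim completed tx buffers before giving up.
 */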
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device, maybe there is some tx cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
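/*
 * netpoll_send_skb(), used below, is (in this tree) a static inline in
 * include/linux/netpoll.h that disables local IRQs around a call to
 * netpoll_send_skb_on_dev(np, skb, np->dev).
 */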
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
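/*
 * Layout note: netpoll_send_udp() builds the frame back to front. The
 * payload is copied first, then skb_push() prepends the UDP, IP and
 * Ethernet headers in turn, yielding | ethhdr | iphdr | udphdr | msg |.
 */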
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection of whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
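/*
 * __netpoll_rx() returns 1 when the skb was consumed by netpoll (queued
 * for ARP service or delivered to an rx_hook) and must not be passed up
 * the stack; 0 means normal receive processing should continue.
 */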
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
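/*
 * netpoll_parse_options() below expects the netconsole-style syntax
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]
 *
 * e.g. (hypothetical addresses):
 *
 *	6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55
 *
 * Fields left empty keep whatever is already in *np.
 */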
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
					 "is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
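/*
 * A minimal usage sketch for netpoll_setup(), with hypothetical values
 * (netconsole uses ports 6665/6666 and a broadcast MAC by default):
 *
 *	static struct netpoll np = {
 *		.name        = "mynetpoll",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	np.remote_ip = in_aton("192.168.0.1");
 *	if (netpoll_setup(&np))
 *		return;
 *	netpoll_send_udp(&np, msg, strlen(msg));
 */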
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
		       np->name, np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);