1 /*
2 * IUCV network driver
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 #undef DEBUG
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/timer.h>
44 #include <linux/bitops.h>
46 #include <linux/signal.h>
47 #include <linux/string.h>
48 #include <linux/device.h>
50 #include <linux/ip.h>
51 #include <linux/if_arp.h>
52 #include <linux/tcp.h>
53 #include <linux/skbuff.h>
54 #include <linux/ctype.h>
55 #include <net/dst.h>
57 #include <asm/io.h>
58 #include <asm/uaccess.h>
60 #include <net/iucv/iucv.h>
61 #include "fsm.h"
63 MODULE_AUTHOR
64 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
65 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
67 /**
68 * Debug Facility stuff
70 #define IUCV_DBF_SETUP_NAME "iucv_setup"
71 #define IUCV_DBF_SETUP_LEN 32
72 #define IUCV_DBF_SETUP_PAGES 2
73 #define IUCV_DBF_SETUP_NR_AREAS 1
74 #define IUCV_DBF_SETUP_LEVEL 3
76 #define IUCV_DBF_DATA_NAME "iucv_data"
77 #define IUCV_DBF_DATA_LEN 128
78 #define IUCV_DBF_DATA_PAGES 2
79 #define IUCV_DBF_DATA_NR_AREAS 1
80 #define IUCV_DBF_DATA_LEVEL 2
82 #define IUCV_DBF_TRACE_NAME "iucv_trace"
83 #define IUCV_DBF_TRACE_LEN 16
84 #define IUCV_DBF_TRACE_PAGES 4
85 #define IUCV_DBF_TRACE_NR_AREAS 1
86 #define IUCV_DBF_TRACE_LEVEL 3
88 #define IUCV_DBF_TEXT(name,level,text) \
89 do { \
90 debug_text_event(iucv_dbf_##name,level,text); \
91 } while (0)
93 #define IUCV_DBF_HEX(name,level,addr,len) \
94 do { \
95 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
96 } while (0)
98 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
100 /* Allow to sort out low debug levels early to avoid wasted sprints */
101 static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
103 return (level <= dbf_grp->level);
106 #define IUCV_DBF_TEXT_(name, level, text...) \
107 do { \
108 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
109 char* iucv_dbf_txt_buf = \
110 get_cpu_var(iucv_dbf_txt_buf); \
111 sprintf(iucv_dbf_txt_buf, text); \
112 debug_text_event(iucv_dbf_##name, level, \
113 iucv_dbf_txt_buf); \
114 put_cpu_var(iucv_dbf_txt_buf); \
116 } while (0)
118 #define IUCV_DBF_SPRINTF(name,level,text...) \
119 do { \
120 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
121 debug_sprintf_event(iucv_dbf_trace, level, text ); \
122 } while (0)
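/*
 * Note on the macros above: IUCV_DBF_TEXT_() first filters on the debug
 * level via iucv_dbf_passes(), formats into the per-CPU buffer
 * iucv_dbf_txt_buf and only then calls debug_text_event().  A typical
 * call, as used further down, looks like
 *
 *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 *
 * The resulting entries show up in the s390 debug feature areas
 * "iucv_setup", "iucv_data" and "iucv_trace" (s390dbf in procfs or
 * debugfs, depending on the kernel version).
 */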
125 * some more debug stuff
127 #define IUCV_HEXDUMP16(importance,header,ptr) \
128 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
129 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
130 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
131 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
132 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
133 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
134 *(((char*)ptr)+12),*(((char*)ptr)+13), \
135 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
136 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
137 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
138 *(((char*)ptr)+16),*(((char*)ptr)+17), \
139 *(((char*)ptr)+18),*(((char*)ptr)+19), \
140 *(((char*)ptr)+20),*(((char*)ptr)+21), \
141 *(((char*)ptr)+22),*(((char*)ptr)+23), \
142 *(((char*)ptr)+24),*(((char*)ptr)+25), \
143 *(((char*)ptr)+26),*(((char*)ptr)+27), \
144 *(((char*)ptr)+28),*(((char*)ptr)+29), \
145 *(((char*)ptr)+30),*(((char*)ptr)+31));
147 #define PRINTK_HEADER " iucv: " /* for debugging */
149 static struct device_driver netiucv_driver = {
150 .owner = THIS_MODULE,
151 .name = "netiucv",
152 .bus = &iucv_bus,
155 static int netiucv_callback_connreq(struct iucv_path *,
156 u8 ipvmid[8], u8 ipuser[16]);
157 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
158 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
159 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
160 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
162 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
164 static struct iucv_handler netiucv_handler = {
165 .path_pending = netiucv_callback_connreq,
166 .path_complete = netiucv_callback_connack,
167 .path_severed = netiucv_callback_connrej,
168 .path_quiesced = netiucv_callback_connsusp,
169 .path_resumed = netiucv_callback_connres,
170 .message_pending = netiucv_callback_rx,
171 .message_complete = netiucv_callback_txdone
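/*
 * Each callback registered above does little more than raise the
 * corresponding event on the connection state machine
 * (CONN_EVENT_CONN_REQ, CONN_EVENT_CONN_ACK, CONN_EVENT_CONN_REJ,
 * CONN_EVENT_CONN_SUS, CONN_EVENT_CONN_RES, CONN_EVENT_RX and
 * CONN_EVENT_TXDONE); the actual work is done by the conn_action_*
 * handlers below.
 */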
175 * Per connection profiling data
177 struct connection_profile {
178 unsigned long maxmulti;
179 unsigned long maxcqueue;
180 unsigned long doios_single;
181 unsigned long doios_multi;
182 unsigned long txlen;
183 unsigned long tx_time;
184 struct timespec send_stamp;
185 unsigned long tx_pending;
186 unsigned long tx_max_pending;
190 * Representation of one iucv connection
192 struct iucv_connection {
193 struct list_head list;
194 struct iucv_path *path;
195 struct sk_buff *rx_buff;
196 struct sk_buff *tx_buff;
197 struct sk_buff_head collect_queue;
198 struct sk_buff_head commit_queue;
199 spinlock_t collect_lock;
200 int collect_len;
201 int max_buffsize;
202 fsm_timer timer;
203 fsm_instance *fsm;
204 struct net_device *netdev;
205 struct connection_profile prof;
206 char userid[9];
210 * Linked list of all connection structs.
212 static LIST_HEAD(iucv_connection_list);
213 static DEFINE_RWLOCK(iucv_connection_rwlock);
216 * Representation of event-data for the
217 * connection state machine.
219 struct iucv_event {
220 struct iucv_connection *conn;
221 void *data;
225 * Private part of the network device structure
227 struct netiucv_priv {
228 struct net_device_stats stats;
229 unsigned long tbusy;
230 fsm_instance *fsm;
231 struct iucv_connection *conn;
232 struct device *dev;
236 * Link level header for a packet.
238 struct ll_header {
239 u16 next;
242 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
243 #define NETIUCV_BUFSIZE_MAX 32768
244 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
245 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
246 #define NETIUCV_MTU_DEFAULT 9216
247 #define NETIUCV_QUEUELEN_DEFAULT 50
248 #define NETIUCV_TIMEOUT_5SEC 5000
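/*
 * Size relationships implied by the defines above (NETIUCV_HDRLEN is the
 * single u16 "next" field of struct ll_header):
 *
 *	NETIUCV_HDRLEN      = 2
 *	NETIUCV_MTU_MAX     = 32768 - 2 = 32766
 *	NETIUCV_MTU_DEFAULT = 9216
 *
 * A buffer of max_buffsize bytes must hold the MTU plus two headers (one
 * in front of the packet, one terminating the chain), which is the
 * constraint later checked in buffer_write().
 */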
251 * Compatibility macros for busy handling
252 * of network devices.
254 static inline void netiucv_clear_busy(struct net_device *dev)
256 struct netiucv_priv *priv = netdev_priv(dev);
257 clear_bit(0, &priv->tbusy);
258 netif_wake_queue(dev);
261 static inline int netiucv_test_and_set_busy(struct net_device *dev)
263 struct netiucv_priv *priv = netdev_priv(dev);
264 netif_stop_queue(dev);
265 return test_and_set_bit(0, &priv->tbusy);
268 static u8 iucvMagic[16] = {
269 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
270 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
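/*
 * 0x40 is the EBCDIC blank and 0xF0 the EBCDIC digit '0', so iucvMagic
 * is a fixed 16-byte EBCDIC pattern.  It is sent as user data when a
 * path is connected or severed and checked in netiucv_callback_connreq()
 * so that only netiucv peers are accepted.
 */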
274 * Convert an iucv userId to its printable
275 * form (strip whitespace at end).
277 * @param An iucv userId
279 * @returns The printable string (static data!!)
281 static char *netiucv_printname(char *name)
283 static char tmp[9];
284 char *p = tmp;
285 memcpy(tmp, name, 8);
286 tmp[8] = '\0';
287 while (*p && (!isspace(*p)))
288 p++;
289 *p = '\0';
290 return tmp;
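/*
 * Example (illustrative user ID): for the blank-padded 8-byte z/VM user
 * ID "LNXGUEST", netiucv_printname() returns the NUL-terminated string
 * "LNXGUEST".  The result lives in a static buffer, so it must be
 * consumed before the next call.
 */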
294 * States of the interface statemachine.
296 enum dev_states {
297 DEV_STATE_STOPPED,
298 DEV_STATE_STARTWAIT,
299 DEV_STATE_STOPWAIT,
300 DEV_STATE_RUNNING,
302 * MUST always be the last element!!
304 NR_DEV_STATES
307 static const char *dev_state_names[] = {
308 "Stopped",
309 "StartWait",
310 "StopWait",
311 "Running",
315 * Events of the interface statemachine.
317 enum dev_events {
318 DEV_EVENT_START,
319 DEV_EVENT_STOP,
320 DEV_EVENT_CONUP,
321 DEV_EVENT_CONDOWN,
323 * MUST always be the last element!!
325 NR_DEV_EVENTS
328 static const char *dev_event_names[] = {
329 "Start",
330 "Stop",
331 "Connection up",
332 "Connection down",
336 * Events of the connection statemachine
338 enum conn_events {
340 * Events, representing callbacks from
341 * lowlevel iucv layer
343 CONN_EVENT_CONN_REQ,
344 CONN_EVENT_CONN_ACK,
345 CONN_EVENT_CONN_REJ,
346 CONN_EVENT_CONN_SUS,
347 CONN_EVENT_CONN_RES,
348 CONN_EVENT_RX,
349 CONN_EVENT_TXDONE,
352 * Events, representing errors return codes from
353 * calls to lowlevel iucv layer
357 * Event, representing timer expiry.
359 CONN_EVENT_TIMER,
362 * Events, representing commands from upper levels.
364 CONN_EVENT_START,
365 CONN_EVENT_STOP,
368 * MUST always be the last element!!
370 NR_CONN_EVENTS,
373 static const char *conn_event_names[] = {
374 "Remote connection request",
375 "Remote connection acknowledge",
376 "Remote connection reject",
377 "Connection suspended",
378 "Connection resumed",
379 "Data received",
380 "Data sent",
382 "Timer",
384 "Start",
385 "Stop",
389 * States of the connection statemachine.
391 enum conn_states {
393 * Connection not assigned to any device,
394 * initial state, invalid
396 CONN_STATE_INVALID,
399 * Userid assigned but not operating
401 CONN_STATE_STOPPED,
404 * Connection registered,
405 * no connection request sent yet,
406 * no connection request received
408 CONN_STATE_STARTWAIT,
411 * Connection registered and connection request sent,
412 * no acknowledge and no connection request received yet.
414 CONN_STATE_SETUPWAIT,
417 * Connection up and running idle
419 CONN_STATE_IDLE,
422 * Data sent, awaiting CONN_EVENT_TXDONE
424 CONN_STATE_TX,
427 * Error during registration.
429 CONN_STATE_REGERR,
432 * Error during connection setup.
434 CONN_STATE_CONNERR,
437 * MUST always be the last element!!
439 NR_CONN_STATES,
442 static const char *conn_state_names[] = {
443 "Invalid",
444 "Stopped",
445 "StartWait",
446 "SetupWait",
447 "Idle",
448 "TX",
449 "Terminating",
450 "Registration error",
451 "Connect error",
456 * Debug Facility Stuff
458 static debug_info_t *iucv_dbf_setup = NULL;
459 static debug_info_t *iucv_dbf_data = NULL;
460 static debug_info_t *iucv_dbf_trace = NULL;
462 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
464 static void iucv_unregister_dbf_views(void)
466 if (iucv_dbf_setup)
467 debug_unregister(iucv_dbf_setup);
468 if (iucv_dbf_data)
469 debug_unregister(iucv_dbf_data);
470 if (iucv_dbf_trace)
471 debug_unregister(iucv_dbf_trace);
473 static int iucv_register_dbf_views(void)
475 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
476 IUCV_DBF_SETUP_PAGES,
477 IUCV_DBF_SETUP_NR_AREAS,
478 IUCV_DBF_SETUP_LEN);
479 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
480 IUCV_DBF_DATA_PAGES,
481 IUCV_DBF_DATA_NR_AREAS,
482 IUCV_DBF_DATA_LEN);
483 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
484 IUCV_DBF_TRACE_PAGES,
485 IUCV_DBF_TRACE_NR_AREAS,
486 IUCV_DBF_TRACE_LEN);
488 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
489 (iucv_dbf_trace == NULL)) {
490 iucv_unregister_dbf_views();
491 return -ENOMEM;
493 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
494 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
496 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
497 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
499 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
500 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
502 return 0;
506 * Callback-wrappers, called from lowlevel iucv layer.
509 static void netiucv_callback_rx(struct iucv_path *path,
510 struct iucv_message *msg)
512 struct iucv_connection *conn = path->private;
513 struct iucv_event ev;
515 ev.conn = conn;
516 ev.data = msg;
517 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
520 static void netiucv_callback_txdone(struct iucv_path *path,
521 struct iucv_message *msg)
523 struct iucv_connection *conn = path->private;
524 struct iucv_event ev;
526 ev.conn = conn;
527 ev.data = msg;
528 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
531 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
533 struct iucv_connection *conn = path->private;
535 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
538 static int netiucv_callback_connreq(struct iucv_path *path,
539 u8 ipvmid[8], u8 ipuser[16])
541 struct iucv_connection *conn = path->private;
542 struct iucv_event ev;
543 int rc;
545 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
546 /* ipuser must match iucvMagic. */
547 return -EINVAL;
548 rc = -EINVAL;
549 read_lock_bh(&iucv_connection_rwlock);
550 list_for_each_entry(conn, &iucv_connection_list, list) {
551 if (strncmp(ipvmid, conn->userid, 8))
552 continue;
553 /* Found a matching connection for this path. */
554 conn->path = path;
555 ev.conn = conn;
556 ev.data = path;
557 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
558 rc = 0;
560 read_unlock_bh(&iucv_connection_rwlock);
561 return rc;
564 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
566 struct iucv_connection *conn = path->private;
568 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
571 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
573 struct iucv_connection *conn = path->private;
575 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
578 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
580 struct iucv_connection *conn = path->private;
582 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
586 * NOP action for statemachines
588 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
593 * Actions of the connection statemachine
597 * netiucv_unpack_skb
598 * @conn: The connection where this skb has been received.
599 * @pskb: The received skb.
601 * Unpack a just received skb and hand it over to upper layers.
602 * Helper function for conn_action_rx.
604 static void netiucv_unpack_skb(struct iucv_connection *conn,
605 struct sk_buff *pskb)
607 struct net_device *dev = conn->netdev;
608 struct netiucv_priv *privptr = netdev_priv(dev);
609 u16 offset = 0;
611 skb_put(pskb, NETIUCV_HDRLEN);
612 pskb->dev = dev;
613 pskb->ip_summed = CHECKSUM_NONE;
614 pskb->protocol = ntohs(ETH_P_IP);
616 while (1) {
617 struct sk_buff *skb;
618 struct ll_header *header = (struct ll_header *) pskb->data;
620 if (!header->next)
621 break;
623 skb_pull(pskb, NETIUCV_HDRLEN);
624 header->next -= offset;
625 offset += header->next;
626 header->next -= NETIUCV_HDRLEN;
627 if (skb_tailroom(pskb) < header->next) {
628 PRINT_WARN("%s: Illegal next field in iucv header: "
629 "%d > %d\n",
630 dev->name, header->next, skb_tailroom(pskb));
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb));
633 return;
635 skb_put(pskb, header->next);
636 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len);
638 if (!skb) {
639 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
640 dev->name);
641 IUCV_DBF_TEXT(data, 2,
642 "Out of memory in netiucv_unpack_skb\n");
643 privptr->stats.rx_dropped++;
644 return;
646 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
647 pskb->len);
648 skb_reset_mac_header(skb);
649 skb->dev = pskb->dev;
650 skb->protocol = pskb->protocol;
651 pskb->ip_summed = CHECKSUM_UNNECESSARY;
652 privptr->stats.rx_packets++;
653 privptr->stats.rx_bytes += skb->len;
655 * Since receiving is always initiated from a tasklet (in iucv.c),
656 * we must use netif_rx_ni() instead of netif_rx()
658 netif_rx_ni(skb);
659 dev->last_rx = jiffies;
660 skb_pull(pskb, header->next);
661 skb_put(pskb, NETIUCV_HDRLEN);
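/*
 * Layout of the receive buffer unpacked by the loop above (one IUCV
 * message may carry several IP packets):
 *
 *	| next | packet 1 | next | packet 2 | ... | next = 0 |
 *
 * Each 16-bit "next" field holds the offset of the following ll_header
 * counted from the start of the buffer; a value of zero terminates the
 * chain.
 */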
665 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
667 struct iucv_event *ev = arg;
668 struct iucv_connection *conn = ev->conn;
669 struct iucv_message *msg = ev->data;
670 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
671 int rc;
673 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
675 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg);
677 PRINT_WARN("Received data for unlinked connection\n");
678 IUCV_DBF_TEXT(data, 2,
679 "Received data for unlinked connection\n");
680 return;
682 if (msg->length > conn->max_buffsize) {
683 iucv_message_reject(conn->path, msg);
684 privptr->stats.rx_dropped++;
685 PRINT_WARN("msglen %d > max_buffsize %d\n",
686 msg->length, conn->max_buffsize);
687 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
688 msg->length, conn->max_buffsize);
689 return;
691 conn->rx_buff->data = conn->rx_buff->head;
692 skb_reset_tail_pointer(conn->rx_buff);
693 conn->rx_buff->len = 0;
694 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
695 msg->length, NULL);
696 if (rc || msg->length < 5) {
697 privptr->stats.rx_errors++;
698 PRINT_WARN("iucv_receive returned %08x\n", rc);
699 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
700 return;
702 netiucv_unpack_skb(conn, conn->rx_buff);
705 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
707 struct iucv_event *ev = arg;
708 struct iucv_connection *conn = ev->conn;
709 struct iucv_message *msg = ev->data;
710 struct iucv_message txmsg;
711 struct netiucv_priv *privptr = NULL;
712 u32 single_flag = msg->tag;
713 u32 txbytes = 0;
714 u32 txpackets = 0;
715 u32 stat_maxcq = 0;
716 struct sk_buff *skb;
717 unsigned long saveflags;
718 struct ll_header header;
719 int rc;
721 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
723 if (conn && conn->netdev)
724 privptr = netdev_priv(conn->netdev);
725 conn->prof.tx_pending--;
726 if (single_flag) {
727 if ((skb = skb_dequeue(&conn->commit_queue))) {
728 atomic_dec(&skb->users);
729 dev_kfree_skb_any(skb);
730 if (privptr) {
731 privptr->stats.tx_packets++;
732 privptr->stats.tx_bytes +=
733 (skb->len - NETIUCV_HDRLEN
734 - NETIUCV_HDRLEN);
738 conn->tx_buff->data = conn->tx_buff->head;
739 skb_reset_tail_pointer(conn->tx_buff);
740 conn->tx_buff->len = 0;
741 spin_lock_irqsave(&conn->collect_lock, saveflags);
742 while ((skb = skb_dequeue(&conn->collect_queue))) {
743 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
744 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
745 NETIUCV_HDRLEN);
746 skb_copy_from_linear_data(skb,
747 skb_put(conn->tx_buff, skb->len),
748 skb->len);
749 txbytes += skb->len;
750 txpackets++;
751 stat_maxcq++;
752 atomic_dec(&skb->users);
753 dev_kfree_skb_any(skb);
755 if (conn->collect_len > conn->prof.maxmulti)
756 conn->prof.maxmulti = conn->collect_len;
757 conn->collect_len = 0;
758 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
759 if (conn->tx_buff->len == 0) {
760 fsm_newstate(fi, CONN_STATE_IDLE);
761 return;
764 header.next = 0;
765 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
766 conn->prof.send_stamp = current_kernel_time();
767 txmsg.class = 0;
768 txmsg.tag = 0;
769 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
770 conn->tx_buff->data, conn->tx_buff->len);
771 conn->prof.doios_multi++;
772 conn->prof.txlen += conn->tx_buff->len;
773 conn->prof.tx_pending++;
774 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
775 conn->prof.tx_max_pending = conn->prof.tx_pending;
776 if (rc) {
777 conn->prof.tx_pending--;
778 fsm_newstate(fi, CONN_STATE_IDLE);
779 if (privptr)
780 privptr->stats.tx_errors += txpackets;
781 PRINT_WARN("iucv_send returned %08x\n", rc);
782 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
783 } else {
784 if (privptr) {
785 privptr->stats.tx_packets += txpackets;
786 privptr->stats.tx_bytes += txbytes;
788 if (stat_maxcq > conn->prof.maxcqueue)
789 conn->prof.maxcqueue = stat_maxcq;
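/*
 * Note: conn_action_txdone() is where packets queued on collect_queue
 * are coalesced into one IUCV message (counted as doios_multi), whereas
 * netiucv_transmit_skb() below sends a single packet directly (counted
 * as doios_single) whenever the connection is idle.
 */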
793 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
795 struct iucv_event *ev = arg;
796 struct iucv_connection *conn = ev->conn;
797 struct iucv_path *path = ev->data;
798 struct net_device *netdev = conn->netdev;
799 struct netiucv_priv *privptr = netdev_priv(netdev);
800 int rc;
802 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
804 conn->path = path;
805 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
806 path->flags = 0;
807 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
808 if (rc) {
809 PRINT_WARN("%s: IUCV accept failed with error %d\n",
810 netdev->name, rc);
811 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
812 return;
814 fsm_newstate(fi, CONN_STATE_IDLE);
815 netdev->tx_queue_len = conn->path->msglim;
816 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
819 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
821 struct iucv_event *ev = arg;
822 struct iucv_path *path = ev->data;
824 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
825 iucv_path_sever(path, NULL);
828 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
830 struct iucv_connection *conn = arg;
831 struct net_device *netdev = conn->netdev;
832 struct netiucv_priv *privptr = netdev_priv(netdev);
834 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
835 fsm_deltimer(&conn->timer);
836 fsm_newstate(fi, CONN_STATE_IDLE);
837 netdev->tx_queue_len = conn->path->msglim;
838 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
841 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
843 struct iucv_connection *conn = arg;
845 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
846 fsm_deltimer(&conn->timer);
847 iucv_path_sever(conn->path, NULL);
848 fsm_newstate(fi, CONN_STATE_STARTWAIT);
851 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
853 struct iucv_connection *conn = arg;
854 struct net_device *netdev = conn->netdev;
855 struct netiucv_priv *privptr = netdev_priv(netdev);
857 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
859 fsm_deltimer(&conn->timer);
860 iucv_path_sever(conn->path, NULL);
861 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
862 IUCV_DBF_TEXT(data, 2,
863 "conn_action_connsever: Remote dropped connection\n");
864 fsm_newstate(fi, CONN_STATE_STARTWAIT);
865 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
868 static void conn_action_start(fsm_instance *fi, int event, void *arg)
870 struct iucv_connection *conn = arg;
871 int rc;
873 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
875 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n",
877 conn->netdev->name, conn->userid);
880 * We must set the state before calling iucv_connect because the
881 * callback handler could be called at any point after the connection
882 * request is sent
885 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
886 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
887 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
888 NULL, iucvMagic, conn);
889 switch (rc) {
890 case 0:
891 conn->netdev->tx_queue_len = conn->path->msglim;
892 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
893 CONN_EVENT_TIMER, conn);
894 return;
895 case 11:
896 PRINT_INFO("%s: User %s is currently not available.\n",
897 conn->netdev->name,
898 netiucv_printname(conn->userid));
899 fsm_newstate(fi, CONN_STATE_STARTWAIT);
900 break;
901 case 12:
902 PRINT_INFO("%s: User %s is currently not ready.\n",
903 conn->netdev->name,
904 netiucv_printname(conn->userid));
905 fsm_newstate(fi, CONN_STATE_STARTWAIT);
906 break;
907 case 13:
908 PRINT_WARN("%s: Too many IUCV connections.\n",
909 conn->netdev->name);
910 fsm_newstate(fi, CONN_STATE_CONNERR);
911 break;
912 case 14:
913 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
914 conn->netdev->name,
915 netiucv_printname(conn->userid));
916 fsm_newstate(fi, CONN_STATE_CONNERR);
917 break;
918 case 15:
919 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
920 conn->netdev->name);
921 fsm_newstate(fi, CONN_STATE_CONNERR);
922 break;
923 default:
924 PRINT_WARN("%s: iucv_connect returned error %d\n",
925 conn->netdev->name, rc);
926 fsm_newstate(fi, CONN_STATE_CONNERR);
927 break;
929 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
930 kfree(conn->path);
931 conn->path = NULL;
934 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
936 struct sk_buff *skb;
938 while ((skb = skb_dequeue(q))) {
939 atomic_dec(&skb->users);
940 dev_kfree_skb_any(skb);
944 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
946 struct iucv_event *ev = arg;
947 struct iucv_connection *conn = ev->conn;
948 struct net_device *netdev = conn->netdev;
949 struct netiucv_priv *privptr = netdev_priv(netdev);
951 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
953 fsm_deltimer(&conn->timer);
954 fsm_newstate(fi, CONN_STATE_STOPPED);
955 netiucv_purge_skb_queue(&conn->collect_queue);
956 if (conn->path) {
957 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
958 iucv_path_sever(conn->path, iucvMagic);
959 kfree(conn->path);
960 conn->path = NULL;
962 netiucv_purge_skb_queue(&conn->commit_queue);
963 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
966 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
968 struct iucv_connection *conn = arg;
969 struct net_device *netdev = conn->netdev;
971 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
972 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
975 static const fsm_node conn_fsm[] = {
976 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
977 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
979 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
980 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
981 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
983 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
984 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
985 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
987 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
988 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
989 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
990 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
991 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
993 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
994 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
996 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
997 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
998 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
1000 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
1001 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1003 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1004 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1007 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
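/*
 * Each fsm_node above is a (state, event, action) triple; fsm_event()
 * looks up the connection's current state and the raised event in this
 * table and runs the listed conn_action_* handler.  Combinations that do
 * not appear here have no action attached.
 */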
1011 * Actions for interface - statemachine.
1015 * dev_action_start
1016 * @fi: An instance of an interface statemachine.
1017 * @event: The event, just happened.
1018 * @arg: Generic pointer, casted from struct net_device * upon call.
1020 * Startup connection by sending CONN_EVENT_START to it.
1022 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1024 struct net_device *dev = arg;
1025 struct netiucv_priv *privptr = netdev_priv(dev);
1027 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1029 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1030 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1034 * Shutdown connection by sending CONN_EVENT_STOP to it.
1036 * @param fi An instance of an interface statemachine.
1037 * @param event The event, just happened.
1038 * @param arg Generic pointer, casted from struct net_device * upon call.
1040 static void
1041 dev_action_stop(fsm_instance *fi, int event, void *arg)
1043 struct net_device *dev = arg;
1044 struct netiucv_priv *privptr = netdev_priv(dev);
1045 struct iucv_event ev;
1047 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1049 ev.conn = privptr->conn;
1051 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1052 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1056 * Called from connection statemachine
1057 * when a connection is up and running.
1059 * @param fi An instance of an interface statemachine.
1060 * @param event The event, just happened.
1061 * @param arg Generic pointer, casted from struct net_device * upon call.
1063 static void
1064 dev_action_connup(fsm_instance *fi, int event, void *arg)
1066 struct net_device *dev = arg;
1067 struct netiucv_priv *privptr = netdev_priv(dev);
1069 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1071 switch (fsm_getstate(fi)) {
1072 case DEV_STATE_STARTWAIT:
1073 fsm_newstate(fi, DEV_STATE_RUNNING);
1074 PRINT_INFO("%s: connected with remote side %s\n",
1075 dev->name, privptr->conn->userid);
1076 IUCV_DBF_TEXT(setup, 3,
1077 "connection is up and running\n");
1078 break;
1079 case DEV_STATE_STOPWAIT:
1080 PRINT_INFO(
1081 "%s: got connection UP event during shutdown!\n",
1082 dev->name);
1083 IUCV_DBF_TEXT(data, 2,
1084 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1085 break;
1090 * Called from connection statemachine
1091 * when a connection has been shutdown.
1093 * @param fi An instance of an interface statemachine.
1094 * @param event The event, just happened.
1095 * @param arg Generic pointer, casted from struct net_device * upon call.
1097 static void
1098 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1100 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1102 switch (fsm_getstate(fi)) {
1103 case DEV_STATE_RUNNING:
1104 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1105 break;
1106 case DEV_STATE_STOPWAIT:
1107 fsm_newstate(fi, DEV_STATE_STOPPED);
1108 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1109 break;
1113 static const fsm_node dev_fsm[] = {
1114 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1116 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1117 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1119 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1120 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1122 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1123 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1124 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1127 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
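/*
 * The device state machine above and the connection state machine drive
 * each other: dev_action_start()/dev_action_stop() raise
 * CONN_EVENT_START/CONN_EVENT_STOP on the connection, and the connection
 * actions answer with DEV_EVENT_CONUP or DEV_EVENT_CONDOWN once the IUCV
 * path has been established or severed.
 */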
1130 * Transmit a packet.
1131 * This is a helper function for netiucv_tx().
1133 * @param conn Connection to be used for sending.
1134 * @param skb Pointer to struct sk_buff of packet to send.
1135 * The linklevel header has already been set up
1136 * by netiucv_tx().
1138 * @return 0 on success, nonzero error code on failure.
1140 static int netiucv_transmit_skb(struct iucv_connection *conn,
1141 struct sk_buff *skb)
1143 struct iucv_message msg;
1144 unsigned long saveflags;
1145 struct ll_header header;
1146 int rc;
1148 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1149 int l = skb->len + NETIUCV_HDRLEN;
1151 spin_lock_irqsave(&conn->collect_lock, saveflags);
1152 if (conn->collect_len + l >
1153 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1154 rc = -EBUSY;
1155 IUCV_DBF_TEXT(data, 2,
1156 "EBUSY from netiucv_transmit_skb\n");
1157 } else {
1158 atomic_inc(&skb->users);
1159 skb_queue_tail(&conn->collect_queue, skb);
1160 conn->collect_len += l;
1161 rc = 0;
1163 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1164 } else {
1165 struct sk_buff *nskb = skb;
1167 * Copy the skb to a newly allocated skb in lowmem only if the
1168 * data is located above 2G in memory or tailroom is < 2.
1170 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1171 NETIUCV_HDRLEN)) >> 31;
1172 int copied = 0;
1173 if (hi || (skb_tailroom(skb) < 2)) {
1174 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1175 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1176 if (!nskb) {
1177 PRINT_WARN("%s: Could not allocate tx_skb\n",
1178 conn->netdev->name);
1179 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1180 rc = -ENOMEM;
1181 return rc;
1182 } else {
1183 skb_reserve(nskb, NETIUCV_HDRLEN);
1184 memcpy(skb_put(nskb, skb->len),
1185 skb->data, skb->len);
1187 copied = 1;
1190 * skb now is below 2G and has enough room. Add headers.
1192 header.next = nskb->len + NETIUCV_HDRLEN;
1193 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1194 header.next = 0;
1195 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1197 fsm_newstate(conn->fsm, CONN_STATE_TX);
1198 conn->prof.send_stamp = current_kernel_time();
1200 msg.tag = 1;
1201 msg.class = 0;
1202 rc = iucv_message_send(conn->path, &msg, 0, 0,
1203 nskb->data, nskb->len);
1204 conn->prof.doios_single++;
1205 conn->prof.txlen += skb->len;
1206 conn->prof.tx_pending++;
1207 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1208 conn->prof.tx_max_pending = conn->prof.tx_pending;
1209 if (rc) {
1210 struct netiucv_priv *privptr;
1211 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1212 conn->prof.tx_pending--;
1213 privptr = netdev_priv(conn->netdev);
1214 if (privptr)
1215 privptr->stats.tx_errors++;
1216 if (copied)
1217 dev_kfree_skb(nskb);
1218 else {
1220 * Remove our headers. They get added
1221 * again on retransmit.
1223 skb_pull(skb, NETIUCV_HDRLEN);
1224 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1226 PRINT_WARN("iucv_send returned %08x\n", rc);
1227 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1228 } else {
1229 if (copied)
1230 dev_kfree_skb(skb);
1231 atomic_inc(&nskb->users);
1232 skb_queue_tail(&conn->commit_queue, nskb);
1236 return rc;
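/*
 * Note: the bounce copy into a GFP_DMA skb above keeps the send buffer
 * below 2 GB (GFP_DMA on s390 means memory below 2 GB), which the IUCV
 * transport used here appears to require, and at the same time
 * guarantees tailroom for the terminating 2-byte header.
 */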
1240 * Interface API for upper network layers
1244 * Open an interface.
1245 * Called from generic network layer when ifconfig up is run.
1247 * @param dev Pointer to interface struct.
1249 * @return 0 on success, -ERRNO on failure. (Never fails.)
1251 static int netiucv_open(struct net_device *dev)
1253 struct netiucv_priv *priv = netdev_priv(dev);
1255 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1256 return 0;
1260 * Close an interface.
1261 * Called from generic network layer when ifconfig down is run.
1263 * @param dev Pointer to interface struct.
1265 * @return 0 on success, -ERRNO on failure. (Never fails.)
1267 static int netiucv_close(struct net_device *dev)
1269 struct netiucv_priv *priv = netdev_priv(dev);
1271 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1272 return 0;
1276 * Start transmission of a packet.
1277 * Called from generic network device layer.
1279 * @param skb Pointer to buffer containing the packet.
1280 * @param dev Pointer to interface struct.
1282 * @return 0 if packet consumed, !0 if packet rejected.
1283 * Note: If we return !0, then the packet is free'd by
1284 * the generic network layer.
1286 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1288 struct netiucv_priv *privptr = netdev_priv(dev);
1289 int rc;
1291 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1293 * Some sanity checks ...
1295 if (skb == NULL) {
1296 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1297 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1298 privptr->stats.tx_dropped++;
1299 return 0;
1301 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1302 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1303 dev->name, NETIUCV_HDRLEN);
1304 IUCV_DBF_TEXT(data, 2,
1305 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1306 dev_kfree_skb(skb);
1307 privptr->stats.tx_dropped++;
1308 return 0;
1312 * If connection is not running, try to restart it
1313 * and throw away packet.
1315 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1316 if (!in_atomic())
1317 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1318 dev_kfree_skb(skb);
1319 privptr->stats.tx_dropped++;
1320 privptr->stats.tx_errors++;
1321 privptr->stats.tx_carrier_errors++;
1322 return 0;
1325 if (netiucv_test_and_set_busy(dev)) {
1326 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1327 return -EBUSY;
1329 dev->trans_start = jiffies;
1330 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1331 netiucv_clear_busy(dev);
1332 return rc;
1336 * netiucv_stats
1337 * @dev: Pointer to interface struct.
1339 * Returns interface statistics of a device.
1341 * Returns pointer to stats struct of this interface.
1343 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1345 struct netiucv_priv *priv = netdev_priv(dev);
1347 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1348 return &priv->stats;
1352 * netiucv_change_mtu
1353 * @dev: Pointer to interface struct.
1354 * @new_mtu: The new MTU to use for this interface.
1356 * Sets MTU of an interface.
1358 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1359 * (valid range is 576 .. NETIUCV_MTU_MAX).
1361 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1363 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1364 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1365 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1366 return -EINVAL;
1368 dev->mtu = new_mtu;
1369 return 0;
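/*
 * Usage example (interface name is illustrative): the MTU can be changed
 * with the usual tools, e.g.
 *
 *	ip link set dev iucv0 mtu 16384
 *
 * Values outside the range 576..NETIUCV_MTU_MAX (32766) are rejected
 * with -EINVAL.
 */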
1373 * attributes in sysfs
1376 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1377 char *buf)
1379 struct netiucv_priv *priv = dev->driver_data;
1381 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1382 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1385 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1386 const char *buf, size_t count)
1388 struct netiucv_priv *priv = dev->driver_data;
1389 struct net_device *ndev = priv->conn->netdev;
1390 char *p;
1391 char *tmp;
1392 char username[9];
1393 int i;
1394 struct iucv_connection *cp;
1396 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1397 if (count > 9) {
1398 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1399 IUCV_DBF_TEXT_(setup, 2,
1400 "%d is length of username\n", (int) count);
1401 return -EINVAL;
1404 tmp = strsep((char **) &buf, "\n");
1405 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1406 if (isalnum(*p) || (*p == '$')) {
1407 username[i]= toupper(*p);
1408 continue;
1410 if (*p == '\n') {
1411 /* trailing lf, grr */
1412 break;
1414 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1415 IUCV_DBF_TEXT_(setup, 2,
1416 "username: invalid character %c\n", *p);
1417 return -EINVAL;
1419 while (i < 8)
1420 username[i++] = ' ';
1421 username[8] = '\0';
1423 if (memcmp(username, priv->conn->userid, 9) &&
1424 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1425 /* username changed while the interface is active. */
1426 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1427 dev->bus_id, priv->conn->userid);
1428 PRINT_WARN("netiucv: user cannot be updated\n");
1429 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1430 return -EBUSY;
1432 read_lock_bh(&iucv_connection_rwlock);
1433 list_for_each_entry(cp, &iucv_connection_list, list) {
1434 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1435 read_unlock_bh(&iucv_connection_rwlock);
1436 PRINT_WARN("netiucv: Connection to %s already "
1437 "exists\n", username);
1438 return -EEXIST;
1441 read_unlock_bh(&iucv_connection_rwlock);
1442 memcpy(priv->conn->userid, username, 9);
1443 return count;
1446 static DEVICE_ATTR(user, 0644, user_show, user_write);
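/*
 * Usage example (user ID and sysfs path are illustrative, assuming the
 * first interface): the peer may only be changed while the interface is
 * down and no other connection to that user ID exists, e.g.
 *
 *	echo TESTVM2 > /sys/bus/iucv/devices/netiucv0/user
 */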
1448 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1449 char *buf)
1450 { struct netiucv_priv *priv = dev->driver_data;
1452 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1453 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1456 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1457 const char *buf, size_t count)
1459 struct netiucv_priv *priv = dev->driver_data;
1460 struct net_device *ndev = priv->conn->netdev;
1461 char *e;
1462 int bs1;
1464 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1465 if (count >= 39)
1466 return -EINVAL;
1468 bs1 = simple_strtoul(buf, &e, 0);
1470 if (e && (!isspace(*e))) {
1471 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1472 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1473 return -EINVAL;
1475 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1476 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1477 bs1);
1478 IUCV_DBF_TEXT_(setup, 2,
1479 "buffer_write: buffer size %d too large\n",
1480 bs1);
1481 return -EINVAL;
1483 if ((ndev->flags & IFF_RUNNING) &&
1484 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1485 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1486 bs1);
1487 IUCV_DBF_TEXT_(setup, 2,
1488 "buffer_write: buffer size %d too small\n",
1489 bs1);
1490 return -EINVAL;
1492 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1493 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1494 bs1);
1495 IUCV_DBF_TEXT_(setup, 2,
1496 "buffer_write: buffer size %d too small\n",
1497 bs1);
1498 return -EINVAL;
1501 priv->conn->max_buffsize = bs1;
1502 if (!(ndev->flags & IFF_RUNNING))
1503 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1505 return count;
1509 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
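/*
 * Usage example (value and sysfs path are illustrative): with the
 * interface not running, the transmit/receive buffer size can be tuned
 * via sysfs, e.g.
 *
 *	echo 16384 > /sys/bus/iucv/devices/netiucv0/buffer
 *
 * which also sets the MTU to 16384 - 2 * NETIUCV_HDRLEN while the
 * interface is down.
 */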
1511 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1512 char *buf)
1514 struct netiucv_priv *priv = dev->driver_data;
1516 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1517 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1520 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1522 static ssize_t conn_fsm_show (struct device *dev,
1523 struct device_attribute *attr, char *buf)
1525 struct netiucv_priv *priv = dev->driver_data;
1527 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1528 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1531 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1533 static ssize_t maxmulti_show (struct device *dev,
1534 struct device_attribute *attr, char *buf)
1536 struct netiucv_priv *priv = dev->driver_data;
1538 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1539 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1542 static ssize_t maxmulti_write (struct device *dev,
1543 struct device_attribute *attr,
1544 const char *buf, size_t count)
1546 struct netiucv_priv *priv = dev->driver_data;
1548 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1549 priv->conn->prof.maxmulti = 0;
1550 return count;
1553 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1555 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1556 char *buf)
1558 struct netiucv_priv *priv = dev->driver_data;
1560 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1561 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1564 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1565 const char *buf, size_t count)
1567 struct netiucv_priv *priv = dev->driver_data;
1569 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1570 priv->conn->prof.maxcqueue = 0;
1571 return count;
1574 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1576 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1577 char *buf)
1579 struct netiucv_priv *priv = dev->driver_data;
1581 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1582 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1585 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1586 const char *buf, size_t count)
1588 struct netiucv_priv *priv = dev->driver_data;
1590 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1591 priv->conn->prof.doios_single = 0;
1592 return count;
1595 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1597 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1598 char *buf)
1600 struct netiucv_priv *priv = dev->driver_data;
1602 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1603 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1606 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1607 const char *buf, size_t count)
1609 struct netiucv_priv *priv = dev->driver_data;
1611 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1612 priv->conn->prof.doios_multi = 0;
1613 return count;
1616 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1618 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1619 char *buf)
1621 struct netiucv_priv *priv = dev->driver_data;
1623 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1624 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1627 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1628 const char *buf, size_t count)
1630 struct netiucv_priv *priv = dev->driver_data;
1632 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1633 priv->conn->prof.txlen = 0;
1634 return count;
1637 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1639 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1640 char *buf)
1642 struct netiucv_priv *priv = dev->driver_data;
1644 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1645 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1648 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1649 const char *buf, size_t count)
1651 struct netiucv_priv *priv = dev->driver_data;
1653 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1654 priv->conn->prof.tx_time = 0;
1655 return count;
1658 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1660 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1661 char *buf)
1663 struct netiucv_priv *priv = dev->driver_data;
1665 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1666 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1669 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1670 const char *buf, size_t count)
1672 struct netiucv_priv *priv = dev->driver_data;
1674 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1675 priv->conn->prof.tx_pending = 0;
1676 return count;
1679 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1681 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1682 char *buf)
1684 struct netiucv_priv *priv = dev->driver_data;
1686 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1687 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1690 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1691 const char *buf, size_t count)
1693 struct netiucv_priv *priv = dev->driver_data;
1695 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1696 priv->conn->prof.tx_max_pending = 0;
1697 return count;
1700 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1702 static struct attribute *netiucv_attrs[] = {
1703 &dev_attr_buffer.attr,
1704 &dev_attr_user.attr,
1705 NULL,
1708 static struct attribute_group netiucv_attr_group = {
1709 .attrs = netiucv_attrs,
1712 static struct attribute *netiucv_stat_attrs[] = {
1713 &dev_attr_device_fsm_state.attr,
1714 &dev_attr_connection_fsm_state.attr,
1715 &dev_attr_max_tx_buffer_used.attr,
1716 &dev_attr_max_chained_skbs.attr,
1717 &dev_attr_tx_single_write_ops.attr,
1718 &dev_attr_tx_multi_write_ops.attr,
1719 &dev_attr_netto_bytes.attr,
1720 &dev_attr_max_tx_io_time.attr,
1721 &dev_attr_tx_pending.attr,
1722 &dev_attr_tx_max_pending.attr,
1723 NULL,
1726 static struct attribute_group netiucv_stat_attr_group = {
1727 .name = "stats",
1728 .attrs = netiucv_stat_attrs,
1731 static int netiucv_add_files(struct device *dev)
1733 int ret;
1735 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1736 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1737 if (ret)
1738 return ret;
1739 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1740 if (ret)
1741 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1742 return ret;
1745 static void netiucv_remove_files(struct device *dev)
1747 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1748 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1749 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1752 static int netiucv_register_device(struct net_device *ndev)
1754 struct netiucv_priv *priv = netdev_priv(ndev);
1755 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1756 int ret;
1759 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1761 if (dev) {
1762 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1763 dev->bus = &iucv_bus;
1764 dev->parent = iucv_root;
1766 * The release function could be called after the
1767 * module has been unloaded. Its _only_ task is to
1768 * free the struct. Therefore, we specify kfree()
1769 * directly here. (Probably a little bit obfuscating
1770 * but legitimate ...).
1772 dev->release = (void (*)(struct device *))kfree;
1773 dev->driver = &netiucv_driver;
1774 } else
1775 return -ENOMEM;
1777 ret = device_register(dev);
1779 if (ret)
1780 return ret;
1781 ret = netiucv_add_files(dev);
1782 if (ret)
1783 goto out_unreg;
1784 priv->dev = dev;
1785 dev->driver_data = priv;
1786 return 0;
1788 out_unreg:
1789 device_unregister(dev);
1790 return ret;
1793 static void netiucv_unregister_device(struct device *dev)
1795 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1796 netiucv_remove_files(dev);
1797 device_unregister(dev);
1801 * Allocate and initialize a new connection structure.
1802 * Add it to the list of netiucv connections;
1804 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1805 char *username)
1807 struct iucv_connection *conn;
1809 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1810 if (!conn)
1811 goto out;
1812 skb_queue_head_init(&conn->collect_queue);
1813 skb_queue_head_init(&conn->commit_queue);
1814 spin_lock_init(&conn->collect_lock);
1815 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1816 conn->netdev = dev;
1818 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1819 if (!conn->rx_buff)
1820 goto out_conn;
1821 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1822 if (!conn->tx_buff)
1823 goto out_rx;
1824 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1825 conn_event_names, NR_CONN_STATES,
1826 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1827 GFP_KERNEL);
1828 if (!conn->fsm)
1829 goto out_tx;
1831 fsm_settimer(conn->fsm, &conn->timer);
1832 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1834 if (username) {
1835 memcpy(conn->userid, username, 9);
1836 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1839 write_lock_bh(&iucv_connection_rwlock);
1840 list_add_tail(&conn->list, &iucv_connection_list);
1841 write_unlock_bh(&iucv_connection_rwlock);
1842 return conn;
1844 out_tx:
1845 kfree_skb(conn->tx_buff);
1846 out_rx:
1847 kfree_skb(conn->rx_buff);
1848 out_conn:
1849 kfree(conn);
1850 out:
1851 return NULL;
1855 * Release a connection structure and remove it from the
1856 * list of netiucv connections.
1858 static void netiucv_remove_connection(struct iucv_connection *conn)
1860 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1861 write_lock_bh(&iucv_connection_rwlock);
1862 list_del_init(&conn->list);
1863 write_unlock_bh(&iucv_connection_rwlock);
1864 fsm_deltimer(&conn->timer);
1865 netiucv_purge_skb_queue(&conn->collect_queue);
1866 if (conn->path) {
1867 iucv_path_sever(conn->path, iucvMagic);
1868 kfree(conn->path);
1869 conn->path = NULL;
1871 netiucv_purge_skb_queue(&conn->commit_queue);
1872 kfree_fsm(conn->fsm);
1873 kfree_skb(conn->rx_buff);
1874 kfree_skb(conn->tx_buff);
1878 * Release everything of a net device.
1880 static void netiucv_free_netdevice(struct net_device *dev)
1882 struct netiucv_priv *privptr = netdev_priv(dev);
1884 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1886 if (!dev)
1887 return;
1889 if (privptr) {
1890 if (privptr->conn)
1891 netiucv_remove_connection(privptr->conn);
1892 if (privptr->fsm)
1893 kfree_fsm(privptr->fsm);
1894 privptr->conn = NULL; privptr->fsm = NULL;
1895 /* privptr gets freed by free_netdev() */
1897 free_netdev(dev);
1901 * Initialize a net device. (Called from kernel in alloc_netdev())
1903 static void netiucv_setup_netdevice(struct net_device *dev)
1905 dev->mtu = NETIUCV_MTU_DEFAULT;
1906 dev->hard_start_xmit = netiucv_tx;
1907 dev->open = netiucv_open;
1908 dev->stop = netiucv_close;
1909 dev->get_stats = netiucv_stats;
1910 dev->change_mtu = netiucv_change_mtu;
1911 dev->destructor = netiucv_free_netdevice;
1912 dev->hard_header_len = NETIUCV_HDRLEN;
1913 dev->addr_len = 0;
1914 dev->type = ARPHRD_SLIP;
1915 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1916 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1920 * Allocate and initialize everything of a net device.
1922 static struct net_device *netiucv_init_netdevice(char *username)
1924 struct netiucv_priv *privptr;
1925 struct net_device *dev;
1927 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1928 netiucv_setup_netdevice);
1929 if (!dev)
1930 return NULL;
1931 if (dev_alloc_name(dev, dev->name) < 0)
1932 goto out_netdev;
1934 privptr = netdev_priv(dev);
1935 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1936 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1937 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1938 if (!privptr->fsm)
1939 goto out_netdev;
1941 privptr->conn = netiucv_new_connection(dev, username);
1942 if (!privptr->conn) {
1943 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1944 goto out_fsm;
1946 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1947 return dev;
1949 out_fsm:
1950 kfree_fsm(privptr->fsm);
1951 out_netdev:
1952 free_netdev(dev);
1953 return NULL;
1956 static ssize_t conn_write(struct device_driver *drv,
1957 const char *buf, size_t count)
1959 const char *p;
1960 char username[9];
1961 int i, rc;
1962 struct net_device *dev;
1963 struct netiucv_priv *priv;
1964 struct iucv_connection *cp;
1966 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1967 if (count>9) {
1968 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1969 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1970 return -EINVAL;
1973 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1974 if (isalnum(*p) || *p == '$') {
1975 username[i] = toupper(*p);
1976 continue;
1978 if (*p == '\n')
1979 /* trailing lf, grr */
1980 break;
1981 PRINT_WARN("netiucv: Invalid character in username!\n");
1982 IUCV_DBF_TEXT_(setup, 2,
1983 "conn_write: invalid character %c\n", *p);
1984 return -EINVAL;
1986 while (i < 8)
1987 username[i++] = ' ';
1988 username[8] = '\0';
1990 read_lock_bh(&iucv_connection_rwlock);
1991 list_for_each_entry(cp, &iucv_connection_list, list) {
1992 if (!strncmp(username, cp->userid, 9)) {
1993 read_unlock_bh(&iucv_connection_rwlock);
1994 PRINT_WARN("netiucv: Connection to %s already "
1995 "exists\n", username);
1996 return -EEXIST;
1999 read_unlock_bh(&iucv_connection_rwlock);
2001 dev = netiucv_init_netdevice(username);
2002 if (!dev) {
2003 PRINT_WARN("netiucv: Could not allocate network device "
2004 "structure for user '%s'\n",
2005 netiucv_printname(username));
2006 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2007 return -ENODEV;
2010 rc = netiucv_register_device(dev);
2011 if (rc) {
2012 IUCV_DBF_TEXT_(setup, 2,
2013 "ret %d from netiucv_register_device\n", rc);
2014 goto out_free_ndev;
2017 /* sysfs magic */
2018 priv = netdev_priv(dev);
2019 SET_NETDEV_DEV(dev, priv->dev);
2021 rc = register_netdev(dev);
2022 if (rc)
2023 goto out_unreg;
2025 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2027 return count;
2029 out_unreg:
2030 netiucv_unregister_device(priv->dev);
2031 out_free_ndev:
2032 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2033 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2034 netiucv_free_netdevice(dev);
2035 return rc;
2038 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
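/*
 * Usage example (user ID and sysfs path are illustrative): a new
 * interface for the z/VM guest TESTVM1 is created by writing its user ID
 * to the driver attribute, e.g.
 *
 *	echo TESTVM1 > /sys/bus/iucv/drivers/netiucv/connection
 *
 * which allocates an iucv%d net_device and registers the per-device
 * sysfs files defined above.
 */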
2040 static ssize_t remove_write (struct device_driver *drv,
2041 const char *buf, size_t count)
2043 struct iucv_connection *cp;
2044 struct net_device *ndev;
2045 struct netiucv_priv *priv;
2046 struct device *dev;
2047 char name[IFNAMSIZ];
2048 const char *p;
2049 int i;
2051 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2053 if (count >= IFNAMSIZ)
2054 count = IFNAMSIZ - 1;
2056 for (i = 0, p = buf; i < count && *p; i++, p++) {
2057 if (*p == '\n' || *p == ' ')
2058 /* trailing lf, grr */
2059 break;
2060 name[i] = *p;
2062 name[i] = '\0';
2064 read_lock_bh(&iucv_connection_rwlock);
2065 list_for_each_entry(cp, &iucv_connection_list, list) {
2066 ndev = cp->netdev;
2067 priv = netdev_priv(ndev);
2068 dev = priv->dev;
2069 if (strncmp(name, ndev->name, count))
2070 continue;
2071 read_unlock_bh(&iucv_connection_rwlock);
2072 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2073 PRINT_WARN("netiucv: net device %s active with peer "
2074 "%s\n", ndev->name, priv->conn->userid);
2075 PRINT_WARN("netiucv: %s cannot be removed\n",
2076 ndev->name);
2077 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2078 return -EBUSY;
2080 unregister_netdev(ndev);
2081 netiucv_unregister_device(dev);
2082 return count;
2084 read_unlock_bh(&iucv_connection_rwlock);
2085 PRINT_WARN("netiucv: net device %s unknown\n", name);
2086 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2087 return -EINVAL;
2090 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
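/*
 * Usage example (interface name and sysfs path are illustrative): an
 * interface that is down can be removed again with
 *
 *	echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * An interface that is still active is refused with -EBUSY.
 */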
2092 static struct attribute * netiucv_drv_attrs[] = {
2093 &driver_attr_connection.attr,
2094 &driver_attr_remove.attr,
2095 NULL,
2098 static struct attribute_group netiucv_drv_attr_group = {
2099 .attrs = netiucv_drv_attrs,
2102 static struct attribute_group *netiucv_drv_attr_groups[] = {
2103 &netiucv_drv_attr_group,
2104 NULL,
2107 static void netiucv_banner(void)
2109 PRINT_INFO("NETIUCV driver initialized\n");
2112 static void __exit netiucv_exit(void)
2114 struct iucv_connection *cp;
2115 struct net_device *ndev;
2116 struct netiucv_priv *priv;
2117 struct device *dev;
2119 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2120 while (!list_empty(&iucv_connection_list)) {
2121 cp = list_entry(iucv_connection_list.next,
2122 struct iucv_connection, list);
2123 ndev = cp->netdev;
2124 priv = netdev_priv(ndev);
2125 dev = priv->dev;
2127 unregister_netdev(ndev);
2128 netiucv_unregister_device(dev);
2131 driver_unregister(&netiucv_driver);
2132 iucv_unregister(&netiucv_handler, 1);
2133 iucv_unregister_dbf_views();
2135 PRINT_INFO("NETIUCV driver unloaded\n");
2136 return;
2139 static int __init netiucv_init(void)
2141 int rc;
2143 rc = iucv_register_dbf_views();
2144 if (rc)
2145 goto out;
2146 rc = iucv_register(&netiucv_handler, 1);
2147 if (rc)
2148 goto out_dbf;
2149 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2150 netiucv_driver.groups = netiucv_drv_attr_groups;
2151 rc = driver_register(&netiucv_driver);
2152 if (rc) {
2153 PRINT_ERR("NETIUCV: failed to register driver.\n");
2154 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2155 goto out_iucv;
2158 netiucv_banner();
2159 return rc;
2161 out_iucv:
2162 iucv_unregister(&netiucv_handler, 1);
2163 out_dbf:
2164 iucv_unregister_dbf_views();
2165 out:
2166 return rc;
2169 module_init(netiucv_init);
2170 module_exit(netiucv_exit);
2171 MODULE_LICENSE("GPL");