/*
 * IUCV network driver
 *
 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *
 * Sysfs integration and all bugs therein by Cornelia Huck
 * (cornelia.huck@de.ibm.com)
 *
 * Documentation used:
 *  the source of the original IUCV driver by:
 *    Stefan Hegewald <hegewald@de.ibm.com>
 *    Hartmut Penner <hpenner@de.ibm.com>
 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <net/iucv/iucv.h>
#include "fsm.h"
67 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
68 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
/*
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 32
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* Allow to sort out low debug levels early to avoid wasted sprints */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
	return (level <= dbf_grp->level);
}

#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
			char* iucv_dbf_txt_buf = \
					get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(iucv_dbf_txt_buf, text); \
			debug_text_event(iucv_dbf_##name, level, \
					 iucv_dbf_txt_buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)

#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
		debug_sprintf_event(iucv_dbf_trace, level, text ); \
	} while (0)
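
/*
 * Example usage (illustrative; mirrors calls made later in this file):
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
 *
 * IUCV_DBF_TEXT() logs a constant string to the named debug area, while
 * IUCV_DBF_TEXT_() first checks the area's level via iucv_dbf_passes() and
 * formats its arguments into the per-CPU text buffer before logging.
 */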
/*
 * some more debug stuff
 */
#define IUCV_HEXDUMP16(importance,header,ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
		   *(((char*)ptr)+30),*(((char*)ptr)+31));

#define PRINTK_HEADER " iucv: "       /* for debugging */
static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name  = "netiucv",
	.bus   = &iucv_bus,
};
static int netiucv_callback_connreq(struct iucv_path *,
				    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
/*
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	struct timespec send_stamp;
	unsigned long tx_pending;
	unsigned long tx_max_pending;
};
/*
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;
	struct iucv_path	  *path;
	struct sk_buff		  *rx_buff;
	struct sk_buff		  *tx_buff;
	struct sk_buff_head	  collect_queue;
	struct sk_buff_head	  commit_queue;
	spinlock_t		  collect_lock;
	int			  collect_len;
	int			  max_buffsize;
	fsm_timer		  timer;
	fsm_instance		  *fsm;
	struct net_device	  *netdev;
	struct connection_profile prof;
	char			  userid[9];
};
/*
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);
/*
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;
	void			*data;
};
/*
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;
	unsigned long		tbusy;
	fsm_instance		*fsm;
	struct iucv_connection	*conn;
	struct device		*dev;
};
/*
 * Link level header for a packet.
 */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 32768
#define NETIUCV_BUFSIZE_DEFAULT	 NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX		 (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT	 9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC	 5000
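
/*
 * Note (derived from the definitions above): with the two-byte ll_header,
 * NETIUCV_HDRLEN is 2, so NETIUCV_MTU_MAX works out to 32766 bytes, while
 * the default MTU of 9216 stays well below the 32768-byte default buffer.
 */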
/*
 * Compatibility macros for busy handling
 * of network devices.
 */
static inline void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}

static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}
static u8 iucvMagic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
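
/*
 * iucvMagic is EBCDIC text: 0xF0 is the digit '0' and 0x40 is a blank, so
 * the 16 bytes form two blank-padded 8-byte fields.  It is passed as user
 * data on path connect/sever and checked in netiucv_callback_connreq() to
 * filter incoming connection requests.
 */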
/*
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name)
{
	static char tmp[9];
	char *p = tmp;
	memcpy(tmp, name, 8);
	tmp[8] = '\0';
	while (*p && (!isspace(*p)))
		p++;
	*p = '\0';
	return tmp;
}
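
/*
 * Example: for a userid of "USER1   " (blank padded to 8 characters) the
 * function returns the static string "USER1"; callers such as the dev_warn()
 * messages in conn_action_start() use it purely for readable output.
 */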
/*
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/* MUST be always the last element!! */
	NR_DEV_STATES
};

static const char *dev_state_names[] = {
	"Stopped", "StartWait", "StopWait", "Running",
};

/*
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/* MUST be always the last element!! */
	NR_DEV_EVENTS
};

static const char *dev_event_names[] = {
	"Start", "Stop", "Connection up", "Connection down",
};

/*
 * Events of the connection statemachine
 */
enum conn_events {
	/*
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/*
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */

	/* Event, representing timer expiry. */
	CONN_EVENT_TIMER,

	/* Events, representing commands from upper levels. */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/* MUST be always the last element!! */
	NR_CONN_EVENTS,
};

static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",
	"Timer",
	"Start",
	"Stop",
};

/*
 * States of the connection statemachine.
 */
enum conn_states {
	/*
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/* Userid assigned but not operating */
	CONN_STATE_STOPPED,

	/*
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/*
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/* Connection up and running idle */
	CONN_STATE_IDLE,

	/* Data sent, awaiting CONN_EVENT_TXDONE */
	CONN_STATE_TX,

	/* Error during registration. */
	CONN_STATE_REGERR,

	/* Error during registration. */
	CONN_STATE_CONNERR,

	/* MUST be always the last element!! */
	NR_CONN_STATES,
};

static const char *conn_state_names[] = {
	"Invalid", "Stopped", "StartWait", "SetupWait", "Idle", "TX",
	"Terminating", "Registration error", "Connect error",
};
/*
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);

static void iucv_unregister_dbf_views(void)
{
	if (iucv_dbf_setup)
		debug_unregister(iucv_dbf_setup);
	if (iucv_dbf_data)
		debug_unregister(iucv_dbf_data);
	if (iucv_dbf_trace)
		debug_unregister(iucv_dbf_trace);
}
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
/*
 * Callback-wrappers, called from lowlevel iucv layer.
 */

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}

static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	int rc;

	if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
		/* ipuser must match iucvMagic. */
		return -EINVAL;
	rc = -EINVAL;
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}
static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
/*
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
 * Actions of the connection statemachine
 */

/*
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device     *dev = conn->netdev;
	struct netiucv_priv   *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		dev->last_rx = jiffies;
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
		}
	}
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
	}
	if (stat_maxcq > conn->prof.maxcqueue)
		conn->prof.maxcqueue = stat_maxcq;
}
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);
	iucv_path_sever(path, NULL);
}
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	dev_info(privptr->dev, "The peer interface of the IUCV device"
		" has closed the connection\n");
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
		netdev->name, conn->userid);

	/*
	 * We must set the state before calling iucv_connect because the
	 * callback handler could be called at any point after the connection
	 * request is sent
	 */

	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, iucvMagic, conn);
	switch (rc) {
	case 0:
		netdev->tx_queue_len = conn->path->msglim;
		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
			     CONN_EVENT_TIMER, conn);
		return;
	case 11:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to z/VM guest %s\n",
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 12:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to the peer on z/VM"
			" guest %s\n", netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 13:
		dev_err(privptr->dev,
			"Connecting the IUCV device would exceed the maximum"
			" number of IUCV connections\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 14:
		dev_err(privptr->dev,
			"z/VM guest %s has too many IUCV connections"
			" to connect with the IUCV device\n",
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 15:
		dev_err(privptr->dev,
			"The IUCV device cannot connect to a z/VM guest with no"
			" IUCV authorization\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	default:
		dev_err(privptr->dev,
			"Connecting the IUCV device failed with error %d\n",
			rc);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	}
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
	kfree(conn->path);
	conn->path = NULL;
}
static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		netdev->name, conn->userid);
}
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
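
/*
 * Each fsm_node maps a (state, event) pair to an action routine; fsm_event()
 * dispatches through this table.  For example, CONN_EVENT_CONN_REQ arriving
 * in CONN_STATE_STARTWAIT or CONN_STATE_SETUPWAIT is accepted via
 * conn_action_connaccept(), while the same event in CONN_STATE_IDLE or
 * CONN_STATE_TX is rejected via conn_action_connreject().
 */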
/*
 * Actions for interface - statemachine.
 */

/*
 * dev_action_start
 * @fi: An instance of an interface statemachine.
 * @event: The event, just happened.
 * @arg: Generic pointer, casted from struct net_device * upon call.
 *
 * Startup connection by sending CONN_EVENT_START to it.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
/*
 * Shutdown connection by sending CONN_EVENT_STOP to it.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event   ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
/*
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		fsm_newstate(fi, DEV_STATE_RUNNING);
		dev_info(privptr->dev,
			"The IUCV device has been connected"
			" successfully to %s\n", privptr->conn->userid);
		IUCV_DBF_TEXT(setup, 3,
			"connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		IUCV_DBF_TEXT(data, 2,
			"dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}
/*
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};

static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
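
/*
 * The device statemachine is instantiated per interface in
 * netiucv_init_netdevice() via init_fsm("netiucvdev", ...).  netiucv_open()
 * and netiucv_close() feed DEV_EVENT_START/STOP into it, and the connection
 * statemachine reports back with DEV_EVENT_CONUP/CONDOWN.
 */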
/*
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by netiucv_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				    NETIUCV_HDRLEN)) >> 31;
		int copied = 0;
		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				rc = -ENOMEM;
				return rc;
			} else {
				skb_reserve(nskb, NETIUCV_HDRLEN);
				memcpy(skb_put(nskb, skb->len),
				       skb->data, skb->len);
			}
			copied = 1;
		}
		/*
		 * skb now is below 2G and has enough room. Add headers.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		header.next = 0;
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = current_kernel_time();

		msg.tag = 1;
		msg.class = 0;
		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/*
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			if (copied)
				dev_kfree_skb(skb);
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}
/*
 * Interface API for upper network layers
 */

/*
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_open(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return 0;
}
/*
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_close(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	return 0;
}
/*
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is free'd by
 *               the generic network layer.
 */
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);
	/*
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
		IUCV_DBF_TEXT(data, 2,
			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * If connection is not running, try to restart it
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}

	if (netiucv_test_and_set_busy(dev)) {
		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
		return NETDEV_TX_BUSY;
	}
	dev->trans_start = jiffies;
	rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
	netiucv_clear_busy(dev);
	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/*
 * netiucv_stats
 * @dev: Pointer to interface struct.
 *
 * Returns interface statistics of a device.
 *
 * Returns pointer to stats struct of this interface.
 */
static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return &priv->stats;
}
/*
 * netiucv_change_mtu
 * @dev: Pointer to interface struct.
 * @new_mtu: The new MTU to use for this interface.
 *
 * Sets MTU of an interface.
 *
 * Returns 0 on success, -EINVAL if MTU is out of valid range.
 *         (valid range is 576 .. NETIUCV_MTU_MAX).
 */
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}
/*
 * attributes in sysfs
 */

static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}

static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char    *p;
	char    *tmp;
	char 	username[9];
	int 	i;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT_(setup, 2,
			       "%d is length of username\n", (int) count);
		return -EINVAL;
	}

	tmp = strsep((char **) &buf, "\n");
	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$')) {
			username[i]= toupper(*p);
			continue;
		}
		if (*p == '\n') {
			/* trailing lf, grr */
			break;
		}
		IUCV_DBF_TEXT_(setup, 2,
			       "username: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
				"to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
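
/*
 * Illustrative sysfs usage (the exact device name below is an assumption;
 * it is "net" prefixed to the interface name, see netiucv_register_device()):
 *
 *	cat  /sys/bus/iucv/devices/netiucv0/user
 *	echo PEERID > /sys/bus/iucv/devices/netiucv0/user
 *
 * user_write() upper-cases and blank-pads the peer user ID to 8 characters
 * and rejects the change while the interface is IFF_UP/IFF_RUNNING.
 */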
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}

static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;
	struct net_device *ndev = priv->conn->netdev;
	char         *e;
	int          bs1;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count >= 39)
		return -EINVAL;

	bs1 = simple_strtoul(buf, &e, 0);

	if (e && (!isspace(*e))) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);

static ssize_t conn_fsm_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
static ssize_t maxmulti_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

static ssize_t maxmulti_write (struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxmulti = 0;
	return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxcqueue = 0;
	return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.doios_single = 0;
	return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	priv->conn->prof.doios_multi = 0;
	return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev->driver_data;

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};
static int netiucv_add_files(struct device *dev)
{
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);
	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
	if (ret)
		return ret;
	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
	if (ret)
		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
	return ret;
}

static void netiucv_remove_files(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (dev) {
		dev_set_name(dev, "net%s", ndev->name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		/*
		 * The release function could be called after the
		 * module has been unloaded. It's _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitime ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
		dev->driver = &netiucv_driver;
	} else
		return -ENOMEM;

	ret = device_register(dev);
	if (ret)
		return ret;
	ret = netiucv_add_files(dev);
	if (ret)
		goto out_unreg;
	priv->dev = dev;
	dev->driver_data = priv;
	return 0;

out_unreg:
	device_unregister(dev);
	return ret;
}

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_remove_files(dev);
	device_unregister(dev);
}
/*
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections;
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
/*
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}
/*
 * Release everything of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return;

	if (privptr) {
		if (privptr->conn)
			netiucv_remove_connection(privptr->conn);
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		privptr->conn = NULL; privptr->fsm = NULL;
		/* privptr gets freed by free_netdev() */
	}
	free_netdev(dev);
}
/*
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
	.ndo_change_mtu		= netiucv_change_mtu,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu	         = NETIUCV_MTU_DEFAULT;
	dev->destructor          = netiucv_free_netdevice;
	dev->hard_header_len     = NETIUCV_HDRLEN;
	dev->addr_len            = 0;
	dev->type                = ARPHRD_SLIP;
	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops		 = &netiucv_netdev_ops;
}
/*
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	free_netdev(dev);
	return NULL;
}
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	const char *p;
	char username[9];
	int i, rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
				"to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdev(dev);
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been"
		" established successfully\n", netiucv_printname(username));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				" to %s and cannot be removed\n",
				priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);
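
/*
 * Illustrative usage of the two driver attributes (the sysfs path is an
 * assumption based on the iucv bus registration):
 *
 *	echo PEERID > /sys/bus/iucv/drivers/netiucv/connection
 *	echo iucv0  > /sys/bus/iucv/drivers/netiucv/remove
 *
 * conn_write() creates a new iucv<n> interface connected to z/VM guest
 * PEERID; remove_write() removes an interface again, but only while it is
 * not IFF_UP/IFF_RUNNING.
 */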
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
}
static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}

	netiucv_banner();
	return rc;

out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");