/*
 * Copyright IBM Corp. 2001, 2009
 *
 * Original netiucv driver:
 *	Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Sysfs integration and all bugs therein:
 *	Cornelia Huck (cornelia.huck@de.ibm.com)
 *	Ursula Braun (ursula.braun@de.ibm.com)
 *
 * the source of the original IUCV driver by:
 *	Stefan Hegewald <hegewald@de.ibm.com>
 *	Hartmut Penner <hpenner@de.ibm.com>
 *	Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *	Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/ebcdic.h>

#include <net/iucv/iucv.h>
MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
/*
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
/* Allow to sort out low debug levels early to avoid wasted sprintfs */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
	return (level <= dbf_grp->level);
}
#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
		debug_sprintf_event(iucv_dbf_trace, level, text ); \
	} while (0)
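/*
 * Usage sketch (illustrative only): the first two lines mirror call sites
 * that appear later in this file, the IUCV_DBF_HEX line is a hypothetical
 * example of the hex-dump variant defined above.  The plain macros log a
 * fixed string, the underscore variant formats into the per-CPU buffer
 * first:
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 *	IUCV_DBF_HEX(data, 2, skb->data, skb->len);
 *
 * All of them rely on the debug areas registered by
 * iucv_register_dbf_views() below.
 */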
/*
 * some more debug stuff
 */
#define PRINTK_HEADER " iucv: "       /* for debugging */
/* dummy device to make sure netiucv_pm functions are called */
static struct device *netiucv_dev;

static int netiucv_pm_prepare(struct device *);
static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);
static const struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};
static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.pm = &netiucv_pm_ops,
};
static int netiucv_callback_connreq(struct iucv_path *,
				    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
/**
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long tx_time;
	struct timespec send_stamp;
	unsigned long tx_pending;
	unsigned long tx_max_pending;
};
/**
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;
	struct iucv_path	  *path;
	struct sk_buff		  *rx_buff;
	struct sk_buff		  *tx_buff;
	struct sk_buff_head	  collect_queue;
	struct sk_buff_head	  commit_queue;
	spinlock_t		  collect_lock;
	struct net_device	  *netdev;
	struct connection_profile prof;
};
/**
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);
/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;
};
/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;
	struct iucv_connection	*conn;
};
/**
 * Link level header for a packet.
 */
#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 65537
#define NETIUCV_BUFSIZE_DEFAULT	 NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX		 (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT	 9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC	 5000
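/*
 * Sizing note (not part of the original source): every packet carried over
 * the IUCV path is framed with an ll_header of NETIUCV_HDRLEN bytes, so the
 * writable buffer size and the MTU are coupled.  netiucv_change_mtu() and
 * buffer_write() further down enforce, roughly:
 *
 *	new_mtu   <= NETIUCV_MTU_MAX  (== NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 *	bufsize   >= ndev->mtu + NETIUCV_HDRLEN + 2   while the device is running
 *	ndev->mtu  = bufsize - 2 * NETIUCV_HDRLEN     when the buffer is resized
 *	             while the device is down
 */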
/**
 * Compatibility helpers for busy handling
 * of network devices.
 */
static inline void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}
static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}
static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
/**
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name, int len)
	memcpy(tmp, name, len);
	while (*p && ((p - tmp) < len) && (!isspace(*p)))
static char *netiucv_printuser(struct iucv_connection *conn)
{
	static char tmp_uid[9];
	static char tmp_udat[17];
	static char buf[100];

	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
		memcpy(tmp_uid, conn->userid, 8);
		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
		memcpy(tmp_udat, conn->userdata, 16);
		EBCASC(tmp_udat, 16);
		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
		return buf;
	} else
		return netiucv_printname(conn->userid, 8);
}
/**
 * States of the interface statemachine.
 */

	/*
	 * MUST always be the last element!!
	 */

static const char *dev_state_names[] = {

/**
 * Events of the interface statemachine.
 */

	/*
	 * MUST always be the last element!!
	 */

static const char *dev_event_names[] = {
/**
 * Events of the connection statemachine
 */

	/*
	 * Events, representing callbacks from
	 * lowlevel iucv layer
	 */

	/*
	 * Events, representing error return codes from
	 * calls to lowlevel iucv layer
	 */

	/*
	 * Event, representing timer expiry.
	 */

	/*
	 * Events, representing commands from upper levels.
	 */

	/*
	 * MUST always be the last element!!
	 */

static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
/**
 * States of the connection statemachine.
 */

	/*
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */

	/*
	 * Userid assigned but not operating
	 */

	/*
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/*
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/*
	 * Connection up and running idle
	 */

	/*
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */

	/*
	 * Error during registration.
	 */

	/*
	 * Error during registration.
	 */

	/*
	 * MUST always be the last element!!
	 */

static const char *conn_state_names[] = {
	"Registration error",
/**
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
/*
 * Callback-wrappers, called from lowlevel iucv layer.
 */

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}
static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];

	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection for this path. */
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
}
static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
/**
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
 * Actions of the connection statemachine
 */

/**
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device     *dev = conn->netdev;
	struct netiucv_priv   *privptr = netdev_priv(dev);

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);

	struct ll_header *header = (struct ll_header *) pskb->data;

	skb_pull(pskb, NETIUCV_HDRLEN);
	header->next -= offset;
	offset += header->next;
	header->next -= NETIUCV_HDRLEN;
	if (skb_tailroom(pskb) < header->next) {
		IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
			       header->next, skb_tailroom(pskb));
	}
	skb_put(pskb, header->next);
	skb_reset_mac_header(pskb);
	skb = dev_alloc_skb(pskb->len);
	IUCV_DBF_TEXT(data, 2,
		      "Out of memory in netiucv_unpack_skb\n");
	privptr->stats.rx_dropped++;
	skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
				  pskb->len);
	skb_reset_mac_header(skb);
	skb->dev = pskb->dev;
	skb->protocol = pskb->protocol;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	privptr->stats.rx_packets++;
	privptr->stats.rx_bytes += skb->len;
	/*
	 * Since receiving is always initiated from a tasklet (in iucv.c),
	 * we must use netif_rx_ni() instead of netif_rx()
	 */
	skb_pull(pskb, header->next);
	skb_put(pskb, NETIUCV_HDRLEN);
}
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	unsigned long saveflags;
	struct ll_header header;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if ((skb = skb_dequeue(&conn->commit_queue))) {
		atomic_dec(&skb->users);
		privptr->stats.tx_packets++;
		privptr->stats.tx_bytes +=
			(skb->len - NETIUCV_HDRLEN
				  - NETIUCV_HDRLEN);
		dev_kfree_skb_any(skb);
	}
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		privptr->stats.tx_packets += txpackets;
		privptr->stats.tx_bytes += txbytes;
	}
	if (stat_maxcq > conn->prof.maxcqueue)
		conn->prof.maxcqueue = stat_maxcq;
}
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);
	iucv_path_sever(path, NULL);
}
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
		 "connection\n", netiucv_printuser(conn));
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);

	/*
	 * We must set the state before calling iucv_connect because the
	 * callback handler could be called at any point after the connection
	 * request is sent
	 */
	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
		       netdev->name, netiucv_printuser(conn));

	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, conn->userdata, conn);
	switch (rc) {
	case 0:
		netdev->tx_queue_len = conn->path->msglim;
		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
			     CONN_EVENT_TIMER, conn);
		return;
	case 11:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to z/VM guest %s\n",
			netiucv_printname(conn->userid, 8));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 12:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to the peer on z/VM"
			" guest %s\n", netiucv_printname(conn->userid, 8));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 13:
		dev_err(privptr->dev,
			"Connecting the IUCV device would exceed the maximum"
			" number of IUCV connections\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 14:
		dev_err(privptr->dev,
			"z/VM guest %s has too many IUCV connections"
			" to connect with the IUCV device\n",
			netiucv_printname(conn->userid, 8));
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 15:
		dev_err(privptr->dev,
			"The IUCV device cannot connect to a z/VM guest with no"
			" IUCV authorization\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	default:
		dev_err(privptr->dev,
			"Connecting the IUCV device failed with error %d\n",
			rc);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	}
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
}
static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
	iucv_path_sever(conn->path, conn->userdata);
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		       netdev->name, conn->userid);
}
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
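/*
 * Dispatch sketch (illustrative; the generic fsm helper shared by the s390
 * network drivers is not reproduced in this file, and the member names
 * cond_state/cond_event/function are assumptions): conceptually, fsm_event()
 * resolves the (current state, event) pair against a table like conn_fsm[]
 * and runs the registered action, e.g.
 *
 *	for (i = 0; i < CONN_FSM_LEN; i++)
 *		if (conn_fsm[i].cond_state == fsm_getstate(fi) &&
 *		    conn_fsm[i].cond_event == event) {
 *			conn_fsm[i].function(fi, event, arg);
 *			break;
 *		}
 *
 * so every conn_action_* above is only ever entered in the states listed in
 * this table.
 */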
/*
 * Actions for interface - statemachine.
 */

/**
 * @fi: An instance of an interface statemachine.
 * @event: The event that just happened.
 * @arg: Generic pointer, cast from struct net_device * upon call.
 *
 * Startup connection by sending CONN_EVENT_START to it.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
/**
 * Shutdown connection by sending CONN_EVENT_STOP to it.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
 */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event   ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
/**
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		fsm_newstate(fi, DEV_STATE_RUNNING);
		dev_info(privptr->dev,
			 "The IUCV device has been connected"
			 " successfully to %s\n",
			 netiucv_printuser(privptr->conn));
		IUCV_DBF_TEXT(setup, 3,
			      "connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		IUCV_DBF_TEXT(data, 2,
			      "dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}
/**
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};

static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
/**
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *
 * @return 0 on success, -ERRNO on failure.
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				   NETIUCV_HDRLEN)) >> 31;

		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
			skb_reserve(nskb, NETIUCV_HDRLEN);
			memcpy(skb_put(nskb, skb->len),
			       skb->data, skb->len);
		}
		/*
		 * skb now is below 2G and has enough room. Add headers.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = current_kernel_time();

		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			privptr->stats.tx_errors++;
			dev_kfree_skb(nskb);
			/*
			 * Remove our headers. They get added
			 * again on retransmit.
			 */
			skb_pull(skb, NETIUCV_HDRLEN);
			skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}
/*
 * Interface API for upper network layers
 */

/**
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_open(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return 0;
}
/**
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_close(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	return 0;
}
static int netiucv_pm_prepare(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return 0;
}

static void netiucv_pm_complete(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return;
}
/**
 * netiucv_pm_freeze() - Freeze PM callback
 * @dev: netiucv device
 *
 * close open netiucv interfaces
 */
static int netiucv_pm_freeze(struct device *dev)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = NULL;
	int rc = 0;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (priv && priv->conn)
		ndev = priv->conn->netdev;
	if (!ndev)
		goto out;
	netif_device_detach(ndev);
	priv->pm_state = fsm_getstate(priv->fsm);
	rc = netiucv_close(ndev);
out:
	return rc;
}
/**
 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev: netiucv device
 *
 * re-open netiucv interfaces closed during freeze
 */
static int netiucv_pm_restore_thaw(struct device *dev)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = NULL;
	int rc = 0;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (priv && priv->conn)
		ndev = priv->conn->netdev;
	if (!ndev)
		goto out;
	switch (priv->pm_state) {
	case DEV_STATE_RUNNING:
	case DEV_STATE_STARTWAIT:
		rc = netiucv_open(ndev);
		break;
	default:
		break;
	}
	netif_device_attach(ndev);
out:
	return rc;
}
/**
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is freed by
 *               the generic network layer.
 */
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);
	/*
	 * Some sanity checks ...
	 */
	if (!skb) {
		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
		IUCV_DBF_TEXT(data, 2,
			      "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * If connection is not running, try to restart it
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}

	if (netiucv_test_and_set_busy(dev)) {
		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
		return NETDEV_TX_BUSY;
	}
	dev->trans_start = jiffies;
	rc = netiucv_transmit_skb(privptr->conn, skb);
	netiucv_clear_busy(dev);
	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/**
 * @dev: Pointer to interface struct.
 *
 * Returns interface statistics of a device.
 *
 * Returns pointer to stats struct of this interface.
 */
static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return &priv->stats;
}
/**
 * netiucv_change_mtu
 * @dev: Pointer to interface struct.
 * @new_mtu: The new MTU to use for this interface.
 *
 * Sets MTU of an interface.
 *
 * Returns 0 on success, -EINVAL if MTU is out of valid range.
 *         (valid range is 576 .. NETIUCV_MTU_MAX).
 */
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}
/*
 * attributes in sysfs
 */

static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
static int netiucv_check_user(const char *buf, size_t count, char *username,
			      char *userdata)
{
	p = strchr(buf, '.');
	if ((p && ((count > 26) ||
		   (buf + count - p > 18))) ||
	    (!p && (count > 9))) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		/* trailing lf, grr */
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %02x\n", *p);
		return -EINVAL;
	}
	username[i++] = ' ';
	for (i = 0; i < 16 && *p; i++, p++) {
		userdata[i] = toupper(*p);
	}
	while (i > 0 && i < 16)
		userdata[i++] = ' ';
	memcpy(userdata, iucvMagic_ascii, 16);
	userdata[16] = '\0';
	ASCEBC(userdata, 16);

	return 0;
}
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
	}
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17) &&
		    cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
				       "already exists\n", netiucv_printuser(cp));
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	memcpy(priv->conn->userdata, userdata, 17);
	return count;
}
static DEVICE_ATTR(user, 0644, user_show, user_write);
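/*
 * Administration sketch (illustrative; the device directory name assumes the
 * "net%s" naming used by netiucv_register_device() further down and the
 * standard iucv bus layout in sysfs): the peer of an existing, inactive
 * interface can be changed by writing "USERID" or "USERID.userdata" to the
 * per-device "user" attribute, e.g.
 *
 *	echo "VMLINUX2" > /sys/bus/iucv/devices/netiucv0/user
 *
 * user_write() above rejects the update while the interface is up or while
 * another connection to the same peer already exists.
 */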
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	bs1 = simple_strtoul(buf, &e, 0);

	if (e && (!isspace(*e))) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
			       *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: buffer size %d too large\n",
			       bs1);
		return -EINVAL;
	}
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: buffer size %d too small\n",
			       bs1);
		return -EINVAL;
	}
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: buffer size %d too small\n",
			       bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
static ssize_t conn_fsm_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
static ssize_t maxmulti_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

static ssize_t maxmulti_write (struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxmulti = 0;
	return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxcqueue = 0;
	return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.doios_single = 0;
	return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	priv->conn->prof.doios_multi = 0;
	return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.attrs = netiucv_stat_attrs,
};

static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	dev_set_name(dev, "net%s", ndev->name);
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	dev->groups = netiucv_attr_groups;
	/*
	 * The release function could be called after the
	 * module has been unloaded.  Its _only_ task is to
	 * free the struct.  Therefore, we specify kfree()
	 * directly here.  (Probably a little bit obfuscating
	 * but legitimate ...).
	 */
	dev->release = (void (*)(struct device *))kfree;
	dev->driver = &netiucv_driver;

	ret = device_register(dev);
	if (ret)
		return ret;
	dev_set_drvdata(dev, priv);
	return 0;
}
static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}
/**
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections;
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username,
						      char *userdata)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	memcpy(conn->userdata, userdata, 17);
	memcpy(conn->userid, username, 9);
	fsm_newstate(conn->fsm, CONN_STATE_STOPPED);

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
/**
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	iucv_path_sever(conn->path, conn->userdata);
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}
/**
 * Release everything of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	netiucv_remove_connection(privptr->conn);
	kfree_fsm(privptr->fsm);
	privptr->conn = NULL; privptr->fsm = NULL;
	/* privptr gets freed by free_netdev() */
}
/**
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
	.ndo_change_mtu		= netiucv_change_mtu,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu	     = NETIUCV_MTU_DEFAULT;
	dev->destructor      = netiucv_free_netdevice;
	dev->hard_header_len = NETIUCV_HDRLEN;
	dev->type            = ARPHRD_SLIP;
	dev->tx_queue_len    = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags	     = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops	     = &netiucv_netdev_ops;
}
/**
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username, userdata);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	free_netdev(dev);
	return NULL;
}
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				       "already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			       "ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		 netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	char name[IFNAMSIZ];

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				      " to %s and cannot be removed\n",
				 priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);
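/*
 * Usage sketch (illustrative; the sysfs paths assume the standard layout
 * for a device_driver registered on the iucv bus and are not quoted from
 * this file): interfaces are created and removed through the driver
 * attributes defined above, e.g.
 *
 *	echo "USERID" > /sys/bus/iucv/drivers/netiucv/connection
 *	echo "iucv0"  > /sys/bus/iucv/drivers/netiucv/remove
 *
 * conn_write() parses the z/VM user ID (optionally "USERID.userdata"),
 * allocates the net_device and connection, and registers both;
 * remove_write() looks the interface up by name and unregisters it as long
 * as it is not up and running.
 */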
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	device_unregister(netiucv_dev);
	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
}
static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}
	/* establish dummy device */
	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!netiucv_dev) {
		rc = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(netiucv_dev, "netiucv");
	netiucv_dev->bus = &iucv_bus;
	netiucv_dev->parent = iucv_root;
	netiucv_dev->release = (void (*)(struct device *))kfree;
	netiucv_dev->driver = &netiucv_driver;
	rc = device_register(netiucv_dev);
	if (rc) {
		put_device(netiucv_dev);
		goto out_driver;
	}
	netiucv_banner();
	return rc;

out_driver:
	driver_unregister(&netiucv_driver);
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");