4 * Copyright IBM Corp. 2001, 2009
7 * Original netiucv driver:
8 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9 * Sysfs integration and all bugs therein:
10 * Cornelia Huck (cornelia.huck@de.ibm.com)
12 * Ursula Braun (ursula.braun@de.ibm.com)
15 * the source of the original IUCV driver by:
16 * Stefan Hegewald <hegewald@de.ibm.com>
17 * Hartmut Penner <hpenner@de.ibm.com>
18 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19 * Martin Schwidefsky (schwidefsky@de.ibm.com)
20 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2, or (at your option)
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
38 #define KMSG_COMPONENT "netiucv"
39 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/kernel.h>
46 #include <linux/slab.h>
47 #include <linux/errno.h>
48 #include <linux/types.h>
49 #include <linux/interrupt.h>
50 #include <linux/timer.h>
51 #include <linux/bitops.h>
53 #include <linux/signal.h>
54 #include <linux/string.h>
55 #include <linux/device.h>
58 #include <linux/if_arp.h>
59 #include <linux/tcp.h>
60 #include <linux/skbuff.h>
61 #include <linux/ctype.h>
65 #include <asm/uaccess.h>
66 #include <asm/ebcdic.h>
68 #include <net/iucv/iucv.h>
72 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
73 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
76 * Debug Facility stuff
78 #define IUCV_DBF_SETUP_NAME "iucv_setup"
79 #define IUCV_DBF_SETUP_LEN 64
80 #define IUCV_DBF_SETUP_PAGES 2
81 #define IUCV_DBF_SETUP_NR_AREAS 1
82 #define IUCV_DBF_SETUP_LEVEL 3
84 #define IUCV_DBF_DATA_NAME "iucv_data"
85 #define IUCV_DBF_DATA_LEN 128
86 #define IUCV_DBF_DATA_PAGES 2
87 #define IUCV_DBF_DATA_NR_AREAS 1
88 #define IUCV_DBF_DATA_LEVEL 2
90 #define IUCV_DBF_TRACE_NAME "iucv_trace"
91 #define IUCV_DBF_TRACE_LEN 16
92 #define IUCV_DBF_TRACE_PAGES 4
93 #define IUCV_DBF_TRACE_NR_AREAS 1
94 #define IUCV_DBF_TRACE_LEVEL 3
96 #define IUCV_DBF_TEXT(name,level,text) \
98 debug_text_event(iucv_dbf_##name,level,text); \
101 #define IUCV_DBF_HEX(name,level,addr,len) \
103 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
106 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
108 #define IUCV_DBF_TEXT_(name, level, text...) \
110 if (debug_level_enabled(iucv_dbf_##name, level)) { \
111 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
112 sprintf(__buf, text); \
113 debug_text_event(iucv_dbf_##name, level, __buf); \
114 put_cpu_var(iucv_dbf_txt_buf); \
118 #define IUCV_DBF_SPRINTF(name,level,text...) \
120 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
121 debug_sprintf_event(iucv_dbf_trace, level, text ); \
125 * some more debug stuff
127 #define PRINTK_HEADER " iucv: " /* for debugging */
129 /* dummy device to make sure netiucv_pm functions are called */
130 static struct device *netiucv_dev;
132 static int netiucv_pm_prepare(struct device *);
133 static void netiucv_pm_complete(struct device *);
134 static int netiucv_pm_freeze(struct device *);
135 static int netiucv_pm_restore_thaw(struct device *);
137 static const struct dev_pm_ops netiucv_pm_ops = {
138 	.prepare = netiucv_pm_prepare,
139 	.complete = netiucv_pm_complete,
140 	.freeze = netiucv_pm_freeze,
141 	.thaw = netiucv_pm_restore_thaw,
142 	.restore = netiucv_pm_restore_thaw,
145 static struct device_driver netiucv_driver = {
146 	.owner = THIS_MODULE,
149 	.pm = &netiucv_pm_ops,
152 static int netiucv_callback_connreq(struct iucv_path *,
153 				    u8 ipvmid[8], u8 ipuser[16]);
154 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
155 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
156 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
157 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
158 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
159 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
161 static struct iucv_handler netiucv_handler = {
162 	.path_pending	  = netiucv_callback_connreq,
163 	.path_complete	  = netiucv_callback_connack,
164 	.path_severed	  = netiucv_callback_connrej,
165 	.path_quiesced	  = netiucv_callback_connsusp,
166 	.path_resumed	  = netiucv_callback_connres,
167 	.message_pending  = netiucv_callback_rx,
168 	.message_complete = netiucv_callback_txdone
172 * Per connection profiling data
174 struct connection_profile {
175 	unsigned long maxmulti;
176 	unsigned long maxcqueue;
177 	unsigned long doios_single;
178 	unsigned long doios_multi;
180 	unsigned long tx_time;
181 	struct timespec send_stamp;
182 	unsigned long tx_pending;
183 	unsigned long tx_max_pending;
187 * Representation of one iucv connection
189 struct iucv_connection {
190 	struct list_head	  list;
191 	struct iucv_path	  *path;
192 	struct sk_buff		  *rx_buff;
193 	struct sk_buff		  *tx_buff;
194 	struct sk_buff_head	  collect_queue;
195 	struct sk_buff_head	  commit_queue;
196 	spinlock_t		  collect_lock;
201 	struct net_device	  *netdev;
202 	struct connection_profile prof;
208 * Linked list of all connection structs.
210 static LIST_HEAD(iucv_connection_list);
211 static DEFINE_RWLOCK(iucv_connection_rwlock);
214 * Representation of event-data for the
215 * connection state machine.
218 	struct iucv_connection *conn;
223 * Private part of the network device structure
225 struct netiucv_priv {
226 	struct net_device_stats stats;
229 	struct iucv_connection	*conn;
235 * Link level header for a packet.
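/*
 * Editor's note: the struct ll_header definition itself is not part of this
 * excerpt; NETIUCV_HDRLEN below is sizeof(struct ll_header).  A minimal
 * sketch is shown here, assuming the link level header carries only the
 * 16-bit offset of the next packet within the IUCV buffer (this is how
 * header->next is used in netiucv_unpack_skb() and conn_action_txdone()).
 */
struct ll_header {
	u16 next;
};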
241 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
242 #define NETIUCV_BUFSIZE_MAX 65537
243 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
244 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
245 #define NETIUCV_MTU_DEFAULT 9216
246 #define NETIUCV_QUEUELEN_DEFAULT 50
247 #define NETIUCV_TIMEOUT_5SEC 5000
250 * Compatibility macros for busy handling
251 * of network devices.
253 static inline void netiucv_clear_busy(struct net_device *dev)
255 	struct netiucv_priv *priv = netdev_priv(dev);
256 	clear_bit(0, &priv->tbusy);
257 	netif_wake_queue(dev);
260 static inline int netiucv_test_and_set_busy(struct net_device *dev)
262 	struct netiucv_priv *priv = netdev_priv(dev);
263 	netif_stop_queue(dev);
264 	return test_and_set_bit(0, &priv->tbusy);
267 static u8 iucvMagic_ascii[16] = {
268 	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
269 	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
272 static u8 iucvMagic_ebcdic[16] = {
273 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
274 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
278 * Convert an iucv userId to its printable
279 * form (strip whitespace at end).
281 * @param An iucv userId
283 * @returns The printable string (static data!!)
285 static char *netiucv_printname(char *name, int len)
289 	memcpy(tmp, name, len);
291 	while (*p && ((p - tmp) < len) && (!isspace(*p)))
297 static char *netiucv_printuser(struct iucv_connection *conn)
299 	static char tmp_uid[9];
300 	static char tmp_udat[17];
301 	static char buf[100];
303 	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
306 		memcpy(tmp_uid, conn->userid, 8);
307 		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
308 		memcpy(tmp_udat, conn->userdata, 16);
309 		EBCASC(tmp_udat, 16);
310 		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
311 		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
314 		return netiucv_printname(conn->userid, 8);
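/*
 * Editor's note (usage hint, not part of the original source): both helpers
 * above return pointers into static buffers, so the result is only valid
 * until the next call and must be consumed immediately, e.g.
 *
 *	dev_info(privptr->dev, "connected to %s\n", netiucv_printuser(conn));
 *
 * For a connection using the default iucvMagic userdata this prints just the
 * trimmed z/VM user ID; otherwise it prints "USERID.userdata".
 */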
318 * States of the interface statemachine.
326 * MUST be always the last element!!
331 static const char *dev_state_names[] = {
339 * Events of the interface statemachine.
347 * MUST be always the last element!!
352 static const char *dev_event_names[] = {
360 * Events of the connection statemachine
364 * Events, representing callbacks from
365 * the lowlevel iucv layer
376 * Events, representing errors return codes from
377 * calls to lowlevel iucv layer
381 * Event, representing timer expiry.
386 * Events, representing commands from upper levels.
392 * MUST be always the last element!!
397 static const char *conn_event_names[] = {
398 "Remote connection request",
399 "Remote connection acknowledge",
400 "Remote connection reject",
401 "Connection suspended",
402 "Connection resumed",
413 * States of the connection statemachine.
417 * Connection not assigned to any device,
418 * initial state, invalid
423 * Userid assigned but not operating
428 * Connection registered,
429 * no connection request sent yet,
430 * no connection request received
432 	CONN_STATE_STARTWAIT,
435 * Connection registered and connection request sent,
436 * no acknowledge and no connection request received yet.
438 	CONN_STATE_SETUPWAIT,
441 * Connection up and running idle
446 * Data sent, awaiting CONN_EVENT_TXDONE
451 * Error during registration.
456 * Error during registration.
461 * MUST be always the last element!!
466 static const char *conn_state_names[] = {
474 "Registration error",
480 * Debug Facility Stuff
482 static debug_info_t *iucv_dbf_setup = NULL;
483 static debug_info_t *iucv_dbf_data = NULL;
484 static debug_info_t *iucv_dbf_trace = NULL;
486 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
488 static void iucv_unregister_dbf_views(void)
491 	debug_unregister(iucv_dbf_setup);
493 	debug_unregister(iucv_dbf_data);
495 	debug_unregister(iucv_dbf_trace);
497 static int iucv_register_dbf_views(void)
499 	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
500 					IUCV_DBF_SETUP_PAGES,
501 					IUCV_DBF_SETUP_NR_AREAS,
503 	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
505 				       IUCV_DBF_DATA_NR_AREAS,
507 	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
508 					IUCV_DBF_TRACE_PAGES,
509 					IUCV_DBF_TRACE_NR_AREAS,
512 	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
513 	    (iucv_dbf_trace == NULL)) {
514 		iucv_unregister_dbf_views();
517 	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
518 	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
520 	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
521 	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
523 	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
524 	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
530 * Callback-wrappers, called from lowlevel iucv layer.
533 static void netiucv_callback_rx(struct iucv_path *path,
534 				struct iucv_message *msg)
536 	struct iucv_connection *conn = path->private;
537 	struct iucv_event ev;
541 	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
544 static void netiucv_callback_txdone(struct iucv_path *path,
545 				    struct iucv_message *msg)
547 	struct iucv_connection *conn = path->private;
548 	struct iucv_event ev;
552 	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
555 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
557 	struct iucv_connection *conn = path->private;
559 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
562 static int netiucv_callback_connreq(struct iucv_path *path,
563 				    u8 ipvmid[8], u8 ipuser[16])
565 	struct iucv_connection *conn = path->private;
566 	struct iucv_event ev;
567 	static char tmp_user[9];
568 	static char tmp_udat[17];
572 	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
573 	memcpy(tmp_udat, ipuser, 16);
574 	EBCASC(tmp_udat, 16);
575 	read_lock_bh(&iucv_connection_rwlock);
576 	list_for_each_entry(conn, &iucv_connection_list, list) {
577 		if (strncmp(ipvmid, conn->userid, 8) ||
578 		    strncmp(ipuser, conn->userdata, 16))
580 		/* Found a matching connection for this path. */
584 		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
587 	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
588 		       tmp_user, netiucv_printname(tmp_udat, 16));
589 	read_unlock_bh(&iucv_connection_rwlock);
593 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
595 	struct iucv_connection *conn = path->private;
597 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
600 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
602 	struct iucv_connection *conn = path->private;
604 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
607 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
609 	struct iucv_connection *conn = path->private;
611 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
615 * NOP action for statemachines
617 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
622 * Actions of the connection statemachine
627 * @conn: The connection where this skb has been received.
628 * @pskb: The received skb.
630 * Unpack a just received skb and hand it over to upper layers.
631 * Helper function for conn_action_rx.
633 static void netiucv_unpack_skb(struct iucv_connection *conn,
634 			       struct sk_buff *pskb)
636 	struct net_device     *dev = conn->netdev;
637 	struct netiucv_priv   *privptr = netdev_priv(dev);
640 	skb_put(pskb, NETIUCV_HDRLEN);
642 	pskb->ip_summed = CHECKSUM_NONE;
643 	pskb->protocol = ntohs(ETH_P_IP);
647 		struct ll_header *header = (struct ll_header *) pskb->data;
652 		skb_pull(pskb, NETIUCV_HDRLEN);
653 		header->next -= offset;
654 		offset += header->next;
655 		header->next -= NETIUCV_HDRLEN;
656 		if (skb_tailroom(pskb) < header->next) {
657 			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
658 				       header->next, skb_tailroom(pskb));
661 		skb_put(pskb, header->next);
662 		skb_reset_mac_header(pskb);
663 		skb = dev_alloc_skb(pskb->len);
665 			IUCV_DBF_TEXT(data, 2,
666 				      "Out of memory in netiucv_unpack_skb\n");
667 			privptr->stats.rx_dropped++;
670 		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
672 		skb_reset_mac_header(skb);
673 		skb->dev = pskb->dev;
674 		skb->protocol = pskb->protocol;
675 		pskb->ip_summed = CHECKSUM_UNNECESSARY;
676 		privptr->stats.rx_packets++;
677 		privptr->stats.rx_bytes += skb->len;
679 		 * Since receiving is always initiated from a tasklet (in iucv.c),
680 		 * we must use netif_rx_ni() instead of netif_rx()
683 		skb_pull(pskb, header->next);
684 		skb_put(pskb, NETIUCV_HDRLEN);
688 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
690 	struct iucv_event *ev = arg;
691 	struct iucv_connection *conn = ev->conn;
692 	struct iucv_message *msg = ev->data;
693 	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
696 	IUCV_DBF_TEXT(trace, 4, __func__);
699 		iucv_message_reject(conn->path, msg);
700 		IUCV_DBF_TEXT(data, 2,
701 			      "Received data for unlinked connection\n");
704 	if (msg->length > conn->max_buffsize) {
705 		iucv_message_reject(conn->path, msg);
706 		privptr->stats.rx_dropped++;
707 		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
708 			       msg->length, conn->max_buffsize);
711 	conn->rx_buff->data = conn->rx_buff->head;
712 	skb_reset_tail_pointer(conn->rx_buff);
713 	conn->rx_buff->len = 0;
714 	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
716 	if (rc || msg->length < 5) {
717 		privptr->stats.rx_errors++;
718 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
721 	netiucv_unpack_skb(conn, conn->rx_buff);
724 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
726 	struct iucv_event *ev = arg;
727 	struct iucv_connection *conn = ev->conn;
728 	struct iucv_message *msg = ev->data;
729 	struct iucv_message txmsg;
730 	struct netiucv_priv *privptr = NULL;
731 	u32 single_flag = msg->tag;
736 	unsigned long saveflags;
737 	struct ll_header header;
740 	IUCV_DBF_TEXT(trace, 4, __func__);
742 	if (!conn || !conn->netdev) {
743 		IUCV_DBF_TEXT(data, 2,
744 			      "Send confirmation for unlinked connection\n");
747 	privptr = netdev_priv(conn->netdev);
748 	conn->prof.tx_pending--;
750 		if ((skb = skb_dequeue(&conn->commit_queue))) {
751 			atomic_dec(&skb->users);
753 				privptr->stats.tx_packets++;
754 				privptr->stats.tx_bytes +=
755 					(skb->len - NETIUCV_HDRLEN
758 			dev_kfree_skb_any(skb);
761 	conn->tx_buff->data = conn->tx_buff->head;
762 	skb_reset_tail_pointer(conn->tx_buff);
763 	conn->tx_buff->len = 0;
764 	spin_lock_irqsave(&conn->collect_lock, saveflags);
765 	while ((skb = skb_dequeue(&conn->collect_queue))) {
766 		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
767 		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
769 		skb_copy_from_linear_data(skb,
770 					  skb_put(conn->tx_buff, skb->len),
775 		atomic_dec(&skb->users);
776 		dev_kfree_skb_any(skb);
778 	if (conn->collect_len > conn->prof.maxmulti)
779 		conn->prof.maxmulti = conn->collect_len;
780 	conn->collect_len = 0;
781 	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
782 	if (conn->tx_buff->len == 0) {
783 		fsm_newstate(fi, CONN_STATE_IDLE);
788 	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
789 	conn->prof.send_stamp = current_kernel_time();
792 	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
793 			       conn->tx_buff->data, conn->tx_buff->len);
794 	conn->prof.doios_multi++;
795 	conn->prof.txlen += conn->tx_buff->len;
796 	conn->prof.tx_pending++;
797 	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
798 		conn->prof.tx_max_pending = conn->prof.tx_pending;
800 		conn->prof.tx_pending--;
801 		fsm_newstate(fi, CONN_STATE_IDLE);
803 			privptr->stats.tx_errors += txpackets;
804 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
807 			privptr->stats.tx_packets += txpackets;
808 			privptr->stats.tx_bytes += txbytes;
810 	if (stat_maxcq > conn->prof.maxcqueue)
811 		conn->prof.maxcqueue = stat_maxcq;
815 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
817 	struct iucv_event *ev = arg;
818 	struct iucv_connection *conn = ev->conn;
819 	struct iucv_path *path = ev->data;
820 	struct net_device *netdev = conn->netdev;
821 	struct netiucv_priv *privptr = netdev_priv(netdev);
824 	IUCV_DBF_TEXT(trace, 3, __func__);
827 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
829 	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
831 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
834 	fsm_newstate(fi, CONN_STATE_IDLE);
835 	netdev->tx_queue_len = conn->path->msglim;
836 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
839 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
841 	struct iucv_event *ev = arg;
842 	struct iucv_path *path = ev->data;
844 	IUCV_DBF_TEXT(trace, 3, __func__);
845 	iucv_path_sever(path, NULL);
848 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
850 	struct iucv_connection *conn = arg;
851 	struct net_device *netdev = conn->netdev;
852 	struct netiucv_priv *privptr = netdev_priv(netdev);
854 	IUCV_DBF_TEXT(trace, 3, __func__);
855 	fsm_deltimer(&conn->timer);
856 	fsm_newstate(fi, CONN_STATE_IDLE);
857 	netdev->tx_queue_len = conn->path->msglim;
858 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
861 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
863 	struct iucv_connection *conn = arg;
865 	IUCV_DBF_TEXT(trace, 3, __func__);
866 	fsm_deltimer(&conn->timer);
867 	iucv_path_sever(conn->path, conn->userdata);
868 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
871 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
873 	struct iucv_connection *conn = arg;
874 	struct net_device *netdev = conn->netdev;
875 	struct netiucv_priv *privptr = netdev_priv(netdev);
877 	IUCV_DBF_TEXT(trace, 3, __func__);
879 	fsm_deltimer(&conn->timer);
880 	iucv_path_sever(conn->path, conn->userdata);
881 	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
882 		 "connection\n", netiucv_printuser(conn));
883 	IUCV_DBF_TEXT(data, 2,
884 		      "conn_action_connsever: Remote dropped connection\n");
885 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
886 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
889 static void conn_action_start(fsm_instance *fi, int event, void *arg)
891 	struct iucv_connection *conn = arg;
892 	struct net_device *netdev = conn->netdev;
893 	struct netiucv_priv *privptr = netdev_priv(netdev);
896 	IUCV_DBF_TEXT(trace, 3, __func__);
898 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
901 	 * We must set the state before calling iucv_connect because the
902 	 * callback handler could be called at any point after the connection
906 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
907 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
908 	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
909 		       netdev->name, netiucv_printuser(conn));
911 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
912 			       NULL, conn->userdata, conn);
915 		netdev->tx_queue_len = conn->path->msglim;
916 		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
917 			     CONN_EVENT_TIMER, conn);
920 		dev_warn(privptr->dev,
921 			"The IUCV device failed to connect to z/VM guest %s\n",
922 			netiucv_printname(conn->userid, 8));
923 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
926 		dev_warn(privptr->dev,
927 			"The IUCV device failed to connect to the peer on z/VM"
928 			" guest %s\n", netiucv_printname(conn->userid, 8));
929 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
932 		dev_err(privptr->dev,
933 			"Connecting the IUCV device would exceed the maximum"
934 			" number of IUCV connections\n");
935 		fsm_newstate(fi, CONN_STATE_CONNERR);
938 		dev_err(privptr->dev,
939 			"z/VM guest %s has too many IUCV connections"
940 			" to connect with the IUCV device\n",
941 			netiucv_printname(conn->userid, 8));
942 		fsm_newstate(fi, CONN_STATE_CONNERR);
945 		dev_err(privptr->dev,
946 			"The IUCV device cannot connect to a z/VM guest with no"
947 			" IUCV authorization\n");
948 		fsm_newstate(fi, CONN_STATE_CONNERR);
951 		dev_err(privptr->dev,
952 			"Connecting the IUCV device failed with error %d\n",
954 		fsm_newstate(fi, CONN_STATE_CONNERR);
957 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
962 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
966 	while ((skb = skb_dequeue(q))) {
967 		atomic_dec(&skb->users);
968 		dev_kfree_skb_any(skb);
972 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
974 	struct iucv_event *ev = arg;
975 	struct iucv_connection *conn = ev->conn;
976 	struct net_device *netdev = conn->netdev;
977 	struct netiucv_priv *privptr = netdev_priv(netdev);
979 	IUCV_DBF_TEXT(trace, 3, __func__);
981 	fsm_deltimer(&conn->timer);
982 	fsm_newstate(fi, CONN_STATE_STOPPED);
983 	netiucv_purge_skb_queue(&conn->collect_queue);
985 		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
986 		iucv_path_sever(conn->path, conn->userdata);
990 	netiucv_purge_skb_queue(&conn->commit_queue);
991 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
994 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
996 	struct iucv_connection *conn = arg;
997 	struct net_device *netdev = conn->netdev;
999 	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
1000 		       netdev->name, conn->userid);
1003 static const fsm_node conn_fsm[] = {
1004 	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
1005 	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
1007 	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
1008 	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1009 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1010 	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
1011 	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
1012 	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
1013 	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
1015 	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
1016 	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1017 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1018 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
1019 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
1021 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
1022 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
1024 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
1025 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
1026 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
1028 	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
1029 	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1031 	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1032 	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1035 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
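/*
 * Editor's note - an illustrative sketch, not code from this driver or from
 * the generic fsm helper: init_fsm() is assumed to expand a table such as
 * conn_fsm[] above into a state-by-event jump matrix, and fsm_event() to
 * index that matrix and call the registered action.  The field names below
 * (f, jumpmatrix, nr_events) are assumptions for illustration only.
 */
static inline void example_conn_fsm_dispatch(fsm_instance *fi, int event, void *arg)
{
	void (*action)(fsm_instance *, int, void *);
	int state = fsm_getstate(fi);

	/* e.g. state == CONN_STATE_TX with event == CONN_EVENT_TXDONE
	 * resolves to conn_action_txdone() via the table above. */
	action = fi->f->jumpmatrix[fi->f->nr_events * state + event];
	if (action)
		action(fi, event, arg);
}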
1039 * Actions for interface - statemachine.
1044 * @fi: An instance of an interface statemachine.
1045 * @event: The event, just happened.
1046 * @arg: Generic pointer, cast from struct net_device * upon call.
1048 * Startup connection by sending CONN_EVENT_START to it.
1050 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1052 	struct net_device *dev = arg;
1053 	struct netiucv_priv *privptr = netdev_priv(dev);
1055 	IUCV_DBF_TEXT(trace, 3, __func__);
1057 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1058 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1062 * Shutdown connection by sending CONN_EVENT_STOP to it.
1064 * @param fi An instance of an interface statemachine.
1065 * @param event The event, just happened.
1066 * @param arg Generic pointer, cast from struct net_device * upon call.
1069 dev_action_stop(fsm_instance *fi, int event, void *arg)
1071 	struct net_device *dev = arg;
1072 	struct netiucv_priv *privptr = netdev_priv(dev);
1073 	struct iucv_event ev;
1075 	IUCV_DBF_TEXT(trace, 3, __func__);
1077 	ev.conn = privptr->conn;
1079 	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1080 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1084 * Called from connection statemachine
1085 * when a connection is up and running.
1087 * @param fi An instance of an interface statemachine.
1088 * @param event The event, just happened.
1089 * @param arg Generic pointer, cast from struct net_device * upon call.
1092 dev_action_connup(fsm_instance *fi, int event, void *arg)
1094 	struct net_device *dev = arg;
1095 	struct netiucv_priv *privptr = netdev_priv(dev);
1097 	IUCV_DBF_TEXT(trace, 3, __func__);
1099 	switch (fsm_getstate(fi)) {
1100 	case DEV_STATE_STARTWAIT:
1101 		fsm_newstate(fi, DEV_STATE_RUNNING);
1102 		dev_info(privptr->dev,
1103 			"The IUCV device has been connected"
1104 			" successfully to %s\n",
1105 			netiucv_printuser(privptr->conn));
1106 		IUCV_DBF_TEXT(setup, 3,
1107 			"connection is up and running\n");
1109 	case DEV_STATE_STOPWAIT:
1110 		IUCV_DBF_TEXT(data, 2,
1111 			"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1117 * Called from connection statemachine
1118 * when a connection has been shutdown.
1120 * @param fi An instance of an interface statemachine.
1121 * @param event The event, just happened.
1122 * @param arg Generic pointer, cast from struct net_device * upon call.
1125 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1127 	IUCV_DBF_TEXT(trace, 3, __func__);
1129 	switch (fsm_getstate(fi)) {
1130 	case DEV_STATE_RUNNING:
1131 		fsm_newstate(fi, DEV_STATE_STARTWAIT);
1133 	case DEV_STATE_STOPWAIT:
1134 		fsm_newstate(fi, DEV_STATE_STOPPED);
1135 		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1140 static const fsm_node dev_fsm[] = {
1141 	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },
1143 	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
1144 	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },
1146 	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
1147 	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },
1149 	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
1150 	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
1151 	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
1154 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1157 * Transmit a packet.
1158 * This is a helper function for netiucv_tx().
1160 * @param conn Connection to be used for sending.
1161 * @param skb Pointer to struct sk_buff of packet to send.
1162 * The linklevel header has already been set up
1165 * @return 0 on success, -ERRNO on failure. (Never fails.)
1167 static int netiucv_transmit_skb(struct iucv_connection *conn,
1168 				struct sk_buff *skb)
1170 	struct iucv_message msg;
1171 	unsigned long saveflags;
1172 	struct ll_header header;
1175 	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1176 		int l = skb->len + NETIUCV_HDRLEN;
1178 		spin_lock_irqsave(&conn->collect_lock, saveflags);
1179 		if (conn->collect_len + l >
1180 		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1182 			IUCV_DBF_TEXT(data, 2,
1183 				      "EBUSY from netiucv_transmit_skb\n");
1185 			atomic_inc(&skb->users);
1186 			skb_queue_tail(&conn->collect_queue, skb);
1187 			conn->collect_len += l;
1190 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1192 		struct sk_buff *nskb = skb;
1194 		 * Copy the skb to a new allocated skb in lowmem only if the
1195 		 * data is located above 2G in memory or tailroom is < 2.
1197 		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1198 				    NETIUCV_HDRLEN)) >> 31;
1200 		if (hi || (skb_tailroom(skb) < 2)) {
1201 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1202 					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1204 				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1208 			skb_reserve(nskb, NETIUCV_HDRLEN);
1209 			memcpy(skb_put(nskb, skb->len),
1210 			       skb->data, skb->len);
1215 		 * skb now is below 2G and has enough room. Add headers.
1217 		header.next = nskb->len + NETIUCV_HDRLEN;
1218 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1220 		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1222 		fsm_newstate(conn->fsm, CONN_STATE_TX);
1223 		conn->prof.send_stamp = current_kernel_time();
1227 		rc = iucv_message_send(conn->path, &msg, 0, 0,
1228 				       nskb->data, nskb->len);
1229 		conn->prof.doios_single++;
1230 		conn->prof.txlen += skb->len;
1231 		conn->prof.tx_pending++;
1232 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1233 			conn->prof.tx_max_pending = conn->prof.tx_pending;
1235 			struct netiucv_priv *privptr;
1236 			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1237 			conn->prof.tx_pending--;
1238 			privptr = netdev_priv(conn->netdev);
1240 				privptr->stats.tx_errors++;
1242 				dev_kfree_skb(nskb);
1245 				 * Remove our headers. They get added
1246 				 * again on retransmit.
1248 				skb_pull(skb, NETIUCV_HDRLEN);
1249 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1251 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1255 			atomic_inc(&nskb->users);
1256 			skb_queue_tail(&conn->commit_queue, nskb);
1264 * Interface API for upper network layers
1268 * Open an interface.
1269 * Called from generic network layer when ifconfig up is run.
1271 * @param dev Pointer to interface struct.
1273 * @return 0 on success, -ERRNO on failure. (Never fails.)
1275 static int netiucv_open(struct net_device *dev)
1277 	struct netiucv_priv *priv = netdev_priv(dev);
1279 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1284 * Close an interface.
1285 * Called from generic network layer when ifconfig down is run.
1287 * @param dev Pointer to interface struct.
1289 * @return 0 on success, -ERRNO on failure. (Never fails.)
1291 static int netiucv_close(struct net_device *dev)
1293 	struct netiucv_priv *priv = netdev_priv(dev);
1295 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1299 static int netiucv_pm_prepare(struct device *dev)
1301 	IUCV_DBF_TEXT(trace, 3, __func__);
1305 static void netiucv_pm_complete(struct device *dev)
1307 	IUCV_DBF_TEXT(trace, 3, __func__);
1312 * netiucv_pm_freeze() - Freeze PM callback
1313 * @dev: netiucv device
1315 * close open netiucv interfaces
1317 static int netiucv_pm_freeze(struct device *dev)
1319 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1320 	struct net_device *ndev = NULL;
1323 	IUCV_DBF_TEXT(trace, 3, __func__);
1324 	if (priv && priv->conn)
1325 		ndev = priv->conn->netdev;
1328 	netif_device_detach(ndev);
1329 	priv->pm_state = fsm_getstate(priv->fsm);
1330 	rc = netiucv_close(ndev);
1336 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1337 * @dev: netiucv device
1339 * re-open netiucv interfaces closed during freeze
1341 static int netiucv_pm_restore_thaw(struct device *dev)
1343 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1344 	struct net_device *ndev = NULL;
1347 	IUCV_DBF_TEXT(trace, 3, __func__);
1348 	if (priv && priv->conn)
1349 		ndev = priv->conn->netdev;
1352 	switch (priv->pm_state) {
1353 	case DEV_STATE_RUNNING:
1354 	case DEV_STATE_STARTWAIT:
1355 		rc = netiucv_open(ndev);
1360 	netif_device_attach(ndev);
1366 * Start transmission of a packet.
1367 * Called from generic network device layer.
1369 * @param skb Pointer to buffer containing the packet.
1370 * @param dev Pointer to interface struct.
1372 * @return 0 if packet consumed, !0 if packet rejected.
1373 * Note: If we return !0, then the packet is freed by
1374 * the generic network layer.
1376 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1378 	struct netiucv_priv *privptr = netdev_priv(dev);
1381 	IUCV_DBF_TEXT(trace, 4, __func__);
1383 	 * Some sanity checks ...
1386 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1387 		privptr->stats.tx_dropped++;
1388 		return NETDEV_TX_OK;
1390 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1391 		IUCV_DBF_TEXT(data, 2,
1392 			      "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1394 		privptr->stats.tx_dropped++;
1395 		return NETDEV_TX_OK;
1399 	 * If connection is not running, try to restart it
1400 	 * and throw away packet.
1402 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1404 		privptr->stats.tx_dropped++;
1405 		privptr->stats.tx_errors++;
1406 		privptr->stats.tx_carrier_errors++;
1407 		return NETDEV_TX_OK;
1410 	if (netiucv_test_and_set_busy(dev)) {
1411 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1412 		return NETDEV_TX_BUSY;
1414 	dev->trans_start = jiffies;
1415 	rc = netiucv_transmit_skb(privptr->conn, skb);
1416 	netiucv_clear_busy(dev);
1417 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1422 * @dev: Pointer to interface struct.
1424 * Returns interface statistics of a device.
1426 * Returns pointer to stats struct of this interface.
1428 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1430 	struct netiucv_priv *priv = netdev_priv(dev);
1432 	IUCV_DBF_TEXT(trace, 5, __func__);
1433 	return &priv->stats;
1437 * netiucv_change_mtu
1438 * @dev: Pointer to interface struct.
1439 * @new_mtu: The new MTU to use for this interface.
1441 * Sets MTU of an interface.
1443 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1444 * (valid range is 576 .. NETIUCV_MTU_MAX).
1446 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1448 	IUCV_DBF_TEXT(trace, 3, __func__);
1449 	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1450 		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1458 * attributes in sysfs
1461 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1464 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1466 	IUCV_DBF_TEXT(trace, 5, __func__);
1467 	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1470 static int netiucv_check_user(const char *buf, size_t count, char *username,
1476 	p = strchr(buf, '.');
1477 	if ((p && ((count > 26) ||
1479 		   (buf + count - p > 18))) ||
1480 	    (!p && (count > 9))) {
1481 		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1485 	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1486 		if (isalnum(*p) || *p == '$') {
1487 			username[i] = toupper(*p);
1491 			/* trailing lf, grr */
1493 		IUCV_DBF_TEXT_(setup, 2,
1494 			       "conn_write: invalid character %02x\n", *p);
1498 		username[i++] = ' ';
1503 		for (i = 0; i < 16 && *p; i++, p++) {
1506 			userdata[i] = toupper(*p);
1508 		while (i > 0 && i < 16)
1509 			userdata[i++] = ' ';
1511 		memcpy(userdata, iucvMagic_ascii, 16);
1512 	userdata[16] = '\0';
1513 	ASCEBC(userdata, 16);
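/*
 * Editor's note (usage sketch, not part of the original source): the parser
 * above accepts sysfs input of the form "USERID" or "USERID.userdata", e.g.
 *
 *	echo "VMGUEST1"          > /sys/bus/iucv/drivers/netiucv/connection
 *	echo "VMGUEST1.ROUTER01" > /sys/bus/iucv/drivers/netiucv/connection
 *
 * The z/VM user ID part is limited to 8 characters of [A-Za-z0-9$], is
 * upper-cased and blank padded; the optional part after the '.' becomes the
 * 16-byte user data and defaults to iucvMagic when omitted.  Both halves are
 * converted to EBCDIC (ASCEBC) before being handed to the IUCV layer.
 */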
1518 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1519 			  const char *buf, size_t count)
1521 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1522 	struct net_device *ndev = priv->conn->netdev;
1526 	struct iucv_connection *cp;
1528 	IUCV_DBF_TEXT(trace, 3, __func__);
1529 	rc = netiucv_check_user(buf, count, username, userdata);
1533 	if (memcmp(username, priv->conn->userid, 9) &&
1534 	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1535 		/* username changed while the interface is active. */
1536 		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1539 	read_lock_bh(&iucv_connection_rwlock);
1540 	list_for_each_entry(cp, &iucv_connection_list, list) {
1541 		if (!strncmp(username, cp->userid, 9) &&
1542 		    !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1543 			read_unlock_bh(&iucv_connection_rwlock);
1544 			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1545 				"already exists\n", netiucv_printuser(cp));
1549 	read_unlock_bh(&iucv_connection_rwlock);
1550 	memcpy(priv->conn->userid, username, 9);
1551 	memcpy(priv->conn->userdata, userdata, 17);
1555 static DEVICE_ATTR(user, 0644, user_show, user_write);
1557 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1560 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1562 	IUCV_DBF_TEXT(trace, 5, __func__);
1563 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1566 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1567 			     const char *buf, size_t count)
1569 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1570 	struct net_device *ndev = priv->conn->netdev;
1574 	IUCV_DBF_TEXT(trace, 3, __func__);
1578 	bs1 = simple_strtoul(buf, &e, 0);
1580 	if (e && (!isspace(*e))) {
1581 		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1585 	if (bs1 > NETIUCV_BUFSIZE_MAX) {
1586 		IUCV_DBF_TEXT_(setup, 2,
1587 			"buffer_write: buffer size %d too large\n",
1591 	if ((ndev->flags & IFF_RUNNING) &&
1592 	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1593 		IUCV_DBF_TEXT_(setup, 2,
1594 			"buffer_write: buffer size %d too small\n",
1598 	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1599 		IUCV_DBF_TEXT_(setup, 2,
1600 			"buffer_write: buffer size %d too small\n",
1605 	priv->conn->max_buffsize = bs1;
1606 	if (!(ndev->flags & IFF_RUNNING))
1607 		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1613 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1615 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1618 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1620 	IUCV_DBF_TEXT(trace, 5, __func__);
1621 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1624 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1626 static ssize_t conn_fsm_show (struct device *dev,
1627 			      struct device_attribute *attr, char *buf)
1629 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1631 	IUCV_DBF_TEXT(trace, 5, __func__);
1632 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1635 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1637 static ssize_t maxmulti_show (struct device *dev,
1638 			      struct device_attribute *attr, char *buf)
1640 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1642 	IUCV_DBF_TEXT(trace, 5, __func__);
1643 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1646 static ssize_t maxmulti_write (struct device *dev,
1647 			       struct device_attribute *attr,
1648 			       const char *buf, size_t count)
1650 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1652 	IUCV_DBF_TEXT(trace, 4, __func__);
1653 	priv->conn->prof.maxmulti = 0;
1657 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1659 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1662 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1664 	IUCV_DBF_TEXT(trace, 5, __func__);
1665 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1668 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1669 			    const char *buf, size_t count)
1671 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1673 	IUCV_DBF_TEXT(trace, 4, __func__);
1674 	priv->conn->prof.maxcqueue = 0;
1678 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1680 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1683 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1685 	IUCV_DBF_TEXT(trace, 5, __func__);
1686 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1689 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1690 			    const char *buf, size_t count)
1692 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1694 	IUCV_DBF_TEXT(trace, 4, __func__);
1695 	priv->conn->prof.doios_single = 0;
1699 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1701 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1704 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1706 	IUCV_DBF_TEXT(trace, 5, __func__);
1707 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1710 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1711 			    const char *buf, size_t count)
1713 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1715 	IUCV_DBF_TEXT(trace, 5, __func__);
1716 	priv->conn->prof.doios_multi = 0;
1720 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1722 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1725 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1727 	IUCV_DBF_TEXT(trace, 5, __func__);
1728 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1731 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1732 			    const char *buf, size_t count)
1734 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1736 	IUCV_DBF_TEXT(trace, 4, __func__);
1737 	priv->conn->prof.txlen = 0;
1741 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1743 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1746 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1748 	IUCV_DBF_TEXT(trace, 5, __func__);
1749 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1752 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1753 			     const char *buf, size_t count)
1755 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1757 	IUCV_DBF_TEXT(trace, 4, __func__);
1758 	priv->conn->prof.tx_time = 0;
1762 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1764 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1767 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1769 	IUCV_DBF_TEXT(trace, 5, __func__);
1770 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1773 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1774 			     const char *buf, size_t count)
1776 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1778 	IUCV_DBF_TEXT(trace, 4, __func__);
1779 	priv->conn->prof.tx_pending = 0;
1783 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1785 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1788 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1790 	IUCV_DBF_TEXT(trace, 5, __func__);
1791 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1794 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1795 			     const char *buf, size_t count)
1797 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1799 	IUCV_DBF_TEXT(trace, 4, __func__);
1800 	priv->conn->prof.tx_max_pending = 0;
1804 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1806 static struct attribute *netiucv_attrs[] = {
1807 	&dev_attr_buffer.attr,
1808 	&dev_attr_user.attr,
1812 static struct attribute_group netiucv_attr_group = {
1813 	.attrs = netiucv_attrs,
1816 static struct attribute *netiucv_stat_attrs[] = {
1817 	&dev_attr_device_fsm_state.attr,
1818 	&dev_attr_connection_fsm_state.attr,
1819 	&dev_attr_max_tx_buffer_used.attr,
1820 	&dev_attr_max_chained_skbs.attr,
1821 	&dev_attr_tx_single_write_ops.attr,
1822 	&dev_attr_tx_multi_write_ops.attr,
1823 	&dev_attr_netto_bytes.attr,
1824 	&dev_attr_max_tx_io_time.attr,
1825 	&dev_attr_tx_pending.attr,
1826 	&dev_attr_tx_max_pending.attr,
1830 static struct attribute_group netiucv_stat_attr_group = {
1832 	.attrs = netiucv_stat_attrs,
1835 static const struct attribute_group *netiucv_attr_groups[] = {
1836 	&netiucv_stat_attr_group,
1837 	&netiucv_attr_group,
1841 static int netiucv_register_device(struct net_device *ndev)
1843 	struct netiucv_priv *priv = netdev_priv(ndev);
1844 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1847 	IUCV_DBF_TEXT(trace, 3, __func__);
1850 	dev_set_name(dev, "net%s", ndev->name);
1851 	dev->bus = &iucv_bus;
1852 	dev->parent = iucv_root;
1853 	dev->groups = netiucv_attr_groups;
1855 	 * The release function could be called after the
1856 	 * module has been unloaded. Its _only_ task is to
1857 	 * free the struct. Therefore, we specify kfree()
1858 	 * directly here. (Probably a little bit obfuscating
1859 	 * but legitimate ...).
1861 	dev->release = (void (*)(struct device *))kfree;
1862 	dev->driver = &netiucv_driver;
1866 	ret = device_register(dev);
1872 	dev_set_drvdata(dev, priv);
1876 static void netiucv_unregister_device(struct device *dev)
1878 	IUCV_DBF_TEXT(trace, 3, __func__);
1879 	device_unregister(dev);
1883 * Allocate and initialize a new connection structure.
1884 * Add it to the list of netiucv connections;
1886 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1890 	struct iucv_connection *conn;
1892 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1895 	skb_queue_head_init(&conn->collect_queue);
1896 	skb_queue_head_init(&conn->commit_queue);
1897 	spin_lock_init(&conn->collect_lock);
1898 	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1901 	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1904 	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1907 	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1908 			     conn_event_names, NR_CONN_STATES,
1909 			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1914 	fsm_settimer(conn->fsm, &conn->timer);
1915 	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1918 		memcpy(conn->userdata, userdata, 17);
1920 		memcpy(conn->userid, username, 9);
1921 		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1924 	write_lock_bh(&iucv_connection_rwlock);
1925 	list_add_tail(&conn->list, &iucv_connection_list);
1926 	write_unlock_bh(&iucv_connection_rwlock);
1930 	kfree_skb(conn->tx_buff);
1932 	kfree_skb(conn->rx_buff);
1940 * Release a connection structure and remove it from the
1941 * list of netiucv connections.
1943 static void netiucv_remove_connection(struct iucv_connection *conn)
1946 	IUCV_DBF_TEXT(trace, 3, __func__);
1947 	write_lock_bh(&iucv_connection_rwlock);
1948 	list_del_init(&conn->list);
1949 	write_unlock_bh(&iucv_connection_rwlock);
1950 	fsm_deltimer(&conn->timer);
1951 	netiucv_purge_skb_queue(&conn->collect_queue);
1953 		iucv_path_sever(conn->path, conn->userdata);
1957 	netiucv_purge_skb_queue(&conn->commit_queue);
1958 	kfree_fsm(conn->fsm);
1959 	kfree_skb(conn->rx_buff);
1960 	kfree_skb(conn->tx_buff);
1964 * Release everything of a net device.
1966 static void netiucv_free_netdevice(struct net_device *dev)
1968 	struct netiucv_priv *privptr = netdev_priv(dev);
1970 	IUCV_DBF_TEXT(trace, 3, __func__);
1977 	netiucv_remove_connection(privptr->conn);
1979 	kfree_fsm(privptr->fsm);
1980 	privptr->conn = NULL; privptr->fsm = NULL;
1981 /* privptr gets freed by free_netdev() */
1987 * Initialize a net device. (Called from kernel in alloc_netdev())
1989 static const struct net_device_ops netiucv_netdev_ops = {
1990 	.ndo_open		= netiucv_open,
1991 	.ndo_stop		= netiucv_close,
1992 	.ndo_get_stats		= netiucv_stats,
1993 	.ndo_start_xmit		= netiucv_tx,
1994 	.ndo_change_mtu		= netiucv_change_mtu,
1997 static void netiucv_setup_netdevice(struct net_device *dev)
1999 	dev->mtu		 = NETIUCV_MTU_DEFAULT;
2000 	dev->destructor		 = netiucv_free_netdevice;
2001 	dev->hard_header_len	 = NETIUCV_HDRLEN;
2003 	dev->type		 = ARPHRD_SLIP;
2004 	dev->tx_queue_len	 = NETIUCV_QUEUELEN_DEFAULT;
2005 	dev->flags		 = IFF_POINTOPOINT | IFF_NOARP;
2006 	dev->netdev_ops		 = &netiucv_netdev_ops;
2010 * Allocate and initialize everything of a net device.
2012 static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2014 	struct netiucv_priv *privptr;
2015 	struct net_device *dev;
2017 	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
2018 			   netiucv_setup_netdevice);
2022 	if (dev_alloc_name(dev, dev->name) < 0)
2025 	privptr = netdev_priv(dev);
2026 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2027 				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2028 				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2032 	privptr->conn = netiucv_new_connection(dev, username, userdata);
2033 	if (!privptr->conn) {
2034 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2037 	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2041 	kfree_fsm(privptr->fsm);
2048 static ssize_t conn_write(struct device_driver *drv,
2049 			  const char *buf, size_t count)
2054 	struct net_device *dev;
2055 	struct netiucv_priv *priv;
2056 	struct iucv_connection *cp;
2058 	IUCV_DBF_TEXT(trace, 3, __func__);
2059 	rc = netiucv_check_user(buf, count, username, userdata);
2063 	read_lock_bh(&iucv_connection_rwlock);
2064 	list_for_each_entry(cp, &iucv_connection_list, list) {
2065 		if (!strncmp(username, cp->userid, 9) &&
2066 		    !strncmp(userdata, cp->userdata, 17)) {
2067 			read_unlock_bh(&iucv_connection_rwlock);
2068 			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
2069 				"already exists\n", netiucv_printuser(cp));
2073 	read_unlock_bh(&iucv_connection_rwlock);
2075 	dev = netiucv_init_netdevice(username, userdata);
2077 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2081 	rc = netiucv_register_device(dev);
2084 		IUCV_DBF_TEXT_(setup, 2,
2085 			"ret %d from netiucv_register_device\n", rc);
2090 	priv = netdev_priv(dev);
2091 	SET_NETDEV_DEV(dev, priv->dev);
2093 	rc = register_netdevice(dev);
2098 	dev_info(priv->dev, "The IUCV interface to %s has been established "
2100 		 netiucv_printuser(priv->conn));
2105 	netiucv_unregister_device(priv->dev);
2107 	netiucv_free_netdevice(dev);
2111 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2113 static ssize_t remove_write (struct device_driver *drv,
2114 			     const char *buf, size_t count)
2116 	struct iucv_connection *cp;
2117 	struct net_device *ndev;
2118 	struct netiucv_priv *priv;
2120 	char name[IFNAMSIZ];
2124 	IUCV_DBF_TEXT(trace, 3, __func__);
2126 	if (count >= IFNAMSIZ)
2127 		count = IFNAMSIZ - 1;
2129 	for (i = 0, p = buf; i < count && *p; i++, p++) {
2130 		if (*p == '\n' || *p == ' ')
2131 			/* trailing lf, grr */
2137 	read_lock_bh(&iucv_connection_rwlock);
2138 	list_for_each_entry(cp, &iucv_connection_list, list) {
2140 		priv = netdev_priv(ndev);
2142 		if (strncmp(name, ndev->name, count))
2144 		read_unlock_bh(&iucv_connection_rwlock);
2145 		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2146 			dev_warn(dev, "The IUCV device is connected"
2147 				" to %s and cannot be removed\n",
2148 				priv->conn->userid);
2149 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2152 		unregister_netdev(ndev);
2153 		netiucv_unregister_device(dev);
2156 	read_unlock_bh(&iucv_connection_rwlock);
2157 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2161 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2163 static struct attribute * netiucv_drv_attrs[] = {
2164 	&driver_attr_connection.attr,
2165 	&driver_attr_remove.attr,
2169 static struct attribute_group netiucv_drv_attr_group = {
2170 	.attrs = netiucv_drv_attrs,
2173 static const struct attribute_group *netiucv_drv_attr_groups[] = {
2174 	&netiucv_drv_attr_group,
2178 static void netiucv_banner(void)
2180 pr_info("driver initialized\n");
2183 static void __exit netiucv_exit(void)
2185 	struct iucv_connection *cp;
2186 	struct net_device *ndev;
2187 	struct netiucv_priv *priv;
2190 	IUCV_DBF_TEXT(trace, 3, __func__);
2191 	while (!list_empty(&iucv_connection_list)) {
2192 		cp = list_entry(iucv_connection_list.next,
2193 				struct iucv_connection, list);
2195 		priv = netdev_priv(ndev);
2198 		unregister_netdev(ndev);
2199 		netiucv_unregister_device(dev);
2202 	device_unregister(netiucv_dev);
2203 	driver_unregister(&netiucv_driver);
2204 	iucv_unregister(&netiucv_handler, 1);
2205 	iucv_unregister_dbf_views();
2207 pr_info("driver unloaded\n");
2211 static int __init netiucv_init(void)
2215 	rc = iucv_register_dbf_views();
2218 	rc = iucv_register(&netiucv_handler, 1);
2221 	IUCV_DBF_TEXT(trace, 3, __func__);
2222 	netiucv_driver.groups = netiucv_drv_attr_groups;
2223 	rc = driver_register(&netiucv_driver);
2225 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2228 	/* establish dummy device */
2229 	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2234 	dev_set_name(netiucv_dev, "netiucv");
2235 	netiucv_dev->bus = &iucv_bus;
2236 	netiucv_dev->parent = iucv_root;
2237 	netiucv_dev->release = (void (*)(struct device *))kfree;
2238 	netiucv_dev->driver = &netiucv_driver;
2239 	rc = device_register(netiucv_dev);
2241 		put_device(netiucv_dev);
2248 	driver_unregister(&netiucv_driver);
2250 	iucv_unregister(&netiucv_handler, 1);
2252 	iucv_unregister_dbf_views();
2257 module_init(netiucv_init);
2258 module_exit(netiucv_exit);
2259 MODULE_LICENSE("GPL");
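/*
 * Editor's note - a short usage sketch (assumptions about the administrative
 * steps, not part of the original source): after "modprobe netiucv" an
 * interface is created through the driver attributes defined above and
 * configured like any other point-to-point device, e.g.
 *
 *	echo "PEERGST1" > /sys/bus/iucv/drivers/netiucv/connection
 *	ip addr add 10.0.0.1 peer 10.0.0.2 dev iucv0
 *	ip link set iucv0 up
 *	echo "iucv0"    > /sys/bus/iucv/drivers/netiucv/remove   # tear down
 *
 * The per-interface attributes (user, buffer and the statistics group)
 * appear under the corresponding net<ifname> device (e.g. netiucv0) on the
 * iucv bus.
 */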