/*
 * Copyright IBM Corp. 2001, 2009
 *
 * Original netiucv driver:
 *	Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Sysfs integration and all bugs therein:
 *	Cornelia Huck (cornelia.huck@de.ibm.com)
 *	Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *  the source of the original IUCV driver by:
 *    Stefan Hegewald <hegewald@de.ibm.com>
 *    Hartmut Penner <hpenner@de.ibm.com>
 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>

#include <net/iucv/iucv.h>
#include "fsm.h"

MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
/*
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 32
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

#define IUCV_DBF_TEXT(name, level, text) \
	do { \
		debug_text_event(iucv_dbf_##name, level, text); \
	} while (0)

#define IUCV_DBF_HEX(name, level, addr, len) \
	do { \
		debug_event(iucv_dbf_##name, level, (void *)(addr), len); \
	} while (0)

DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* Allow to sort out low debug levels early to avoid wasted sprints */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
	return (level <= dbf_grp->level);
}

#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
			char *__buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)

#define IUCV_DBF_SPRINTF(name, level, text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text); \
		debug_sprintf_event(iucv_dbf_trace, level, text); \
	} while (0)
/*
 * some more debug stuff
 */
#define IUCV_HEXDUMP16(importance, header, ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char *)ptr)), *(((char *)ptr) + 1), *(((char *)ptr) + 2), \
		   *(((char *)ptr) + 3), *(((char *)ptr) + 4), *(((char *)ptr) + 5), \
		   *(((char *)ptr) + 6), *(((char *)ptr) + 7), *(((char *)ptr) + 8), \
		   *(((char *)ptr) + 9), *(((char *)ptr) + 10), *(((char *)ptr) + 11), \
		   *(((char *)ptr) + 12), *(((char *)ptr) + 13), \
		   *(((char *)ptr) + 14), *(((char *)ptr) + 15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char *)ptr) + 16), *(((char *)ptr) + 17), \
		   *(((char *)ptr) + 18), *(((char *)ptr) + 19), \
		   *(((char *)ptr) + 20), *(((char *)ptr) + 21), \
		   *(((char *)ptr) + 22), *(((char *)ptr) + 23), \
		   *(((char *)ptr) + 24), *(((char *)ptr) + 25), \
		   *(((char *)ptr) + 26), *(((char *)ptr) + 27), \
		   *(((char *)ptr) + 28), *(((char *)ptr) + 29), \
		   *(((char *)ptr) + 30), *(((char *)ptr) + 31));

#define PRINTK_HEADER " iucv: "       /* for debugging */
/* dummy device to make sure netiucv_pm functions are called */
static struct device *netiucv_dev;

static int netiucv_pm_prepare(struct device *);
static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);

static const struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};

static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus  = &iucv_bus,
	.pm = &netiucv_pm_ops,
};
static int netiucv_callback_connreq(struct iucv_path *,
				    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
/*
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	struct timespec send_stamp;
	unsigned long tx_pending;
	unsigned long tx_max_pending;
};

/*
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;
	struct iucv_path	  *path;
	struct sk_buff		  *rx_buff;
	struct sk_buff		  *tx_buff;
	struct sk_buff_head	  collect_queue;
	struct sk_buff_head	  commit_queue;
	spinlock_t		  collect_lock;
	int			  collect_len;
	int			  max_buffsize;
	fsm_timer		  timer;
	fsm_instance		  *fsm;
	struct net_device	  *netdev;
	struct connection_profile prof;
	char			  userid[9];
};

/*
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/*
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;
	void                   *data;
};

/*
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;
	unsigned long           tbusy;
	fsm_instance            *fsm;
	struct iucv_connection  *conn;
	struct device           *dev;
	int			 pm_state;
};
/*
 * Link level header for a packet.
 */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 32768
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX		 (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT	 9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC	 5000
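
/*
 * For illustration only: struct ll_header is two bytes, so with the default
 * 32 KiB buffer NETIUCV_MTU_MAX works out to 32768 - 2 = 32766, and the
 * default MTU of 9216 leaves plenty of room for the link level header in
 * every transmit buffer.
 */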
/*
 * Compatibility macros for busy handling
 * of network devices.
 */
static inline void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}

static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}
static u8 iucvMagic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};

/*
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param name  An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name)
{
	static char tmp[9];
	char *p = tmp;
	memcpy(tmp, name, 8);
	tmp[8] = '\0';
	while (*p && (!isspace(*p)))
		p++;
	*p = '\0';
	return tmp;
}
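
/*
 * Example (illustrative only): for the blank-padded user id "VMLINUX "
 * netiucv_printname() returns "VMLINUX".  The result lives in a static
 * buffer, so it must be consumed before the next call.
 */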
/*
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/*
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};

/*
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/*
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};

/*
 * Events of the connection statemachine
 */
enum conn_events {
	/*
	 * Events, representing callbacks from
	 * lowlevel iucv layer
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/*
	 * Events, representing error return codes from
	 * calls to lowlevel iucv layer
	 */

	/*
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,

	/*
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/*
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};

/*
 * States of the connection statemachine.
 */
enum conn_states {
	/*
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/*
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,

	/*
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/*
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/*
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,

	/*
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,

	/*
	 * Error during registration.
	 */
	CONN_STATE_REGERR,

	/*
	 * Error during registration.
	 */
	CONN_STATE_CONNERR,

	/*
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};

static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Registration error",
	"Connect error",
};
/*
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
/*
 * Callback-wrappers, called from lowlevel iucv layer.
 */

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}

static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	int rc;

	if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
		/* ipuser must match iucvMagic. */
		return -EINVAL;

	rc = -EINVAL;
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}
static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
/*
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions of the connection statemachine
 */

/*
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				       header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				      "Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);

	iucv_path_sever(path, NULL);
}
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	dev_info(privptr->dev, "The peer interface of the IUCV device"
		" has closed the connection\n");
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
		       netdev->name, conn->userid);

	/*
	 * We must set the state before calling iucv_connect because the
	 * callback handler could be called at any point after the connection
	 * request is sent
	 */

	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, iucvMagic, conn);
	switch (rc) {
	case 0:
		netdev->tx_queue_len = conn->path->msglim;
		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
			     CONN_EVENT_TIMER, conn);
		return;
	case 11:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to z/VM guest %s\n",
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 12:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to the peer on z/VM"
			" guest %s\n", netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 13:
		dev_err(privptr->dev,
			"Connecting the IUCV device would exceed the maximum"
			" number of IUCV connections\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 14:
		dev_err(privptr->dev,
			"z/VM guest %s has too many IUCV connections"
			" to connect with the IUCV device\n",
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 15:
		dev_err(privptr->dev,
			"The IUCV device cannot connect to a z/VM guest with no"
			" IUCV authorization\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	default:
		dev_err(privptr->dev,
			"Connecting the IUCV device failed with error %d\n",
			rc);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	}
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
	kfree(conn->path);
	conn->path = NULL;
}
static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		       netdev->name, conn->userid);
}
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
/*
 * Actions for interface - statemachine.
 */

/*
 * dev_action_start
 * @fi: An instance of an interface statemachine.
 * @event: The event, just happened.
 * @arg: Generic pointer, casted from struct net_device * upon call.
 *
 * Startup connection by sending CONN_EVENT_START to it.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
/*
 * Shutdown connection by sending CONN_EVENT_STOP to it.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
/*
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		fsm_newstate(fi, DEV_STATE_RUNNING);
		dev_info(privptr->dev,
			"The IUCV device has been connected"
			" successfully to %s\n", privptr->conn->userid);
		IUCV_DBF_TEXT(setup, 3,
			"connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		IUCV_DBF_TEXT(data, 2,
			"dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}
/*
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};

static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
/*
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by netiucv_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				    NETIUCV_HDRLEN)) >> 31;
		int copied = 0;
		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				return -ENOMEM;
			} else {
				skb_reserve(nskb, NETIUCV_HDRLEN);
				memcpy(skb_put(nskb, skb->len),
				       skb->data, skb->len);
			}
			copied = 1;
		}
		/*
		 * skb now is below 2G and has enough room. Add headers.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		header.next = 0;
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = current_kernel_time();

		msg.tag = 1;
		msg.class = 0;
		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/*
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			if (copied)
				dev_kfree_skb(skb);
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}
/*
 * Interface API for upper network layers
 */

/*
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_open(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return 0;
}

/*
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_close(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	return 0;
}
static int netiucv_pm_prepare(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return 0;
}

static void netiucv_pm_complete(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return;
}
/**
 * netiucv_pm_freeze() - Freeze PM callback
 * @dev: netiucv device
 *
 * close open netiucv interfaces
 */
static int netiucv_pm_freeze(struct device *dev)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = NULL;
	int rc = 0;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (priv && priv->conn)
		ndev = priv->conn->netdev;
	if (!ndev)
		goto out;
	netif_device_detach(ndev);
	priv->pm_state = fsm_getstate(priv->fsm);
	rc = netiucv_close(ndev);
out:
	return rc;
}
/**
 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev: netiucv device
 *
 * re-open netiucv interfaces closed during freeze
 */
static int netiucv_pm_restore_thaw(struct device *dev)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = NULL;
	int rc = 0;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (priv && priv->conn)
		ndev = priv->conn->netdev;
	if (!ndev)
		goto out;
	switch (priv->pm_state) {
	case DEV_STATE_RUNNING:
	case DEV_STATE_STARTWAIT:
		rc = netiucv_open(ndev);
		break;
	default:
		break;
	}
	netif_device_attach(ndev);
out:
	return rc;
}
/*
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is free'd by
 *               the generic network layer.
 */
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);
	/*
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
		IUCV_DBF_TEXT(data, 2,
			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * If connection is not running, try to restart it
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}

	if (netiucv_test_and_set_busy(dev)) {
		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
		return NETDEV_TX_BUSY;
	}
	dev->trans_start = jiffies;
	rc = netiucv_transmit_skb(privptr->conn, skb);
	netiucv_clear_busy(dev);
	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/*
 * netiucv_stats
 * @dev: Pointer to interface struct.
 *
 * Returns interface statistics of a device.
 *
 * Returns pointer to stats struct of this interface.
 */
static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return &priv->stats;
}
/*
 * netiucv_change_mtu
 * @dev: Pointer to interface struct.
 * @new_mtu: The new MTU to use for this interface.
 *
 * Sets MTU of an interface.
 *
 * Returns 0 on success, -EINVAL if MTU is out of valid range.
 *         (valid range is 576 .. NETIUCV_MTU_MAX).
 */
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}
/*
 * attributes in sysfs
 */

static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[9];
	int i;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT_(setup, 2,
			       "%d is length of username\n", (int) count);
		return -EINVAL;
	}

	tmp = strsep((char **) &buf, "\n");
	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$')) {
			username[i]= toupper(*p);
			continue;
		}
		if (*p == '\n') {
			/* trailing lf, grr */
			break;
		}
		IUCV_DBF_TEXT_(setup, 2,
			       "username: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
				"to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
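
/*
 * Typical use of the "user" attribute (a sketch; the sysfs path assumes the
 * device was registered on the standard iucv bus as "netiucv0"):
 *
 *   echo TESTVM1 > /sys/bus/iucv/devices/netiucv0/user
 *
 * The peer user id may only be changed while the interface is down, as
 * enforced by user_write() above.
 */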
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}

static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char *e;
	int bs1;

	IUCV_DBF_TEXT(trace, 3, __func__);

	bs1 = simple_strtoul(buf, &e, 0);

	if (e && (!isspace(*e))) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);

static ssize_t conn_fsm_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
static ssize_t maxmulti_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

static ssize_t maxmulti_write (struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxmulti = 0;
	return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxcqueue = 0;
	return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.doios_single = 0;
	return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	priv->conn->prof.doios_multi = 0;
	return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};
static int netiucv_add_files(struct device *dev)
{
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);
	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
	if (ret)
		return ret;
	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
	if (ret)
		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
	return ret;
}

static void netiucv_remove_files(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (dev) {
		dev_set_name(dev, "net%s", ndev->name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
		dev->driver = &netiucv_driver;
	} else
		return -ENOMEM;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}
	ret = netiucv_add_files(dev);
	if (ret)
		goto out_unreg;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);
	return 0;

out_unreg:
	device_unregister(dev);
	return ret;
}

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_remove_files(dev);
	device_unregister(dev);
}
/*
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections;
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
/*
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}
/*
 * Release everything of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return;

	if (privptr) {
		if (privptr->conn)
			netiucv_remove_connection(privptr->conn);
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		privptr->conn = NULL; privptr->fsm = NULL;
		/* privptr gets freed by free_netdev() */
	}
	free_netdev(dev);
}
/*
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
	.ndo_change_mtu		= netiucv_change_mtu,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu	         = NETIUCV_MTU_DEFAULT;
	dev->destructor          = netiucv_free_netdevice;
	dev->hard_header_len     = NETIUCV_HDRLEN;
	dev->type                = ARPHRD_SLIP;
	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops		 = &netiucv_netdev_ops;
}
/*
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	free_netdev(dev);
	return NULL;
}
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	const char *p;
	char username[9];
	int i, rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
				"to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdev(dev);
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been"
		" established successfully\n", netiucv_printname(username));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				" to %s and cannot be removed\n",
				priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);
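
/*
 * Typical use of the two driver attributes above (a sketch; the sysfs
 * locations assume the driver is registered on the standard iucv bus):
 *
 *   echo TESTVM2 > /sys/bus/iucv/drivers/netiucv/connection   # create iucvX
 *   echo iucv0   > /sys/bus/iucv/drivers/netiucv/remove       # tear it down
 */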
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}
	device_unregister(netiucv_dev);
	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
	return;
}
static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}
	/* establish dummy device */
	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!netiucv_dev) {
		rc = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(netiucv_dev, "netiucv");
	netiucv_dev->bus = &iucv_bus;
	netiucv_dev->parent = iucv_root;
	netiucv_dev->release = (void (*)(struct device *))kfree;
	netiucv_dev->driver = &netiucv_driver;
	rc = device_register(netiucv_dev);
	if (rc) {
		put_device(netiucv_dev);
		goto out_driver;
	}
	netiucv_banner();
	return rc;

out_driver:
	driver_unregister(&netiucv_driver);
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");