/*
 * $Id: ctcmain.c,v 1.72 2005/03/17 10:51:52 ptiedem Exp $
 *
 * CTC / ESCON network driver
 *
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
 *            Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *            Peter Tiedemann (ptiedem@de.ibm.com)
 * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
 *
 * Documentation used:
 *  - Principles of Operation (IBM doc#: SA22-7201-06)
 *  - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
 *  - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
 *  - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
 *  - ESCON I/O Interface (IBM doc#: SA22-7202-029
 *
 * and the source of the original CTC driver by:
 *  Dieter Wellerdiek (wel@de.ibm.com)
 *  Martin Schwidefsky (schwidefsky@de.ibm.com)
 *  Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *  Jochen Röhrig (roehrig@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.72 $
 *
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/uaccess.h>

#include <asm/idals.h>

#include "ctctty.h"
#include "fsm.h"
#include "cu3088.h"
#include "ctcdbug.h"

MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
MODULE_LICENSE("GPL");
/**
 * CCW commands, used in this driver.
 */
#define CCW_CMD_WRITE		0x01
#define CCW_CMD_READ		0x02
#define CCW_CMD_SET_EXTENDED	0xc3
#define CCW_CMD_PREPARE		0xe3

#define CTC_PROTO_S390		0
#define CTC_PROTO_LINUX		1
#define CTC_PROTO_LINUX_TTY	2
#define CTC_PROTO_OS390		3
#define CTC_PROTO_MAX		3

#define CTC_BUFSIZE_LIMIT	65535
#define CTC_BUFSIZE_DEFAULT	32768

#define CTC_TIMEOUT_5SEC	5000

#define CTC_INITIAL_BLOCKLEN	2

#define READ			0
#define WRITE			1

#define CTC_ID_SIZE		BUS_ID_SIZE+3
struct ctc_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	struct timespec send_stamp;
};

/**
 * Definition of one channel
 */
struct channel {

	/**
	 * Pointer to next channel in list.
	 */
	struct channel *next;
	char id[CTC_ID_SIZE];
	struct ccw_device *cdev;

	/**
	 * Type of this channel.
	 * CTC/A or Escon for valid channels.
	 */
	enum channel_types type;

	/**
	 * Misc. flags. See CHANNEL_FLAGS_... below
	 */
	__u32 flags;

	/**
	 * The protocol of this channel
	 */
	__u16 protocol;

	/**
	 * I/O and irq related stuff
	 */
	struct ccw1 *ccw;
	struct irb *irb;

	/**
	 * RX/TX buffer size
	 */
	int max_bufsize;

	/**
	 * Transmit/Receive buffer.
	 */
	struct sk_buff *trans_skb;

	/**
	 * Universal I/O queue.
	 */
	struct sk_buff_head io_queue;

	/**
	 * TX queue for collecting skb's during busy.
	 */
	struct sk_buff_head collect_queue;

	/**
	 * Amount of data in collect_queue.
	 */
	int collect_len;

	/**
	 * spinlock for collect_queue and collect_len
	 */
	spinlock_t collect_lock;

	/**
	 * Timer for detecting unresponsive
	 * I/O operations.
	 */
	fsm_timer timer;

	/**
	 * Retry counter for misc. operations.
	 */
	int retry;

	/**
	 * The finite state machine of this channel
	 */
	fsm_instance *fsm;

	/**
	 * The corresponding net_device this channel
	 * belongs to.
	 */
	struct net_device *netdev;

	struct ctc_profile prof;

	unsigned char *trans_skb_data;

	__u16 logflags;
};

#define CHANNEL_FLAGS_READ		0
#define CHANNEL_FLAGS_WRITE		1
#define CHANNEL_FLAGS_INUSE		2
#define CHANNEL_FLAGS_BUFSIZE_CHANGED	4
#define CHANNEL_FLAGS_FAILED		8
#define CHANNEL_FLAGS_WAITIRQ		16
#define CHANNEL_FLAGS_RWMASK		1
#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
#define LOG_FLAG_ILLEGALPKT	1
#define LOG_FLAG_ILLEGALSIZE	2
#define LOG_FLAG_OVERRUN	4
#define LOG_FLAG_NOMEM		8

#define CTC_LOGLEVEL_INFO	1
#define CTC_LOGLEVEL_NOTICE	2
#define CTC_LOGLEVEL_WARN	4
#define CTC_LOGLEVEL_EMERG	8
#define CTC_LOGLEVEL_ERR	16
#define CTC_LOGLEVEL_DEBUG	32
#define CTC_LOGLEVEL_CRIT	64

#define CTC_LOGLEVEL_DEFAULT \
(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)

#define CTC_LOGLEVEL_MAX	((CTC_LOGLEVEL_CRIT<<1)-1)

static int loglevel = CTC_LOGLEVEL_DEFAULT;

#define ctc_pr_debug(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)

#define ctc_pr_info(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)

#define ctc_pr_notice(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)

#define ctc_pr_warn(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)

#define ctc_pr_emerg(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)

#define ctc_pr_err(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)

#define ctc_pr_crit(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
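
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * each ctc_pr_* macro only emits its message when the matching bit is set in
 * the module-wide 'loglevel' bitmask, so verbosity can be tuned at run time.
 * Guarded with #if 0 so it is never compiled.
 */
#if 0
static void ctc_loglevel_sketch(void)
{
	/* Enable debug output in addition to the default levels. */
	loglevel = CTC_LOGLEVEL_DEFAULT | CTC_LOGLEVEL_DEBUG;
	ctc_pr_debug("ctc: now visible, DEBUG bit is set\n");

	/* Suppress everything except CRIT messages. */
	loglevel = CTC_LOGLEVEL_CRIT;
	ctc_pr_info("ctc: this line is filtered out\n");
}
#endif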
/**
 * Linked list of all detected channels.
 */
static struct channel *channels = NULL;

struct ctc_priv {
	struct net_device_stats stats;
	unsigned long tbusy;
	/**
	 * The finite state machine of this interface.
	 */
	fsm_instance *fsm;
	/**
	 * The protocol of this device
	 */
	__u16 protocol;
	/**
	 * Timer for restarting after I/O Errors
	 */
	fsm_timer restart_timer;

	int buffer_size;

	struct channel *channel[2];
};

/**
 * Definition of our link level header.
 */
struct ll_header {
	__u16 length;
	__u16 type;
	__u16 unused;
};
#define LL_HEADER_LENGTH (sizeof(struct ll_header))
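
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * every transmitted block starts with a 2-byte overall block length followed
 * by one or more ll_header+payload pairs. The helper below shows how such a
 * header could be filled for a single IP packet of 'data_len' bytes; the
 * function name is hypothetical and the fragment is guarded with #if 0.
 */
#if 0
static void ctc_build_ll_header_sketch(struct ll_header *h, __u16 data_len)
{
	h->length = data_len + LL_HEADER_LENGTH;	/* header plus payload */
	h->type   = ETH_P_IP;				/* consumed by ctc_unpack_skb */
	h->unused = 0;
}
#endif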
/**
 * Compatibility macros for busy handling
 * of network devices.
 */
static __inline__ void
ctc_clear_busy(struct net_device * dev)
{
	clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
	if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
		netif_wake_queue(dev);
}

static __inline__ int
ctc_test_and_set_busy(struct net_device * dev)
{
	if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
		netif_stop_queue(dev);
	return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
}
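
/*
 * Illustrative sketch (editor's addition): how a hard_start_xmit style
 * routine would typically bracket a transmit attempt with the two helpers
 * above. 'ctc_tx_sketch' and its body are hypothetical and guarded with
 * #if 0; they are not part of the original driver.
 */
#if 0
static int ctc_tx_sketch(struct sk_buff *skb, struct net_device *dev)
{
	if (ctc_test_and_set_busy(dev))
		return -EBUSY;		/* queue already stopped, retry later */

	/* ... hand the skb to the write channel here ... */

	ctc_clear_busy(dev);		/* re-enable the queue on completion */
	return 0;
}
#endif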
320 * Print Banner.
322 static void
323 print_banner(void)
325 static int printed = 0;
326 char vbuf[] = "$Revision: 1.72 $";
327 char *version = vbuf;
329 if (printed)
330 return;
331 if ((version = strchr(version, ':'))) {
332 char *p = strchr(version + 1, '$');
333 if (p)
334 *p = '\0';
335 } else
336 version = " ??? ";
337 printk(KERN_INFO "CTC driver Version%s"
338 #ifdef DEBUG
339 " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
340 #endif
341 " initialized\n", version);
342 printed = 1;
/**
 * Return type of a detected device.
 */
static enum channel_types
get_channel_type(struct ccw_device_id *id)
{
	enum channel_types type = (enum channel_types) id->driver_info;

	if (type == channel_type_ficon)
		type = channel_type_escon;

	return type;
}
/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT_RXTX,
	DEV_STATE_STARTWAIT_RX,
	DEV_STATE_STARTWAIT_TX,
	DEV_STATE_STOPWAIT_RXTX,
	DEV_STATE_STOPWAIT_RX,
	DEV_STATE_STOPWAIT_TX,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

static const char *dev_state_names[] = {
	"Stopped",
	"StartWait RXTX",
	"StartWait RX",
	"StartWait TX",
	"StopWait RXTX",
	"StopWait RX",
	"StopWait TX",
	"Running",
};

/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_RXUP,
	DEV_EVENT_TXUP,
	DEV_EVENT_RXDOWN,
	DEV_EVENT_TXDOWN,
	DEV_EVENT_RESTART,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"RX up",
	"TX up",
	"RX down",
	"TX down",
	"Restart",
};
/**
 * Events of the channel statemachine
 */
enum ch_events {
	/**
	 * Events, representing return code of
	 * I/O operations (ccw_device_start, ccw_device_halt et al.)
	 */
	CH_EVENT_IO_SUCCESS,
	CH_EVENT_IO_EBUSY,
	CH_EVENT_IO_ENODEV,
	CH_EVENT_IO_EIO,
	CH_EVENT_IO_UNKNOWN,

	CH_EVENT_ATTNBUSY,
	CH_EVENT_ATTN,
	CH_EVENT_BUSY,

	/**
	 * Events, representing unit-check
	 */
	CH_EVENT_UC_RCRESET,
	CH_EVENT_UC_RSRESET,
	CH_EVENT_UC_TXTIMEOUT,
	CH_EVENT_UC_TXPARITY,
	CH_EVENT_UC_HWFAIL,
	CH_EVENT_UC_RXPARITY,
	CH_EVENT_UC_ZERO,
	CH_EVENT_UC_UNKNOWN,

	/**
	 * Events, representing subchannel-check
	 */
	CH_EVENT_SC_UNKNOWN,

	/**
	 * Events, representing machine checks
	 */
	CH_EVENT_MC_FAIL,
	CH_EVENT_MC_GOOD,

	/**
	 * Event, representing normal IRQ
	 */
	CH_EVENT_IRQ,
	CH_EVENT_FINSTAT,

	/**
	 * Event, representing timer expiry.
	 */
	CH_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CH_EVENT_START,
	CH_EVENT_STOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CH_EVENTS,
};

static const char *ch_event_names[] = {
	"ccw_device success",
	"ccw_device busy",
	"ccw_device enodev",
	"ccw_device ioerr",
	"ccw_device unknown",

	"Status ATTN & BUSY",
	"Status ATTN",
	"Status BUSY",

	"Unit check remote reset",
	"Unit check remote system reset",
	"Unit check TX timeout",
	"Unit check TX parity",
	"Unit check Hardware failure",
	"Unit check RX parity",
	"Unit check ZERO",
	"Unit check Unknown",

	"SubChannel check Unknown",

	"Machine check failure",
	"Machine check operational",

	"IRQ normal",
	"IRQ final",

	"Timer",

	"Start",
	"Stop",
};
/**
 * States of the channel statemachine.
 */
enum ch_states {
	/**
	 * Channel not assigned to any device,
	 * initial state, direction invalid
	 */
	CH_STATE_IDLE,

	/**
	 * Channel assigned but not operating
	 */
	CH_STATE_STOPPED,
	CH_STATE_STARTWAIT,
	CH_STATE_STARTRETRY,
	CH_STATE_SETUPWAIT,
	CH_STATE_RXINIT,
	CH_STATE_TXINIT,
	CH_STATE_RX,
	CH_STATE_TX,
	CH_STATE_RXIDLE,
	CH_STATE_TXIDLE,
	CH_STATE_RXERR,
	CH_STATE_TXERR,
	CH_STATE_TERM,
	CH_STATE_DTERM,
	CH_STATE_NOTOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CH_STATES,
};

static const char *ch_state_names[] = {
	"Idle",
	"Stopped",
	"StartWait",
	"StartRetry",
	"SetupWait",
	"RX init",
	"TX init",
	"RX",
	"TX",
	"RX idle",
	"TX idle",
	"RX error",
	"TX error",
	"Terminating",
	"Restarting",
	"Not operational",
};
#ifdef DEBUG
/**
 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
 *
 * @param skb    The sk_buff to dump.
 * @param offset Offset relative to skb-data, where to start the dump.
 */
static void
ctc_dump_skb(struct sk_buff *skb, int offset)
{
	unsigned char *p = skb->data;
	__u16 bl;
	struct ll_header *header;
	int i;

	if (!(loglevel & CTC_LOGLEVEL_DEBUG))
		return;
	p += offset;
	bl = *((__u16 *) p);
	p += 2;
	header = (struct ll_header *) p;
	p -= 2;

	printk(KERN_DEBUG "dump:\n");
	printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);

	printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
	       header->length);
	printk(KERN_DEBUG "h->type=%04x\n", header->type);
	printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
	if (bl > 16)
		bl = 16;
	printk(KERN_DEBUG "data: ");
	for (i = 0; i < bl; i++)
		printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
	printk("\n");
}
#else
static inline void
ctc_dump_skb(struct sk_buff *skb, int offset)
{
}
#endif
/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 * @param ch   The channel where this skb has been received.
 * @param pskb The received skb.
 */
static __inline__ void
ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
	__u16 len = *((__u16 *) pskb->data);

	DBF_TEXT(trace, 4, __FUNCTION__);
	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTC_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {

#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
#endif
				/**
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				ctc_pr_warn(
					"%s Illegal packet type 0x%04x received, dropping\n",
					dev->name, header->type);
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if (header->length <= LL_HEADER_LENGTH) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
#endif
				ctc_pr_warn(
					"%s Illegal packet size %d "
					"received (MTU=%d blocklen=%d), "
					"dropping\n", dev->name, header->length,
					dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		if ((header->length > skb_tailroom(pskb)) ||
		    (header->length > len)) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
#endif
				ctc_pr_warn(
					"%s Illegal packet size %d "
					"(beyond the end of received data), "
					"dropping\n", dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		pskb->mac.raw = pskb->data;
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
#endif
				ctc_pr_warn(
					"%s Out of memory in ctc_unpack_skb\n",
					dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
#ifndef DEBUG
			}
#endif
			privptr->stats.rx_dropped++;
			return;
		}
		memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
		skb->mac.raw = skb->data;
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		if (ch->protocol == CTC_PROTO_LINUX_TTY)
			ctc_tty_netif_rx(skb);
		else
			netif_rx_ni(skb);
		/**
		 * Successful rx; reset logflags
		 */
		ch->logflags = 0;
		dev->last_rx = jiffies;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
#ifndef DEBUG
				if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
#endif
					ctc_pr_warn(
						"%s Overrun in ctc_unpack_skb\n",
						dev->name);
					ch->logflags |= LOG_FLAG_OVERRUN;
#ifndef DEBUG
				}
#endif
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
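
/*
 * Illustrative sketch (editor's addition): the receive path above walks a
 * block of the form
 *
 *   [2-byte block length][ll_header][payload][ll_header][payload]...
 *
 * The #if 0 fragment below shows the same walk in isolation, without the
 * error handling and skb bookkeeping; 'buf' and the helper name are
 * hypothetical, and h->length is taken to cover header plus payload in this
 * simplified model.
 */
#if 0
static void ctc_walk_block_sketch(unsigned char *buf)
{
	__u16 len = *((__u16 *) buf) - 2;	/* bytes following the length field */
	unsigned char *p = buf + 2;

	while (len > 0) {
		struct ll_header *h = (struct ll_header *) p;

		/* hand (p + LL_HEADER_LENGTH, payload) to the stack here */
		p += h->length;
		len -= h->length;
	}
}
#endif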
/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * @param ch          The channel, the error belongs to.
 * @param return_code The error code to inspect.
 */
static void inline
ccw_check_return_code(struct channel *ch, int return_code, char *msg)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	switch (return_code) {
	case 0:
		fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
		break;
	case -EBUSY:
		ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
		fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
			     ch->id, msg);
		fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
		break;
	case -EIO:
		ctc_pr_emerg("%s (%s): Status pending... \n",
			     ch->id, msg);
		fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
		break;
	default:
		ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
			     ch->id, msg, return_code);
		fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
	}
}
/**
 * Check sense of a unit check.
 *
 * @param ch    The channel, the sense code belongs to.
 * @param sense The sense code to inspect.
 */
static void inline
ccw_unit_check(struct channel *ch, unsigned char sense)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	if (sense & SNS0_INTERVENTION_REQ) {
		if (sense & 0x01) {
			if (ch->protocol != CTC_PROTO_LINUX_TTY)
				ctc_pr_debug("%s: Interface disc. or Sel. reset "
					     "(remote)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
		} else {
			ctc_pr_debug("%s: System reset (remote)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
		}
	} else if (sense & SNS0_EQUIPMENT_CHECK) {
		if (sense & SNS0_BUS_OUT_CHECK) {
			ctc_pr_warn("%s: Hardware malfunction (remote)\n",
				    ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
		} else {
			ctc_pr_warn("%s: Read-data parity error (remote)\n",
				    ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
		}
	} else if (sense & SNS0_BUS_OUT_CHECK) {
		if (sense & 0x04) {
			ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
		} else {
			ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
		}
	} else if (sense & SNS0_CMD_REJECT) {
		ctc_pr_warn("%s: Command reject\n", ch->id);
	} else if (sense == 0) {
		ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
		fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
	} else {
		ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
			    ch->id, sense);
		fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
	}
}
static void
ctc_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	DBF_TEXT(trace, 5, __FUNCTION__);

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
}
static __inline__ int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	if ((ch->trans_skb == NULL) ||
	    (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
		if (ch->trans_skb != NULL)
			dev_kfree_skb(ch->trans_skb);
		clear_normalized_cda(&ch->ccw[1]);
		ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
						GFP_ATOMIC | GFP_DMA);
		if (ch->trans_skb == NULL) {
			if (warn)
				ctc_pr_warn(
					"%s: Couldn't alloc %s trans_skb\n",
					ch->id,
					(CHANNEL_DIRECTION(ch->flags) == READ) ?
					"RX" : "TX");
			return -ENOMEM;
		}
		ch->ccw[1].count = ch->max_bufsize;
		if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
			dev_kfree_skb(ch->trans_skb);
			ch->trans_skb = NULL;
			if (warn)
				ctc_pr_warn(
					"%s: set_normalized_cda for %s "
					"trans_skb failed, dropping packets\n",
					ch->id,
					(CHANNEL_DIRECTION(ch->flags) == READ) ?
					"RX" : "TX");
			return -ENOMEM;
		}
		ch->ccw[1].count = 0;
		ch->trans_skb_data = ch->trans_skb->data;
		ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
	}
	return 0;
}
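
/*
 * Illustrative sketch (editor's addition): callers re-check the transfer
 * buffer right before (re)starting I/O, so a changed max_bufsize only takes
 * effect at the next safe point. The hypothetical helper below mirrors the
 * call pattern used by the RX actions; guarded with #if 0.
 */
#if 0
static int ctc_restart_read_sketch(struct channel *ch)
{
	if (ctc_checkalloc_buffer(ch, 1))
		return -ENOMEM;			/* keep old state, retry later */

	ch->ccw[1].count = ch->max_bufsize;
	return ccw_device_start(ch->cdev, &ch->ccw[0],
				(unsigned long) ch, 0xff, 0);
}
#endif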
/**
 * Dummy NOP action for statemachines
 */
static void
fsm_action_nop(fsm_instance * fi, int event, void *arg)
{
}

/*****************************************************************************
 * Actions for channel - statemachines.
 *****************************************************************************/
/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txdone(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = xtime;

	DBF_TEXT(trace, 4, __FUNCTION__);

	duration =
	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.count != 0)
		ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
			     dev->name, ch->irb->scsw.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		privptr->stats.tx_packets++;
		privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			privptr->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctc_checkalloc_buffer(ch, 1)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
			       skb->len);
			privptr->stats.tx_packets++;
			privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
		ch->prof.send_stamp = xtime;
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				      (unsigned long) ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			privptr->stats.tx_dropped += i;
			privptr->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ccw_check_return_code(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CH_STATE_TXIDLE);
	}
	ctc_clear_busy(dev);
}
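
/*
 * Illustrative sketch (editor's addition): the duration bookkeeping above
 * converts a timespec delta into microseconds and keeps the per-channel
 * maximum in ch->prof.tx_time. The same arithmetic in isolation, with a
 * hypothetical helper name, guarded with #if 0:
 */
#if 0
static unsigned long ctc_tx_usecs_sketch(struct timespec start,
					 struct timespec done)
{
	return (done.tv_sec - start.tv_sec) * 1000000 +
	       (done.tv_nsec - start.tv_nsec) / 1000;
}
#endif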
/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txidle(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_TXIDLE);
	fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
		  ch->netdev);
}
/**
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rx(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;
	int len = ch->max_bufsize - ch->irb->scsw.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *) skb->data);
	int check_len;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (len < 8) {
		ctc_pr_debug("%s: got packet with length %d < 8\n",
			     dev->name, len);
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		ctc_pr_debug("%s: got packet with length %d > %d\n",
			     dev->name, len, ch->max_bufsize);
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}

	/**
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTC_PROTO_S390:
	case CTC_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		ctc_pr_debug("%s: got block length %d != rx length %d\n",
			     dev->name, block_len, len);
#ifdef DEBUG
		ctc_dump_skb(skb, 0);
#endif
		*((__u16 *) skb->data) = len;
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}
	block_len -= 2;
	if (block_len > 0) {
		*((__u16 *) skb->data) = block_len;
		ctc_unpack_skb(ch, skb);
	}
 again:
	skb->data = skb->tail = ch->trans_skb_data;
	skb->len = 0;
	if (ctc_checkalloc_buffer(ch, 1))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
	if (rc != 0)
		ccw_check_return_code(ch, rc, "normal RX");
}
static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_firstio(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);

	if (fsm_getstate(fi) == CH_STATE_TXIDLE)
		ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
	fsm_deltimer(&ch->timer);
	if (ctc_checkalloc_buffer(ch, 1))
		return;
	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
	    (ch->protocol == CTC_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
				     CH_EVENT_TIMER, ch);
			ch_action_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			fsm_newstate(fi, CH_STATE_TXIDLE);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_TXUP, dev);
		}
		return;
	}

	/**
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
	    (ch->protocol != CTC_PROTO_S390))
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);

	*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CH_STATE_RXINIT : CH_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CH_STATE_SETUPWAIT);
		ccw_check_return_code(ch, rc, "init IO");
	}
	/**
	 * If in compatibility mode, since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
	    (ch->protocol == CTC_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
			  dev);
	}
}
/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxidle(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	__u16 buflen;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
	ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
#endif
	if (buflen >= CTC_INITIAL_BLOCKLEN) {
		if (ctc_checkalloc_buffer(ch, 1))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CH_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				      (unsigned long) ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CH_STATE_RXINIT);
			ccw_check_return_code(ch, rc, "initial RX");
		} else
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_RXUP, dev);
	} else {
		ctc_pr_debug("%s: Initial RX count %d not %d\n",
			     dev->name, buflen, CTC_INITIAL_BLOCKLEN);
		ch_action_firstio(fi, event, arg);
	}
}
/**
 * Set channel into extended mode.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_setmode(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	int rc;
	unsigned long saveflags;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	fsm_newstate(fi, CH_STATE_SETUPWAIT);
	saveflags = 0;	/* avoids compiler warning with
			   spin_unlock_irqrestore */
	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
	if (event == CH_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CH_STATE_STARTWAIT);
		ccw_check_return_code(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}
/**
 * Setup channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_start(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	unsigned long saveflags;
	int rc;
	struct net_device *dev;

	DBF_TEXT(trace, 4, __FUNCTION__);
	if (ch == NULL) {
		ctc_pr_warn("ch_action_start ch=NULL\n");
		return;
	}
	if (ch->netdev == NULL) {
		ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
		return;
	}
	dev = ch->netdev;

#ifdef DEBUG
	ctc_pr_debug("%s: %s channel start\n", dev->name,
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
#endif

	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctc_checkalloc_buffer(ch, 0)) {
		ctc_pr_notice(
			"%s: Could not allocate %s trans_skb, delaying "
			"allocation until first transfer\n",
			dev->name,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	}

	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CH_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ccw_check_return_code(ch, rc, "initial HaltIO");
	}
#ifdef DEBUG
	ctc_pr_debug("ctc: %s(): leaving\n", __func__);
#endif
}
/**
 * Shutdown a channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_haltio(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	unsigned long saveflags;
	int rc;
	int oldstate;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	saveflags = 0;	/* avoids compiler warning with
			   spin_unlock_irqrestore */
	if (event == CH_EVENT_STOP)	// only for STOP not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CH_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	if (event == CH_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
	}
}
/**
 * A channel has successfully been halted.
 * Cleanup its queue and notify interface statemachine.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_stopped(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_STOPPED);
	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		ctc_purge_skb_queue(&ch->io_queue);
		spin_lock(&ch->collect_lock);
		ctc_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}
/**
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_stop(fsm_instance * fi, int event, void *arg)
{
	fsm_newstate(fi, CH_STATE_STOPPED);
}
/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_fail(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_NOTOP);
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		ctc_purge_skb_queue(&ch->io_queue);
		spin_lock(&ch->collect_lock);
		ctc_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}
/**
 * Handle error during setup of channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_setuperr(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	/**
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't setup. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
	    ((event == CH_EVENT_UC_RCRESET) ||
	     (event == CH_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CH_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
			if (rc != 0)
				ccw_check_return_code(
					ch, rc, "HaltIO in ch_action_setuperr");
		}
		return;
	}

	ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
		     dev->name, ch_event_names[event],
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
		     fsm_getstate_str(fi));
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		fsm_newstate(fi, CH_STATE_RXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CH_STATE_TXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}
/**
 * Restart a channel after an error.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_restart(fsm_instance * fi, int event, void *arg)
{
	unsigned long saveflags;
	int oldstate;
	int rc;

	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctc_pr_debug("%s: %s channel restart\n", dev->name,
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CH_STATE_STARTWAIT);
	saveflags = 0;	/* avoids compiler warning with
			   spin_unlock_irqrestore */
	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	if (event == CH_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
	}
}
/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	if (event == CH_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
		if (ch->retry++ < 3)
			ch_action_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CH_STATE_RXERR);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_RXDOWN, dev);
		}
	} else
		ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
}
/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	fsm_newstate(fi, CH_STATE_RXERR);
	ctc_pr_warn("%s: RX initialization failed\n", dev->name);
	ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
}
/**
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
		     dev->name);

	/**
	 * Notify device statemachine
	 */
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CH_STATE_DTERM);
	ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
	fsm_newstate(ch2->fsm, CH_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long) ch);
	ccw_device_halt(ch2->cdev, (unsigned long) ch2);
}
/**
 * Handle error during TX channel initialization.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 2, __FUNCTION__);
	if (event == CH_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
		if (ch->retry++ < 3)
			ch_action_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CH_STATE_TXERR);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_TXDOWN, dev);
		}
	} else
		ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
}
/**
 * Handle TX timeout by retrying operation.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txretry(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		ctc_pr_debug("%s: TX retry failed, restarting channel\n",
			     dev->name);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
		ch_action_restart(fi, event, arg);
	} else {
		struct sk_buff *skb;

		ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
		if ((skb = skb_peek(&ch->io_queue))) {
			int rc = 0;

			clear_normalized_cda(&ch->ccw[4]);
			ch->ccw[4].count = skb->len;
			if (set_normalized_cda(&ch->ccw[4], skb->data)) {
				ctc_pr_debug(
					"%s: IDAL alloc failed, chan restart\n",
					dev->name);
				fsm_event(((struct ctc_priv *) dev->priv)->fsm,
					  DEV_EVENT_TXDOWN, dev);
				ch_action_restart(fi, event, arg);
				return;
			}
			fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
			saveflags = 0;	/* avoids compiler warning with
					   spin_unlock_irqrestore */
			if (event == CH_EVENT_TIMER)	// only for TIMER not yet locked
				spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
						  saveflags);
			rc = ccw_device_start(ch->cdev, &ch->ccw[3],
					      (unsigned long) ch, 0xff, 0);
			if (event == CH_EVENT_TIMER)
				spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
						       saveflags);
			if (rc != 0) {
				fsm_deltimer(&ch->timer);
				ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
				ctc_purge_skb_queue(&ch->io_queue);
			}
		}
	}
}
/**
 * Handle fatal errors during an I/O command.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_iofatal(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ctc_pr_debug("%s: RX I/O error\n", dev->name);
		fsm_newstate(fi, CH_STATE_RXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		ctc_pr_debug("%s: TX I/O error\n", dev->name);
		fsm_newstate(fi, CH_STATE_TXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}
static void
ch_action_reinit(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = (struct channel *)arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;

	DBF_TEXT(trace, 4, __FUNCTION__);
	ch_action_iofatal(fi, event, arg);
	fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}
/**
 * The statemachine for a channel.
 */
static const fsm_node ch_fsm[] = {
	{CH_STATE_STOPPED,    CH_EVENT_STOP,       fsm_action_nop       },
	{CH_STATE_STOPPED,    CH_EVENT_START,      ch_action_start      },
	{CH_STATE_STOPPED,    CH_EVENT_FINSTAT,    fsm_action_nop       },
	{CH_STATE_STOPPED,    CH_EVENT_MC_FAIL,    fsm_action_nop       },

	{CH_STATE_NOTOP,      CH_EVENT_STOP,       ch_action_stop       },
	{CH_STATE_NOTOP,      CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_NOTOP,      CH_EVENT_FINSTAT,    fsm_action_nop       },
	{CH_STATE_NOTOP,      CH_EVENT_MC_FAIL,    fsm_action_nop       },
	{CH_STATE_NOTOP,      CH_EVENT_MC_GOOD,    ch_action_start      },

	{CH_STATE_STARTWAIT,  CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_STARTWAIT,  CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_STARTWAIT,  CH_EVENT_FINSTAT,    ch_action_setmode    },
	{CH_STATE_STARTWAIT,  CH_EVENT_TIMER,      ch_action_setuperr   },
	{CH_STATE_STARTWAIT,  CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_STARTWAIT,  CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_STARTWAIT,  CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_STARTRETRY, CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_STARTRETRY, CH_EVENT_TIMER,      ch_action_setmode    },
	{CH_STATE_STARTRETRY, CH_EVENT_FINSTAT,    fsm_action_nop       },
	{CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_SETUPWAIT,  CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_SETUPWAIT,  CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_SETUPWAIT,  CH_EVENT_FINSTAT,    ch_action_firstio    },
	{CH_STATE_SETUPWAIT,  CH_EVENT_UC_RCRESET, ch_action_setuperr   },
	{CH_STATE_SETUPWAIT,  CH_EVENT_UC_RSRESET, ch_action_setuperr   },
	{CH_STATE_SETUPWAIT,  CH_EVENT_TIMER,      ch_action_setmode    },
	{CH_STATE_SETUPWAIT,  CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_SETUPWAIT,  CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_SETUPWAIT,  CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_RXINIT,     CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_RXINIT,     CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_RXINIT,     CH_EVENT_FINSTAT,    ch_action_rxidle     },
	{CH_STATE_RXINIT,     CH_EVENT_UC_RCRESET, ch_action_rxiniterr  },
	{CH_STATE_RXINIT,     CH_EVENT_UC_RSRESET, ch_action_rxiniterr  },
	{CH_STATE_RXINIT,     CH_EVENT_TIMER,      ch_action_rxiniterr  },
	{CH_STATE_RXINIT,     CH_EVENT_ATTNBUSY,   ch_action_rxinitfail },
	{CH_STATE_RXINIT,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_RXINIT,     CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_RXINIT,     CH_EVENT_UC_ZERO,    ch_action_firstio    },
	{CH_STATE_RXINIT,     CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_RXIDLE,     CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_RXIDLE,     CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_RXIDLE,     CH_EVENT_FINSTAT,    ch_action_rx         },
	{CH_STATE_RXIDLE,     CH_EVENT_UC_RCRESET, ch_action_rxdisc     },
//	{CH_STATE_RXIDLE,     CH_EVENT_UC_RSRESET, ch_action_rxretry    },
	{CH_STATE_RXIDLE,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_RXIDLE,     CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_RXIDLE,     CH_EVENT_MC_FAIL,    ch_action_fail       },
	{CH_STATE_RXIDLE,     CH_EVENT_UC_ZERO,    ch_action_rx         },

	{CH_STATE_TXINIT,     CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_TXINIT,     CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_TXINIT,     CH_EVENT_FINSTAT,    ch_action_txidle     },
	{CH_STATE_TXINIT,     CH_EVENT_UC_RCRESET, ch_action_txiniterr  },
	{CH_STATE_TXINIT,     CH_EVENT_UC_RSRESET, ch_action_txiniterr  },
	{CH_STATE_TXINIT,     CH_EVENT_TIMER,      ch_action_txiniterr  },
	{CH_STATE_TXINIT,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_TXINIT,     CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_TXINIT,     CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_TXIDLE,     CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_TXIDLE,     CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_TXIDLE,     CH_EVENT_FINSTAT,    ch_action_firstio    },
	{CH_STATE_TXIDLE,     CH_EVENT_UC_RCRESET, fsm_action_nop       },
	{CH_STATE_TXIDLE,     CH_EVENT_UC_RSRESET, fsm_action_nop       },
	{CH_STATE_TXIDLE,     CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_TXIDLE,     CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_TXIDLE,     CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_TERM,       CH_EVENT_STOP,       fsm_action_nop       },
	{CH_STATE_TERM,       CH_EVENT_START,      ch_action_restart    },
	{CH_STATE_TERM,       CH_EVENT_FINSTAT,    ch_action_stopped    },
	{CH_STATE_TERM,       CH_EVENT_UC_RCRESET, fsm_action_nop       },
	{CH_STATE_TERM,       CH_EVENT_UC_RSRESET, fsm_action_nop       },
	{CH_STATE_TERM,       CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_DTERM,      CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_DTERM,      CH_EVENT_START,      ch_action_restart    },
	{CH_STATE_DTERM,      CH_EVENT_FINSTAT,    ch_action_setmode    },
	{CH_STATE_DTERM,      CH_EVENT_UC_RCRESET, fsm_action_nop       },
	{CH_STATE_DTERM,      CH_EVENT_UC_RSRESET, fsm_action_nop       },
	{CH_STATE_DTERM,      CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_TX,         CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_TX,         CH_EVENT_START,      fsm_action_nop       },
	{CH_STATE_TX,         CH_EVENT_FINSTAT,    ch_action_txdone     },
	{CH_STATE_TX,         CH_EVENT_UC_RCRESET, ch_action_txretry    },
	{CH_STATE_TX,         CH_EVENT_UC_RSRESET, ch_action_txretry    },
	{CH_STATE_TX,         CH_EVENT_TIMER,      ch_action_txretry    },
	{CH_STATE_TX,         CH_EVENT_IO_ENODEV,  ch_action_iofatal    },
	{CH_STATE_TX,         CH_EVENT_IO_EIO,     ch_action_reinit     },
	{CH_STATE_TX,         CH_EVENT_MC_FAIL,    ch_action_fail       },

	{CH_STATE_RXERR,      CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_TXERR,      CH_EVENT_STOP,       ch_action_haltio     },
	{CH_STATE_TXERR,      CH_EVENT_MC_FAIL,    ch_action_fail       },
	{CH_STATE_RXERR,      CH_EVENT_MC_FAIL,    ch_action_fail       },
};

static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
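
/*
 * Illustrative sketch (editor's addition): conceptually, the fsm core looks
 * up the (state, event) pair in a table like ch_fsm and runs the registered
 * action; pairs without an entry are ignored. The loop below is a simplified
 * model of that dispatch, not the actual fsm.h implementation, and the
 * fsm_node field names used here are an assumption. Guarded with #if 0.
 */
#if 0
static void fsm_dispatch_sketch(fsm_instance *fi, int state, int event, void *arg)
{
	int i;

	for (i = 0; i < CH_FSM_LEN; i++)
		if (ch_fsm[i].cond_state == state &&
		    ch_fsm[i].cond_event == event) {
			ch_fsm[i].function(fi, event, arg);
			return;
		}
	/* no matching entry: the event is silently dropped in this model */
}
#endif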
/*****************************************************************************
 * Functions related to setup and device detection.
 *****************************************************************************/

static inline int
less_than(char *id1, char *id2)
{
	int dev1, dev2, i;

	for (i = 0; i < 5; i++) {
		id1++;
		id2++;
	}
	dev1 = simple_strtoul(id1, &id1, 16);
	dev2 = simple_strtoul(id2, &id2, 16);

	return (dev1 < dev2);
}
/**
 * Add a new channel to the list of channels.
 * Keeps the channel list sorted.
 *
 * @param cdev The ccw_device to be added.
 * @param type The type class of the new channel.
 *
 * @return 0 on success, !0 on error.
 */
static int
add_channel(struct ccw_device *cdev, enum channel_types type)
{
	struct channel **c = &channels;
	struct channel *ch;

	DBF_TEXT(trace, 2, __FUNCTION__);
	if ((ch =
	     (struct channel *) kmalloc(sizeof (struct channel),
					GFP_KERNEL)) == NULL) {
		ctc_pr_warn("ctc: Out of memory in add_channel\n");
		return -1;
	}
	memset(ch, 0, sizeof (struct channel));
	if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
					       GFP_KERNEL | GFP_DMA)) == NULL) {
		kfree(ch);
		ctc_pr_warn("ctc: Out of memory in add_channel\n");
		return -1;
	}

	memset(ch->ccw, 0, 8*sizeof(struct ccw1));	// assure all flags and counters are reset

	/**
	 * "static" ccws are used in the following way:
	 *
	 * ccw[0..2] (Channel program for generic I/O):
	 *           0: prepare
	 *           1: read or write (depending on direction) with fixed
	 *              buffer (idal allocated once when buffer is allocated)
	 *           2: nop
	 * ccw[3..5] (Channel program for direct write of packets)
	 *           3: prepare
	 *           4: write (idal allocated on every write).
	 *           5: nop
	 * ccw[6..7] (Channel program for initial channel setup):
	 *           6: set extended mode
	 *           7: nop
	 *
	 * ch->ccw[0..5] are initialized in ch_action_start because
	 * the channel's direction is yet unknown here.
	 */
	ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
	ch->ccw[6].flags = CCW_FLAG_SLI;

	ch->ccw[7].cmd_code = CCW_CMD_NOOP;
	ch->ccw[7].flags = CCW_FLAG_SLI;

	ch->cdev = cdev;
	snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
	ch->type = type;
	loglevel = CTC_LOGLEVEL_DEFAULT;
	ch->fsm = init_fsm(ch->id, ch_state_names,
			   ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
			   ch_fsm, CH_FSM_LEN, GFP_KERNEL);
	if (ch->fsm == NULL) {
		ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
		kfree(ch->ccw);
		kfree(ch);
		return -1;
	}
	fsm_newstate(ch->fsm, CH_STATE_IDLE);
	if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
					      GFP_KERNEL)) == NULL) {
		ctc_pr_warn("ctc: Out of memory in add_channel\n");
		kfree_fsm(ch->fsm);
		kfree(ch->ccw);
		kfree(ch);
		return -1;
	}
	memset(ch->irb, 0, sizeof (struct irb));
	while (*c && less_than((*c)->id, ch->id))
		c = &(*c)->next;
	if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
		ctc_pr_debug(
			"ctc: add_channel: device %s already in list, "
			"using old entry\n", (*c)->id);
		kfree(ch->irb);
		kfree_fsm(ch->fsm);
		kfree(ch->ccw);
		kfree(ch);
		return 0;
	}
	fsm_settimer(ch->fsm, &ch->timer);
	skb_queue_head_init(&ch->io_queue);
	skb_queue_head_init(&ch->collect_queue);
	ch->next = *c;
	*c = ch;
	return 0;
}
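
/*
 * Illustrative sketch (editor's addition): the eight "static" CCWs set up in
 * add_channel() and ch_action_start() form three small channel programs, as
 * the comment inside add_channel() describes. The table below only restates
 * those roles as data for readability; it is guarded with #if 0 and never
 * compiled.
 */
#if 0
static const char *ctc_ccw_roles_sketch[8] = {
	[0] = "prepare            (generic I/O program)",
	[1] = "read or write into the fixed trans_skb buffer",
	[2] = "nop",
	[3] = "prepare            (direct-write program)",
	[4] = "write, IDAL set up per packet",
	[5] = "nop",
	[6] = "set extended mode  (initial setup program)",
	[7] = "nop",
};
#endif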
/**
 * Release a specific channel in the channel list.
 *
 * @param ch Pointer to channel struct to be released.
 */
static void
channel_free(struct channel *ch)
{
	ch->flags &= ~CHANNEL_FLAGS_INUSE;
	fsm_newstate(ch->fsm, CH_STATE_IDLE);
}

/**
 * Remove a specific channel in the channel list.
 *
 * @param ch Pointer to channel struct to be released.
 */
static void
channel_remove(struct channel *ch)
{
	struct channel **c = &channels;

	DBF_TEXT(trace, 2, __FUNCTION__);
	if (ch == NULL)
		return;

	channel_free(ch);
	while (*c) {
		if (*c == ch) {
			*c = ch->next;
			fsm_deltimer(&ch->timer);
			kfree_fsm(ch->fsm);
			clear_normalized_cda(&ch->ccw[4]);
			if (ch->trans_skb != NULL) {
				clear_normalized_cda(&ch->ccw[1]);
				dev_kfree_skb(ch->trans_skb);
			}
			kfree(ch->ccw);
			kfree(ch->irb);
			kfree(ch);
			return;
		}
		c = &((*c)->next);
	}
}
2024 * Get a specific channel from the channel list.
2026 * @param type Type of channel we are interested in.
2027 * @param id Id of channel we are interested in.
2028 * @param direction Direction we want to use this channel for.
2030 * @return Pointer to a channel or NULL if no matching channel available.
2032 static struct channel
2034 channel_get(enum channel_types type, char *id, int direction)
2036 struct channel *ch = channels;
2038 DBF_TEXT(trace, 3, __FUNCTION__);
2039 #ifdef DEBUG
2040 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
2041 __func__, id, type);
2042 #endif
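/* Walk the channel list until both id and type match. */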
2044 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
2045 #ifdef DEBUG
2046 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
2047 __func__, ch, ch->id, ch->type);
2048 #endif
2049 ch = ch->next;
2051 #ifdef DEBUG
2052 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
2053 __func__, ch, ch->id, ch->type);
2054 #endif
2055 if (!ch) {
2056 ctc_pr_warn("ctc: %s(): channel with id %s "
2057 "and type %d not found in channel list\n",
2058 __func__, id, type);
2059 } else {
2060 if (ch->flags & CHANNEL_FLAGS_INUSE)
2061 ch = NULL;
2062 else {
2063 ch->flags |= CHANNEL_FLAGS_INUSE;
2064 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
2065 ch->flags |= (direction == WRITE)
2066 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
2067 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
2070 return ch;
2074 * Return the channel type by name.
2076 * @param name Name of network interface.
2078 * @return Type class of channel to be used for that interface.
2080 static inline enum channel_types
2081 extract_channel_media(char *name)
2083 enum channel_types ret = channel_type_unknown;
2085 if (name != NULL) {
2086 if (strncmp(name, "ctc", 3) == 0)
2087 ret = channel_type_parallel;
2088 if (strncmp(name, "escon", 5) == 0)
2089 ret = channel_type_escon;
2091 return ret;
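/*
 * Example: an interface named "ctc0" yields channel_type_parallel,
 * "escon1" yields channel_type_escon, anything else remains
 * channel_type_unknown.
 */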
2094 static long
2095 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
2097 if (!IS_ERR(irb))
2098 return 0;
2100 switch (PTR_ERR(irb)) {
2101 case -EIO:
2102 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
2103 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2104 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
2105 break;
2106 case -ETIMEDOUT:
2107 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
2108 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2109 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
2110 break;
2111 default:
2112 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
2113 cdev->dev.bus_id);
2114 // CTC_DBF_TEXT(trace, 2, "ckirberr");
2115 // CTC_DBF_TEXT(trace, 2, " rc???");
2117 return PTR_ERR(irb);
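/*
 * The common I/O layer may pass an ERR_PTR() encoded error instead of
 * a real irb; __ctc_check_irb_error() turns that into a warning and a
 * non-zero return code so the interrupt handler can bail out early.
 */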
2121 * Main IRQ handler.
2123 * @param cdev The ccw_device the interrupt is for.
2124 * @param intparm interruption parameter.
2125 * @param irb interruption response block.
2127 static void
2128 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2130 struct channel *ch;
2131 struct net_device *dev;
2132 struct ctc_priv *priv;
2134 DBF_TEXT(trace, 5, __FUNCTION__);
2135 if (__ctc_check_irb_error(cdev, irb))
2136 return;
2138 /* Check for unsolicited interrupts. */
2139 if (!cdev->dev.driver_data) {
2140 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
2141 cdev->dev.bus_id, irb->scsw.cstat,
2142 irb->scsw.dstat);
2143 return;
2146 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
2147 ->dev.driver_data;
2149 /* Try to extract channel from driver data. */
2150 if (priv->channel[READ]->cdev == cdev)
2151 ch = priv->channel[READ];
2152 else if (priv->channel[WRITE]->cdev == cdev)
2153 ch = priv->channel[WRITE];
2154 else {
2155 ctc_pr_err("ctc: Can't determine channel for interrupt, "
2156 "device %s\n", cdev->dev.bus_id);
2157 return;
2160 dev = (struct net_device *) (ch->netdev);
2161 if (dev == NULL) {
2162 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
2163 cdev->dev.bus_id, ch);
2164 return;
2167 #ifdef DEBUG
2168 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
2169 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
2170 #endif
2172 /* Copy interruption response block. */
2173 memcpy(ch->irb, irb, sizeof(struct irb));
2175 /* Check for good subchannel return code, otherwise error message */
2176 if (ch->irb->scsw.cstat) {
2177 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
2178 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
2179 dev->name, ch->id, ch->irb->scsw.cstat,
2180 ch->irb->scsw.dstat);
2181 return;
2184 /* Check the reason-code of a unit check */
2185 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
2186 ccw_unit_check(ch, ch->irb->ecw[0]);
2187 return;
2189 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
2190 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
2191 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
2192 else
2193 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
2194 return;
2196 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
2197 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
2198 return;
2200 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
2201 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
2202 (ch->irb->scsw.stctl ==
2203 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
2204 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
2205 else
2206 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
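/*
 * Dispatch order above: subchannel check status first, then unit
 * check, then busy/attention conditions; only clean secondary or
 * pending status is reported as CH_EVENT_FINSTAT, anything else is
 * handed to the channel statemachine as a generic CH_EVENT_IRQ.
 */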
2211 * Actions for the interface statemachine.
2212 *****************************************************************************/
2215 * Startup channels by sending CH_EVENT_START to each channel.
2217 * @param fi An instance of an interface statemachine.
2218 * @param event The event, just happened.
2219 * @param arg Generic pointer, cast from struct net_device * upon call.
2221 static void
2222 dev_action_start(fsm_instance * fi, int event, void *arg)
2224 struct net_device *dev = (struct net_device *) arg;
2225 struct ctc_priv *privptr = dev->priv;
2226 int direction;
2228 DBF_TEXT(setup, 3, __FUNCTION__);
2229 fsm_deltimer(&privptr->restart_timer);
2230 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231 for (direction = READ; direction <= WRITE; direction++) {
2232 struct channel *ch = privptr->channel[direction];
2233 fsm_event(ch->fsm, CH_EVENT_START, ch);
2238 * Shutdown channels by sending CH_EVENT_STOP to each channel.
2240 * @param fi An instance of an interface statemachine.
2241 * @param event The event, just happened.
2242 * @param arg Generic pointer, cast from struct net_device * upon call.
2244 static void
2245 dev_action_stop(fsm_instance * fi, int event, void *arg)
2247 struct net_device *dev = (struct net_device *) arg;
2248 struct ctc_priv *privptr = dev->priv;
2249 int direction;
2251 DBF_TEXT(trace, 3, __FUNCTION__);
2252 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2253 for (direction = READ; direction <= WRITE; direction++) {
2254 struct channel *ch = privptr->channel[direction];
2255 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2258 static void
2259 dev_action_restart(fsm_instance *fi, int event, void *arg)
2261 struct net_device *dev = (struct net_device *)arg;
2262 struct ctc_priv *privptr = dev->priv;
2264 DBF_TEXT(trace, 3, __FUNCTION__);
2265 ctc_pr_debug("%s: Restarting\n", dev->name);
2266 dev_action_stop(fi, event, arg);
2267 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2268 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2269 DEV_EVENT_START, dev);
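/*
 * A restart is an immediate stop followed by a delayed start: the
 * restart_timer fires DEV_EVENT_START again after CTC_TIMEOUT_5SEC.
 */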
2273 * Called from channel statemachine
2274 * when a channel is up and running.
2276 * @param fi An instance of an interface statemachine.
2277 * @param event The event, just happened.
2278 * @param arg Generic pointer, cast from struct net_device * upon call.
2280 static void
2281 dev_action_chup(fsm_instance * fi, int event, void *arg)
2283 struct net_device *dev = (struct net_device *) arg;
2284 struct ctc_priv *privptr = dev->priv;
2286 DBF_TEXT(trace, 3, __FUNCTION__);
2287 switch (fsm_getstate(fi)) {
2288 case DEV_STATE_STARTWAIT_RXTX:
2289 if (event == DEV_EVENT_RXUP)
2290 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2291 else
2292 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2293 break;
2294 case DEV_STATE_STARTWAIT_RX:
2295 if (event == DEV_EVENT_RXUP) {
2296 fsm_newstate(fi, DEV_STATE_RUNNING);
2297 ctc_pr_info("%s: connected with remote side\n",
2298 dev->name);
2299 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2300 ctc_tty_setcarrier(dev, 1);
2301 ctc_clear_busy(dev);
2303 break;
2304 case DEV_STATE_STARTWAIT_TX:
2305 if (event == DEV_EVENT_TXUP) {
2306 fsm_newstate(fi, DEV_STATE_RUNNING);
2307 ctc_pr_info("%s: connected with remote side\n",
2308 dev->name);
2309 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2310 ctc_tty_setcarrier(dev, 1);
2311 ctc_clear_busy(dev);
2313 break;
2314 case DEV_STATE_STOPWAIT_TX:
2315 if (event == DEV_EVENT_RXUP)
2316 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2317 break;
2318 case DEV_STATE_STOPWAIT_RX:
2319 if (event == DEV_EVENT_TXUP)
2320 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2321 break;
2326 * Called from channel statemachine
2327 * when a channel has been shutdown.
2329 * @param fi An instance of an interface statemachine.
2330 * @param event The event, just happened.
2331 * @param arg Generic pointer, cast from struct net_device * upon call.
2333 static void
2334 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2336 struct net_device *dev = (struct net_device *) arg;
2337 struct ctc_priv *privptr = dev->priv;
2339 DBF_TEXT(trace, 3, __FUNCTION__);
2340 switch (fsm_getstate(fi)) {
2341 case DEV_STATE_RUNNING:
2342 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2343 ctc_tty_setcarrier(dev, 0);
2344 if (event == DEV_EVENT_TXDOWN)
2345 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2346 else
2347 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2348 break;
2349 case DEV_STATE_STARTWAIT_RX:
2350 if (event == DEV_EVENT_TXDOWN)
2351 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2352 break;
2353 case DEV_STATE_STARTWAIT_TX:
2354 if (event == DEV_EVENT_RXDOWN)
2355 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2356 break;
2357 case DEV_STATE_STOPWAIT_RXTX:
2358 if (event == DEV_EVENT_TXDOWN)
2359 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2360 else
2361 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2362 break;
2363 case DEV_STATE_STOPWAIT_RX:
2364 if (event == DEV_EVENT_RXDOWN)
2365 fsm_newstate(fi, DEV_STATE_STOPPED);
2366 break;
2367 case DEV_STATE_STOPWAIT_TX:
2368 if (event == DEV_EVENT_TXDOWN)
2369 fsm_newstate(fi, DEV_STATE_STOPPED);
2370 break;
2374 static const fsm_node dev_fsm[] = {
2375 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2377 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2378 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2379 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2380 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2382 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2383 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2384 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2385 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2386 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2388 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2389 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2390 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2391 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2392 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2394 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2395 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2396 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2397 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2398 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2399 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2401 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2402 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2403 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2404 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2405 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2407 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2408 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2409 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2410 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2411 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2413 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2414 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2415 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2416 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2417 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2418 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2421 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
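/*
 * Each dev_fsm entry is a (state, event, action) triple; DEV_FSM_LEN
 * is simply the number of entries in the table and is passed to
 * init_fsm() together with dev_fsm in ctc_init_netdevice() below.
 */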
2424 * Transmit a packet.
2425 * This is a helper function for ctc_tx().
2427 * @param ch Channel to be used for sending.
2428 * @param skb Pointer to struct sk_buff of packet to send.
2429 * The linklevel header has already been set up
2430 * by ctc_tx().
2432 * @return 0 on success, -ERRNO on failure.
2434 static int
2435 transmit_skb(struct channel *ch, struct sk_buff *skb)
2437 unsigned long saveflags;
2438 struct ll_header header;
2439 int rc = 0;
2441 DBF_TEXT(trace, 5, __FUNCTION__);
2442 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2443 int l = skb->len + LL_HEADER_LENGTH;
2445 spin_lock_irqsave(&ch->collect_lock, saveflags);
2446 if (ch->collect_len + l > ch->max_bufsize - 2)
2447 rc = -EBUSY;
2448 else {
2449 atomic_inc(&skb->users);
2450 header.length = l;
2451 header.type = skb->protocol;
2452 header.unused = 0;
2453 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2454 LL_HEADER_LENGTH);
2455 skb_queue_tail(&ch->collect_queue, skb);
2456 ch->collect_len += l;
2458 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2459 } else {
2460 __u16 block_len;
2461 int ccw_idx;
2462 struct sk_buff *nskb;
2463 unsigned long hi;
2466 * Protect skb against being freed by upper
2467 * layers.
2469 atomic_inc(&skb->users);
2470 ch->prof.txlen += skb->len;
2471 header.length = skb->len + LL_HEADER_LENGTH;
2472 header.type = skb->protocol;
2473 header.unused = 0;
2474 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2475 LL_HEADER_LENGTH);
2476 block_len = skb->len + 2;
2477 *((__u16 *) skb_push(skb, 2)) = block_len;
2480 * IDAL support in CTC is broken, so we have to
2481 * care about skb's above 2G ourselves.
2483 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2484 if (hi) {
2485 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2486 if (!nskb) {
2487 atomic_dec(&skb->users);
2488 skb_pull(skb, LL_HEADER_LENGTH + 2);
2489 return -ENOMEM;
2490 } else {
2491 memcpy(skb_put(nskb, skb->len),
2492 skb->data, skb->len);
2493 atomic_inc(&nskb->users);
2494 atomic_dec(&skb->users);
2495 dev_kfree_skb_irq(skb);
2496 skb = nskb;
2500 ch->ccw[4].count = block_len;
2501 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2503 * idal allocation failed, try via copying to
2504 * trans_skb. trans_skb usually has a pre-allocated
2505 * idal.
2507 if (ctc_checkalloc_buffer(ch, 1)) {
2509 * Remove our header. It gets added
2510 * again on retransmit.
2512 atomic_dec(&skb->users);
2513 skb_pull(skb, LL_HEADER_LENGTH + 2);
2514 return -EBUSY;
2517 ch->trans_skb->tail = ch->trans_skb->data;
2518 ch->trans_skb->len = 0;
2519 ch->ccw[1].count = skb->len;
2520 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2521 skb->len);
2522 atomic_dec(&skb->users);
2523 dev_kfree_skb_irq(skb);
2524 ccw_idx = 0;
2525 } else {
2526 skb_queue_tail(&ch->io_queue, skb);
2527 ccw_idx = 3;
2529 ch->retry = 0;
2530 fsm_newstate(ch->fsm, CH_STATE_TX);
2531 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2532 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2533 ch->prof.send_stamp = xtime;
2534 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2535 (unsigned long) ch, 0xff, 0);
2536 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2537 if (ccw_idx == 3)
2538 ch->prof.doios_single++;
2539 if (rc != 0) {
2540 fsm_deltimer(&ch->timer);
2541 ccw_check_return_code(ch, rc, "single skb TX");
2542 if (ccw_idx == 3)
2543 skb_dequeue_tail(&ch->io_queue);
2545 * Remove our header. It gets added
2546 * again on retransmit.
2548 skb_pull(skb, LL_HEADER_LENGTH + 2);
2549 } else {
2550 if (ccw_idx == 0) {
2551 struct net_device *dev = ch->netdev;
2552 struct ctc_priv *privptr = dev->priv;
2553 privptr->stats.tx_packets++;
2554 privptr->stats.tx_bytes +=
2555 skb->len - LL_HEADER_LENGTH;
2560 return rc;
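/*
 * transmit_skb() thus has two paths: if the write channel is not idle,
 * the skb is queued on collect_queue (bounded by max_bufsize) and sent
 * later; otherwise it is written directly, either via an IDAL mapped
 * onto ccw[3..5] or, if that fails, by copying into trans_skb and
 * using the pre-allocated buffer program in ccw[0..2].
 */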
2564 * Interface API for upper network layers
2565 *****************************************************************************/
2568 * Open an interface.
2569 * Called from generic network layer when ifconfig up is run.
2571 * @param dev Pointer to interface struct.
2573 * @return 0 on success, -ERRNO on failure. (Never fails.)
2575 static int
2576 ctc_open(struct net_device * dev)
2578 DBF_TEXT(trace, 5, __FUNCTION__);
2579 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2580 return 0;
2584 * Close an interface.
2585 * Called from generic network layer when ifconfig down is run.
2587 * @param dev Pointer to interface struct.
2589 * @return 0 on success, -ERRNO on failure. (Never fails.)
2591 static int
2592 ctc_close(struct net_device * dev)
2594 DBF_TEXT(trace, 5, __FUNCTION__);
2595 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2596 return 0;
2600 * Start transmission of a packet.
2601 * Called from generic network device layer.
2603 * @param skb Pointer to buffer containing the packet.
2604 * @param dev Pointer to interface struct.
2606 * @return 0 if packet consumed, !0 if packet rejected.
2607 * Note: If we return !0, then the packet is freed by
2608 * the generic network layer.
2610 static int
2611 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2613 int rc = 0;
2614 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2616 DBF_TEXT(trace, 5, __FUNCTION__);
2618 * Some sanity checks ...
2620 if (skb == NULL) {
2621 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2622 privptr->stats.tx_dropped++;
2623 return 0;
2625 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2626 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2627 dev->name, LL_HEADER_LENGTH + 2);
2628 dev_kfree_skb(skb);
2629 privptr->stats.tx_dropped++;
2630 return 0;
2634 * If channels are not running, try to restart them
2635 * and throw away packet.
2637 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2638 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2639 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2640 return -EBUSY;
2641 dev_kfree_skb(skb);
2642 privptr->stats.tx_dropped++;
2643 privptr->stats.tx_errors++;
2644 privptr->stats.tx_carrier_errors++;
2645 return 0;
2648 if (ctc_test_and_set_busy(dev))
2649 return -EBUSY;
2651 dev->trans_start = jiffies;
2652 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2653 rc = 1;
2654 ctc_clear_busy(dev);
2655 return rc;
2659 * Sets MTU of an interface.
2661 * @param dev Pointer to interface struct.
2662 * @param new_mtu The new MTU to use for this interface.
2664 * @return 0 on success, -EINVAL if MTU is out of valid range.
2665 * (valid range is 576 .. 65527). If VM is on the
2666 * remote side, maximum MTU is 32760; however, this is
2667 * <em>not</em> checked here.
2669 static int
2670 ctc_change_mtu(struct net_device * dev, int new_mtu)
2672 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2674 DBF_TEXT(trace, 3, __FUNCTION__);
2675 if ((new_mtu < 576) || (new_mtu > 65527) ||
2676 (new_mtu > (privptr->channel[READ]->max_bufsize -
2677 LL_HEADER_LENGTH - 2)))
2678 return -EINVAL;
2679 dev->mtu = new_mtu;
2680 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2681 return 0;
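/*
 * Example (interface name is illustrative): "ip link set ctc0 mtu 1500"
 * or an equivalent ifconfig call ends up here via dev->change_mtu and
 * is rejected with -EINVAL if the value does not fit the current read
 * buffer size.
 */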
2685 * Returns interface statistics of a device.
2687 * @param dev Pointer to interface struct.
2689 * @return Pointer to stats struct of this interface.
2691 static struct net_device_stats *
2692 ctc_stats(struct net_device * dev)
2694 return &((struct ctc_priv *) dev->priv)->stats;
2698 * sysfs attributes
2700 static ssize_t
2701 buffer_show(struct device *dev, char *buf)
2703 struct ctc_priv *priv;
2705 priv = dev->driver_data;
2706 if (!priv)
2707 return -ENODEV;
2708 return sprintf(buf, "%d\n",
2709 priv->buffer_size);
2712 static ssize_t
2713 buffer_write(struct device *dev, const char *buf, size_t count)
2715 struct ctc_priv *priv;
2716 struct net_device *ndev;
2717 int bs1;
2719 DBF_TEXT(trace, 3, __FUNCTION__);
2720 priv = dev->driver_data;
2721 if (!priv)
2722 return -ENODEV;
2723 ndev = priv->channel[READ]->netdev;
2724 if (!ndev)
2725 return -ENODEV;
2726 sscanf(buf, "%d", &bs1);
2728 if (bs1 > CTC_BUFSIZE_LIMIT)
2729 return -EINVAL;
2730 if ((ndev->flags & IFF_RUNNING) &&
2731 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2732 return -EINVAL;
2733 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2734 return -EINVAL;
2736 priv->buffer_size = bs1;
2737 priv->channel[READ]->max_bufsize =
2738 priv->channel[WRITE]->max_bufsize = bs1;
2739 if (!(ndev->flags & IFF_RUNNING))
2740 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2741 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2742 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2744 return count;
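/*
 * Example (the sysfs path is illustrative and depends on the actual
 * device bus id):
 *   echo 32768 > /sys/bus/ccwgroup/devices/0.0.f000/buffer
 * Values above CTC_BUFSIZE_LIMIT, or too small for the current MTU
 * plus the link level header, are rejected with -EINVAL.
 */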
2748 static ssize_t
2749 loglevel_show(struct device *dev, char *buf)
2751 struct ctc_priv *priv;
2753 priv = dev->driver_data;
2754 if (!priv)
2755 return -ENODEV;
2756 return sprintf(buf, "%d\n", loglevel);
2759 static ssize_t
2760 loglevel_write(struct device *dev, const char *buf, size_t count)
2762 struct ctc_priv *priv;
2763 int ll1;
2765 DBF_TEXT(trace, 5, __FUNCTION__);
2766 priv = dev->driver_data;
2767 if (!priv)
2768 return -ENODEV;
2769 sscanf(buf, "%i", &ll1);
2771 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2772 return -EINVAL;
2773 loglevel = ll1;
2774 return count;
2777 static void
2778 ctc_print_statistics(struct ctc_priv *priv)
2780 char *sbuf;
2781 char *p;
2783 DBF_TEXT(trace, 4, __FUNCTION__);
2784 if (!priv)
2785 return;
2786 sbuf = (char *)kmalloc(2048, GFP_KERNEL);
2787 if (sbuf == NULL)
2788 return;
2789 p = sbuf;
2791 p += sprintf(p, " Device FSM state: %s\n",
2792 fsm_getstate_str(priv->fsm));
2793 p += sprintf(p, " RX channel FSM state: %s\n",
2794 fsm_getstate_str(priv->channel[READ]->fsm));
2795 p += sprintf(p, " TX channel FSM state: %s\n",
2796 fsm_getstate_str(priv->channel[WRITE]->fsm));
2797 p += sprintf(p, " Max. TX buffer used: %ld\n",
2798 priv->channel[WRITE]->prof.maxmulti);
2799 p += sprintf(p, " Max. chained SKBs: %ld\n",
2800 priv->channel[WRITE]->prof.maxcqueue);
2801 p += sprintf(p, " TX single write ops: %ld\n",
2802 priv->channel[WRITE]->prof.doios_single);
2803 p += sprintf(p, " TX multi write ops: %ld\n",
2804 priv->channel[WRITE]->prof.doios_multi);
2805 p += sprintf(p, " Net bytes written: %ld\n",
2806 priv->channel[WRITE]->prof.txlen);
2807 p += sprintf(p, " Max. TX IO-time: %ld\n",
2808 priv->channel[WRITE]->prof.tx_time);
2810 ctc_pr_debug("Statistics for %s:\n%s",
2811 priv->channel[WRITE]->netdev->name, sbuf);
2812 kfree(sbuf);
2813 return;
2816 static ssize_t
2817 stats_show(struct device *dev, char *buf)
2819 struct ctc_priv *priv = dev->driver_data;
2820 if (!priv)
2821 return -ENODEV;
2822 ctc_print_statistics(priv);
2823 return sprintf(buf, "0\n");
2826 static ssize_t
2827 stats_write(struct device *dev, const char *buf, size_t count)
2829 struct ctc_priv *priv = dev->driver_data;
2830 if (!priv)
2831 return -ENODEV;
2832 /* Reset statistics */
2833 memset(&priv->channel[WRITE]->prof, 0,
2834 sizeof(priv->channel[WRITE]->prof));
2835 return count;
2838 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2839 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2840 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2842 static int
2843 ctc_add_attributes(struct device *dev)
2845 // device_create_file(dev, &dev_attr_buffer);
2846 device_create_file(dev, &dev_attr_loglevel);
2847 device_create_file(dev, &dev_attr_stats);
2848 return 0;
2851 static void
2852 ctc_remove_attributes(struct device *dev)
2854 device_remove_file(dev, &dev_attr_stats);
2855 device_remove_file(dev, &dev_attr_loglevel);
2856 // device_remove_file(dev, &dev_attr_buffer);
2860 static void
2861 ctc_netdev_unregister(struct net_device * dev)
2863 struct ctc_priv *privptr;
2865 if (!dev)
2866 return;
2867 privptr = (struct ctc_priv *) dev->priv;
2868 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2869 unregister_netdev(dev);
2870 else
2871 ctc_tty_unregister_netdev(dev);
2874 static int
2875 ctc_netdev_register(struct net_device * dev)
2877 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2878 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2879 return register_netdev(dev);
2880 else
2881 return ctc_tty_register_netdev(dev);
2884 static void
2885 ctc_free_netdevice(struct net_device * dev, int free_dev)
2887 struct ctc_priv *privptr;
2888 if (!dev)
2889 return;
2890 privptr = dev->priv;
2891 if (privptr) {
2892 if (privptr->fsm)
2893 kfree_fsm(privptr->fsm);
2894 kfree(privptr);
2896 #ifdef MODULE
2897 if (free_dev)
2898 free_netdev(dev);
2899 #endif
2903 * Initialize everything of the net device except the name and the
2904 * channel structs.
2906 static struct net_device *
2907 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2908 struct ctc_priv *privptr)
2910 if (!privptr)
2911 return NULL;
2913 DBF_TEXT(setup, 3, __FUNCTION__);
2914 if (alloc_device) {
2915 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2916 if (!dev)
2917 return NULL;
2918 memset(dev, 0, sizeof (struct net_device));
2921 dev->priv = privptr;
2922 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2923 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2924 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2925 if (privptr->fsm == NULL) {
2926 if (alloc_device)
2927 kfree(dev);
2928 return NULL;
2930 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2931 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2932 if (dev->mtu == 0)
2933 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2934 dev->hard_start_xmit = ctc_tx;
2935 dev->open = ctc_open;
2936 dev->stop = ctc_close;
2937 dev->get_stats = ctc_stats;
2938 dev->change_mtu = ctc_change_mtu;
2939 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2940 dev->addr_len = 0;
2941 dev->type = ARPHRD_SLIP;
2942 dev->tx_queue_len = 100;
2943 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2944 SET_MODULE_OWNER(dev);
2945 return dev;
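/*
 * The resulting net_device is a point-to-point, ARP-less (IFF_NOARP)
 * SLIP-type interface with a default MTU derived from
 * CTC_BUFSIZE_DEFAULT minus the link level header.
 */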
2948 static ssize_t
2949 ctc_proto_show(struct device *dev, char *buf)
2951 struct ctc_priv *priv;
2953 priv = dev->driver_data;
2954 if (!priv)
2955 return -ENODEV;
2957 return sprintf(buf, "%d\n", priv->protocol);
2960 static ssize_t
2961 ctc_proto_store(struct device *dev, const char *buf, size_t count)
2963 struct ctc_priv *priv;
2964 int value;
2966 DBF_TEXT(trace, 3, __FUNCTION__);
2967 pr_debug("%s() called\n", __FUNCTION__);
2969 priv = dev->driver_data;
2970 if (!priv)
2971 return -ENODEV;
2972 sscanf(buf, "%d", &value);
2973 if ((value < 0) || (value > CTC_PROTO_MAX))
2974 return -EINVAL;
2975 priv->protocol = value;
2977 return count;
2980 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2982 static ssize_t
2983 ctc_type_show(struct device *dev, char *buf)
2985 struct ccwgroup_device *cgdev;
2987 cgdev = to_ccwgroupdev(dev);
2988 if (!cgdev)
2989 return -ENODEV;
2991 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2994 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2996 static struct attribute *ctc_attr[] = {
2997 &dev_attr_protocol.attr,
2998 &dev_attr_type.attr,
2999 &dev_attr_buffer.attr,
3000 NULL,
3003 static struct attribute_group ctc_attr_group = {
3004 .attrs = ctc_attr,
3007 static int
3008 ctc_add_files(struct device *dev)
3010 pr_debug("%s() called\n", __FUNCTION__);
3012 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
3015 static void
3016 ctc_remove_files(struct device *dev)
3018 pr_debug("%s() called\n", __FUNCTION__);
3020 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
3024 * Add ctc specific attributes.
3025 * Add ctc private data.
3027 * @param cgdev pointer to ccwgroup_device just added
3029 * @returns 0 on success, !0 on failure.
3032 static int
3033 ctc_probe_device(struct ccwgroup_device *cgdev)
3035 struct ctc_priv *priv;
3036 int rc;
3038 pr_debug("%s() called\n", __FUNCTION__);
3039 DBF_TEXT(trace, 3, __FUNCTION__);
3041 if (!get_device(&cgdev->dev))
3042 return -ENODEV;
3044 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
3045 if (!priv) {
3046 ctc_pr_err("%s: Out of memory\n", __func__);
3047 put_device(&cgdev->dev);
3048 return -ENOMEM;
3051 memset(priv, 0, sizeof (struct ctc_priv));
3052 rc = ctc_add_files(&cgdev->dev);
3053 if (rc) {
3054 kfree(priv);
3055 put_device(&cgdev->dev);
3056 return rc;
3058 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
3059 cgdev->cdev[0]->handler = ctc_irq_handler;
3060 cgdev->cdev[1]->handler = ctc_irq_handler;
3061 cgdev->dev.driver_data = priv;
3063 return 0;
3068 * Set up an interface.
3070 * @param cgdev Device to be setup.
3072 * @returns 0 on success, !0 on failure.
3074 static int
3075 ctc_new_device(struct ccwgroup_device *cgdev)
3077 char read_id[CTC_ID_SIZE];
3078 char write_id[CTC_ID_SIZE];
3079 int direction;
3080 enum channel_types type;
3081 struct ctc_priv *privptr;
3082 struct net_device *dev;
3083 int ret;
3085 pr_debug("%s() called\n", __FUNCTION__);
3086 DBF_TEXT(setup, 3, __FUNCTION__);
3088 privptr = cgdev->dev.driver_data;
3089 if (!privptr)
3090 return -ENODEV;
3092 type = get_channel_type(&cgdev->cdev[0]->id);
3094 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
3095 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
3097 if (add_channel(cgdev->cdev[0], type))
3098 return -ENOMEM;
3099 if (add_channel(cgdev->cdev[1], type))
3100 return -ENOMEM;
3102 ret = ccw_device_set_online(cgdev->cdev[0]);
3103 if (ret != 0) {
3104 printk(KERN_WARNING
3105 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
3108 ret = ccw_device_set_online(cgdev->cdev[1]);
3109 if (ret != 0) {
3110 printk(KERN_WARNING
3111 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
3114 dev = ctc_init_netdevice(NULL, 1, privptr);
3116 if (!dev) {
3117 ctc_pr_warn("ctc_init_netdevice failed\n");
3118 goto out;
3121 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
3122 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
3123 else
3124 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
3126 for (direction = READ; direction <= WRITE; direction++) {
3127 privptr->channel[direction] =
3128 channel_get(type, direction == READ ? read_id : write_id,
3129 direction);
3130 if (privptr->channel[direction] == NULL) {
3131 if (direction == WRITE)
3132 channel_free(privptr->channel[READ]);
3134 ctc_free_netdevice(dev, 1);
3135 goto out;
3137 privptr->channel[direction]->netdev = dev;
3138 privptr->channel[direction]->protocol = privptr->protocol;
3139 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
3141 /* sysfs magic */
3142 SET_NETDEV_DEV(dev, &cgdev->dev);
3144 if (ctc_netdev_register(dev) != 0) {
3145 ctc_free_netdevice(dev, 1);
3146 goto out;
3149 ctc_add_attributes(&cgdev->dev);
3151 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
3153 print_banner();
3155 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
3156 dev->name, privptr->channel[READ]->id,
3157 privptr->channel[WRITE]->id, privptr->protocol);
3159 return 0;
3160 out:
3161 ccw_device_set_offline(cgdev->cdev[1]);
3162 ccw_device_set_offline(cgdev->cdev[0]);
3164 return -ENODEV;
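/*
 * Setup order in ctc_new_device(): add both channels, set the two ccw
 * devices online, allocate and initialize the net_device, attach the
 * read and write channels to it, register the net_device (or its tty
 * counterpart) and finally create the additional sysfs attributes.
 * Any failure sets both ccw devices offline again and returns -ENODEV.
 */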
3168 * Shut down an interface.
3170 * @param cgdev Device to be shut down.
3172 * @returns 0 on success, !0 on failure.
3174 static int
3175 ctc_shutdown_device(struct ccwgroup_device *cgdev)
3177 struct ctc_priv *priv;
3178 struct net_device *ndev;
3180 DBF_TEXT(trace, 3, __FUNCTION__);
3181 pr_debug("%s() called\n", __FUNCTION__);
3183 priv = cgdev->dev.driver_data;
3184 ndev = NULL;
3185 if (!priv)
3186 return -ENODEV;
3188 if (priv->channel[READ]) {
3189 ndev = priv->channel[READ]->netdev;
3191 /* Close the device */
3192 ctc_close(ndev);
3193 ndev->flags &= ~IFF_RUNNING;
3195 ctc_remove_attributes(&cgdev->dev);
3197 channel_free(priv->channel[READ]);
3199 if (priv->channel[WRITE])
3200 channel_free(priv->channel[WRITE]);
3202 if (ndev) {
3203 ctc_netdev_unregister(ndev);
3204 ndev->priv = NULL;
3205 ctc_free_netdevice(ndev, 1);
3208 if (priv->fsm)
3209 kfree_fsm(priv->fsm);
3211 ccw_device_set_offline(cgdev->cdev[1]);
3212 ccw_device_set_offline(cgdev->cdev[0]);
3214 if (priv->channel[READ])
3215 channel_remove(priv->channel[READ]);
3216 if (priv->channel[WRITE])
3217 channel_remove(priv->channel[WRITE]);
3219 priv->channel[READ] = priv->channel[WRITE] = NULL;
3221 return 0;
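/*
 * Teardown in ctc_shutdown_device(): the net_device is closed and
 * later unregistered and freed, the sysfs attributes are removed,
 * both channels are freed and removed from the global list, and both
 * ccw devices are set offline again.
 */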
3225 static void
3226 ctc_remove_device(struct ccwgroup_device *cgdev)
3228 struct ctc_priv *priv;
3230 pr_debug("%s() called\n", __FUNCTION__);
3231 DBF_TEXT(trace, 3, __FUNCTION__);
3233 priv = cgdev->dev.driver_data;
3234 if (!priv)
3235 return;
3236 if (cgdev->state == CCWGROUP_ONLINE)
3237 ctc_shutdown_device(cgdev);
3238 ctc_remove_files(&cgdev->dev);
3239 cgdev->dev.driver_data = NULL;
3240 kfree(priv);
3241 put_device(&cgdev->dev);
3244 static struct ccwgroup_driver ctc_group_driver = {
3245 .owner = THIS_MODULE,
3246 .name = "ctc",
3247 .max_slaves = 2,
3248 .driver_id = 0xC3E3C3,
3249 .probe = ctc_probe_device,
3250 .remove = ctc_remove_device,
3251 .set_online = ctc_new_device,
3252 .set_offline = ctc_shutdown_device,
3256 * Module related routines
3257 *****************************************************************************/
3260 * Prepare to be unloaded. Free IRQs and release all resources.
3261 * This is called just before this module is unloaded. It is
3262 * <em>not</em> called if the usage count is !0, so we don't need to check
3263 * for that.
3265 static void __exit
3266 ctc_exit(void)
3268 unregister_cu3088_discipline(&ctc_group_driver);
3269 ctc_tty_cleanup();
3270 ctc_unregister_dbf_views();
3271 ctc_pr_info("CTC driver unloaded\n");
3275 * Initialize module.
3276 * This is called just after the module is loaded.
3278 * @return 0 on success, !0 on error.
3280 static int __init
3281 ctc_init(void)
3283 int ret = 0;
3285 print_banner();
3287 ret = ctc_register_dbf_views();
3288 if (ret) {
3289 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3290 return ret;
3292 ctc_tty_init();
3293 ret = register_cu3088_discipline(&ctc_group_driver);
3294 if (ret) {
3295 ctc_tty_cleanup();
3296 ctc_unregister_dbf_views();
3298 return ret;
3301 module_init(ctc_init);
3302 module_exit(ctc_exit);
3304 /* --- This is the END my friend --- */