/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"

/* Number of buffers per channel */

#define NUM_TX_BUF	2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF	6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE	1576	/* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported */

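/* Each HW_* initializer below fills a struct scc_hardware (defined further
   down) in declaration order: name, io_region, io_delta, io_size, num_devs,
   scc_offset, tmr_offset, tmr_hz, pclk_hz. HW_PI, for example, describes up
   to 8 Ottawa PI cards starting at I/O 0x300, spaced 0x20 apart, each using
   0x10 ports, with the 8253 timer at offset 8, a 1.8432 MHz timer clock and
   a 3.6864 MHz PCLK. */
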
#define HW_PI		{ "Ottawa PI", 0x300, 0x20, 0x10, 8, \
			  0, 8, 1843200, 3686400 }
#define HW_PI2		{ "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
			  0, 8, 3686400, 7372800 }
#define HW_TWIN		{ "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
			  0, 4, 6144000, 6144000 }
#define HW_S5		{ "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
			  0, 8, 4915200, 9830400 }

#define HARDWARE	{ HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ	25600	/* Frequency of timer 0 */

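/* Most timing parameters in struct scc_param (below) are given in timer-0
   ticks of 1/TMR_0_HZ s, i.e. 1/25600 s or about 39 microseconds per tick;
   a txdelay of 256, for instance, is 256/25600 s = 10 ms. */
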
#define TYPE_PI		0
#define TYPE_PI2	1
#define TYPE_TWIN	2
#define TYPE_S5		3
#define NUM_TYPES	4

#define MAX_NUM_DEVS	32


/* SCC chips supported */

#define Z8530		0
#define Z85C30		1
#define Z85230		2

#define CHIPNAMES	{ "Z8530", "Z85C30", "Z85230" }

/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD	0x00
#define SCCB_DATA	0x01
#define SCCA_CMD	0x02
#define SCCA_DATA	0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0	0x00
#define TMR_CNT1	0x01
#define TMR_CNT2	0x02
#define TMR_CTRL	0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK	0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG	0x08
#define TWIN_CLR_TMR1	0x09
#define TWIN_CLR_TMR2	0x0a
#define TWIN_SPARE_1	0x0b
#define TWIN_DMA_CFG	0x08
#define TWIN_SERIAL_CFG	0x09
#define TWIN_DMA_CLR_FF	0x0a
#define TWIN_SPARE_2	0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK	0x01
#define TWIN_TMR1_MSK	0x02
#define TWIN_TMR2_MSK	0x04
#define TWIN_INT_MSK	0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON	0x01
#define TWIN_DTRB_ON	0x02
#define TWIN_EXTCLKA	0x04
#define TWIN_EXTCLKB	0x08
#define TWIN_LOOPA_ON	0x10
#define TWIN_LOOPB_ON	0x20
#define TWIN_EI		0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1		0x08
#define TWIN_DMA_HDX_R1		0x0a
#define TWIN_DMA_HDX_T3		0x14
#define TWIN_DMA_HDX_R3		0x16
#define TWIN_DMA_FDX_T3R1	0x1b
#define TWIN_DMA_FDX_T1R3	0x1d


/* Status values */

#define IDLE		0
#define TX_HEAD		1
#define TX_DATA		2
#define TX_PAUSE	3
#define TX_TAIL		4
#define RTS_OFF		5
#define WAIT		6
#define DCD_ON		7
#define RX_ON		8
#define DCD_OFF		9


/* Ioctls */

#define SIOCGSCCPARAM	SIOCDEVPRIVATE
#define SIOCSSCCPARAM	(SIOCDEVPRIVATE+1)

/* Data types */

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};

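/* These parameters are read and written from user space through the two
   device-private ioctls defined above; scc_ioctl() below copies the whole
   structure in or out. A minimal user-space sketch (a hypothetical example
   program, not part of this driver; it needs its own copies of struct
   scc_param and the SIOC[GS]SCCPARAM numbers, as the dmascc_cfg utility
   ships them, and "dmascc0" is just an example interface name):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>

	int main(void)
	{
		struct scc_param param;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
		ifr.ifr_data = (char *) &param;
		if (ioctl(fd, SIOCGSCCPARAM, &ifr) == 0) {   // read current parameters
			param.txdelay = 256;                 // 10 ms at TMR_0_HZ = 25600
			ioctl(fd, SIOCSSCCPARAM, &ifr);      // needs CAP_NET_ADMIN and
			                                     // the interface down
		}
		return 0;
	}
 */
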
struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;
	struct net_device_stats stats;
	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};

/* Function declarations */

static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in cleanup_module(). */
static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;


/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_array(io, int, NULL, 0);
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}

static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] - hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region + j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] = hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region(base[i], hw[h].io_size,
						    "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] = base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] = base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] = base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF, t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8, t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val = inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

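	/* Note on the detection loop above: timer 0 divides the card's timer
	   clock (hw[h].tmr_hz) down to TMR_0_HZ, and timer 1 is loaded with
	   TMR_0_HZ / HZ * 10 counts. Counting at the TMR_0_HZ rate, it should
	   stop after 10/HZ seconds, i.e. roughly 10 jiffies, which is why a
	   board is accepted only when delay[i] lands in the 9..11 jiffy
	   window. */
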
	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out;
	}

	info->dev[0] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->open = scc_open;
		dev->stop = scc_close;
		dev->do_ioctl = scc_ioctl;
		dev->hard_start_xmit = scc_send_packet;
		dev->get_stats = scc_get_stats;
		dev->header_ops = &ax25_header_ops;
		dev->set_mac_address = scc_set_mac_address;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}

	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

out4:
	unregister_netdev(info->dev[0]);
out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
out2:
	free_netdev(info->dev[0]);
out1:
	kfree(info);
out:
	return -1;
}

/* Driver functions */

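/* The Z8530 exposes its registers through one command/pointer port and one
   data port per channel: to reach register n, the register number is first
   written to the command port, and the next access to that port then reads
   or writes the selected register; register 0 can be accessed directly,
   which is why the helpers below skip the pointer write when reg == 0.
   On the PI/PI2 the DMA request additionally has to be masked via
   PI_DREQ_MASK around every access, which is what the default: branches
   do under the shared register lock. */
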
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}

static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}

static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}

static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}

static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		     This bit should be ignored in DMA mode (according to the
		     documentation), but actually isn't. The receiver doesn't work if
		     it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		     a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		        compatibility).
		     b) If cleared, DMA requests may follow each other very quickly,
		        filling up the TX FIFO.
		        Advantage: TX works even in case of high bus latency.
		        Disadvantage: Edge-triggered DMA request circuitry may miss
		                      a request. No more data is delivered, resulting
		                      in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2. */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}

static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}

static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct scc_priv *priv = dev->priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user(ifr->ifr_data, &priv->param,
				 sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user(&priv->param, ifr->ifr_data,
				   sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}

static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	unsigned long flags;
	int i;

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;
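	/* The leading byte of the skb handed down by the AX.25 stack is the
	   KISS command byte; it is not sent on the air, which is why the copy
	   above starts at offset 1 and rx_bh() below re-inserts a zero byte
	   in front of received frames before passing them up. */
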
	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *scc_get_stats(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;

	return &priv->stats;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}

static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma == 1) ?
			     TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}

static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma == 1) ?
			     TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}

static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}

static void start_timer(struct scc_priv *priv, int t, int r15)
{
	unsigned long flags;

	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		save_flags(flags);
		cli();
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
		restore_flags(flags);
	}
}

static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p. 284 */
	rand = rand * 1664525L + 1013904223L;
	return (unsigned char) (rand >> 24);
}

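/* random() is used by tm_isr() for the p-persistence backoff: the delay
   passed to start_timer() there is random() / persist * slottime, i.e.
   between 0 and (255 / persist) slots of slottime ticks. With the default
   persist of 256 the integer division always yields 0, so transmission is
   never deferred. */
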
static inline void z8530_isr(struct scc_info *info)
{
	int is, i = 100;

	while ((is = read_scc(&info->priv[0], R3)) && i--) {
		if (is & CHARxIP) {
			rx_isr(&info->priv[0]);
		} else if (is & CHATxIP) {
			tx_isr(&info->priv[0]);
		} else if (is & CHAEXT) {
			es_isr(&info->priv[0]);
		} else if (is & CHBRxIP) {
			rx_isr(&info->priv[1]);
		} else if (is & CHBTxIP) {
			tx_isr(&info->priv[1]);
		} else {
			es_isr(&info->priv[1]);
		}
		write_scc(&info->priv[0], R0, RES_H_IUS);
		i++;
	}
	if (i < 0) {
		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
		       is);
	}
	/* Ok, no interrupts pending from this 8530. The INT line should
	   be inactive now. */
}

static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}

static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
				    read_scc_data(priv);
			else {
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}

static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun */
			priv->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->stats.rx_length_errors++;
			else
				priv->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->stats.rx_errors++;
				priv->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head + 1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					priv->stats.rx_errors++;
					priv->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}

static void rx_bh(struct work_struct *ugli_api)
{
	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			/* Drop packet */
			priv->stats.rx_dropped++;
		} else {
			/* Fill buffer */
			data = skb_put(skb, cb + 1);
			data[0] = 0;
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->last_rx = jiffies;
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		/* Move tail */
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}

static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything.
	   See Figure 2-22. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}

static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Update packet statistics */
			priv->stats.tx_errors++;
			priv->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Update packet statistics */
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);
}

static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			priv->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}