/*
 *  drivers/s390/net/claw.c
 *    ESCON CLAW network driver
 *
 *  Linux for zSeries version
 *    Copyright IBM Corp. 2002, 2009
 *  Author(s) Original code written by:
 *		Kazuo Iimura <iimura@jp.ibm.com>
 *	      Rewritten by
 *		Andy Richter <richtera@us.ibm.com>
 *		Marc Price <mwprice@us.ibm.com>
 *
 *    sysfs parms:
 *   group x.x.rrrr,x.x.wwww
 *   read_buffer nnnnnnn
 *   write_buffer nnnnnn
 *   host_name aaaaaaaa
 *   adapter_name aaaaaaaa
 *   api_type aaaaaaaa
 *
 *  eg.
 *   group 0.0.0200 0.0.0201
 *   read_buffer 25
 *   write_buffer 20
 *   host_name LINUX390
 *   adapter_name RS6K
 *   api_type TCPIP
 *
 *  where
 *
 *   The device id is determined by the order in which entries are added
 *   to the group: the first is claw0, the second claw1, and so on up to
 *   CLAW_MAX_DEV.
 *
 *   rrrr - the first of 2 consecutive device addresses used for the
 *          CLAW protocol.
 *          The specified address is always used as the input (Read)
 *          channel and the next address is used as the output channel.
 *
 *   wwww - the second of 2 consecutive device addresses used for
 *          the CLAW protocol.
 *          The specified address is always used as the output
 *          channel and the previous address is used as the input channel.
 *
 *   read_buffer  - specifies the number of input buffers to allocate.
 *   write_buffer - specifies the number of output buffers to allocate.
 *   host_name    - host name
 *   adapter_name - adapter name
 *   api_type     - API type; TCPIP or API will be sent and expected
 *                  as ws_name
 *
 *   Note the following requirements:
 *   1) host_name must match the configured adapter_name on the remote side
 *   2) adapter_name must match the configured host_name on the remote side
 *
 *  Change History
 *    1.00  Initial release shipped
 *    1.10  Changes for buffer allocation
 *    1.15  Changed for the 2.6 kernel; no longer compiles on 2.4 or lower
 *    1.25  Added packing support
 *    1.5
 */
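/*
 * Illustrative userspace configuration sketch (not part of the driver
 * source): assuming the standard ccwgroup sysfs layout, the example
 * settings above would typically be applied roughly as follows. The
 * device numbers and sysfs paths are assumptions for illustration only
 * and may differ on a given installation:
 *
 *   echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *   echo 25       > /sys/bus/ccwgroup/devices/0.0.0200/read_buffer
 *   echo 20       > /sys/bus/ccwgroup/devices/0.0.0200/write_buffer
 *   echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
 *   echo RS6K     > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
 *   echo TCPIP    > /sys/bus/ccwgroup/devices/0.0.0200/api_type
 *   echo 1        > /sys/bus/ccwgroup/devices/0.0.0200/online
 */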
#define KMSG_COMPONENT "claw"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "cu3088.h"
#include "claw.h"
/*
   CLAW uses the s390dbf file system - see claw_trace and claw_setup
*/

static char version[] __initdata = "CLAW driver";
static char debug_buffer[255];

/*
 * Debug Facility Stuff
 */
static debug_info_t *claw_dbf_setup;
static debug_info_t *claw_dbf_trace;

/*
 * CLAW Debug Facility functions
 */
static void
claw_unregister_debug_facility(void)
{
	if (claw_dbf_setup)
		debug_unregister(claw_dbf_setup);
	if (claw_dbf_trace)
		debug_unregister(claw_dbf_trace);
}

static int
claw_register_debug_facility(void)
{
	claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
	claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
		claw_unregister_debug_facility();
		return -ENOMEM;
	}
	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_setup, 2);
	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_trace, 2);
	return 0;
}
static inline void
claw_set_busy(struct net_device *dev)
{
	((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
	eieio();
}

static inline void
claw_clear_busy(struct net_device *dev)
{
	clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
	eieio();
}

static inline int
claw_check_busy(struct net_device *dev)
{
	eieio();
	return ((struct claw_privbk *) dev->ml_priv)->tbusy;
}

static inline void
claw_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}

static inline void
claw_clearbit_busy(int nr, struct net_device *dev)
{
	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}

static inline int
claw_test_and_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(nr,
		(void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
}
/* Functions for the DEV methods */

static int claw_probe(struct ccwgroup_device *cgdev);
static void claw_remove_device(struct ccwgroup_device *cgdev);
static void claw_purge_skb_queue(struct sk_buff_head *q);
static int claw_new_device(struct ccwgroup_device *cgdev);
static int claw_shutdown_device(struct ccwgroup_device *cgdev);
static int claw_tx(struct sk_buff *skb, struct net_device *dev);
static int claw_change_mtu(struct net_device *dev, int new_mtu);
static int claw_open(struct net_device *dev);
static void claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb);
static void claw_irq_tasklet(unsigned long data);
static int claw_release(struct net_device *dev);
static void claw_write_retry(struct chbk *p_ch);
static void claw_write_next(struct chbk *p_ch);
static void claw_timer(struct chbk *p_ch);

/* Functions */
static int add_claw_reads(struct net_device *dev,
	struct ccwbk *p_first, struct ccwbk *p_last);
static void ccw_check_return_code(struct ccw_device *cdev, int return_code);
static void ccw_check_unit_check(struct chbk *p_ch, unsigned char sense);
static int find_link(struct net_device *dev, char *host_name, char *ws_name);
static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
static int init_ccw_bk(struct net_device *dev);
static void probe_error(struct ccwgroup_device *cgdev);
static struct net_device_stats *claw_stats(struct net_device *dev);
static int pages_to_order_of_mag(int num_of_pages);
static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);

/* sysfs Functions */
static ssize_t claw_hname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_hname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_adname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_adname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_apname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_apname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_wbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_wbuff_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_rbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_rbuff_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static int claw_add_files(struct device *dev);
static void claw_remove_files(struct device *dev);

/* Functions for System Validate */
static int claw_process_control(struct net_device *dev, struct ccwbk *p_ccw);
static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
	__u8 correlator, __u8 rc, char *local_name, char *remote_name);
static int claw_snd_conn_req(struct net_device *dev, __u8 link);
static int claw_snd_disc(struct net_device *dev, struct clawctl *p_ctl);
static int claw_snd_sys_validate_rsp(struct net_device *dev,
	struct clawctl *p_ctl, __u32 return_code);
static int claw_strt_conn_req(struct net_device *dev);
static void claw_strt_read(struct net_device *dev, int lock);
static void claw_strt_out_IO(struct net_device *dev);
static void claw_free_wrt_buf(struct net_device *dev);

/* Functions for unpack reads */
static void unpack_read(struct net_device *dev);
static int claw_pm_prepare(struct ccwgroup_device *gdev)
{
	return -EPERM;
}

/* ccwgroup table */

static struct ccwgroup_driver claw_group_driver = {
	.owner		= THIS_MODULE,
	.name		= "claw",
	.max_slaves	= 2,
	.driver_id	= 0xC3D3C1E6,	/* "CLAW" in EBCDIC */
	.probe		= claw_probe,
	.remove		= claw_remove_device,
	.set_online	= claw_new_device,
	.set_offline	= claw_shutdown_device,
	.prepare	= claw_pm_prepare,
};
/*
 * Key functions
 */

/*----------------------------------------------------------------*
 *   claw_probe                                                    *
 *      this function is called for each CLAW device.              *
 *----------------------------------------------------------------*/
static int
claw_probe(struct ccwgroup_device *cgdev)
{
	int rc;
	struct claw_privbk *privptr = NULL;

	CLAW_DBF_TEXT(2, setup, "probe");
	if (!get_device(&cgdev->dev))
		return -ENODEV;
	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
	dev_set_drvdata(&cgdev->dev, privptr);
	if (privptr == NULL) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
	if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
	privptr->p_env->packing = 0;
	privptr->p_env->write_buffers = 5;
	privptr->p_env->read_buffers = 5;
	privptr->p_env->read_size = CLAW_FRAME_SIZE;
	privptr->p_env->write_size = CLAW_FRAME_SIZE;
	rc = claw_add_files(&cgdev->dev);
	if (rc) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		dev_err(&cgdev->dev, "Creating the /proc files for a new"
			" CLAW device failed\n");
		CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
		return rc;
	}
	privptr->p_env->p_priv = privptr;
	cgdev->cdev[0]->handler = claw_irq_handler;
	cgdev->cdev[1]->handler = claw_irq_handler;
	CLAW_DBF_TEXT(2, setup, "prbext 0");

	return 0;
}  /* end of claw_probe */
/*-------------------------------------------------------------------*
 *   claw_tx                                                          *
 *-------------------------------------------------------------------*/

static int
claw_tx(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct claw_privbk *privptr = dev->ml_priv;
	unsigned long saveflags;
	struct chbk *p_ch;

	CLAW_DBF_TEXT(4, trace, "claw_tx");
	p_ch = &privptr->channel[WRITE];
	spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
	rc = claw_hw_tx(skb, dev, 1);
	spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
	CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
	if (rc)
		rc = NETDEV_TX_BUSY;
	else
		rc = NETDEV_TX_OK;
	return rc;
}  /* end of claw_tx */
/*------------------------------------------------------------------*
 *  pack the collect queue into an skb and return it                 *
 *   If not packing just return the top skb from the queue           *
 *------------------------------------------------------------------*/

static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
	struct sk_buff *new_skb, *held_skb;
	struct chbk *p_ch = &privptr->channel[WRITE];
	struct claw_env *p_env = privptr->p_env;
	int pkt_cnt, pk_ind, so_far;

	new_skb = NULL;		/* assume no dice */
	pkt_cnt = 0;
	CLAW_DBF_TEXT(4, trace, "PackSKBe");
	if (!skb_queue_empty(&p_ch->collect_queue)) {
		/* some data */
		held_skb = skb_dequeue(&p_ch->collect_queue);
		if (held_skb)
			/* drops the extra reference taken when the skb was
			 * queued on the collect queue; the skb itself stays
			 * allocated because another reference is still held */
			dev_kfree_skb_any(held_skb);
		else
			return NULL;
		if (p_env->packing != DO_PACKED)
			return held_skb;
		/* get a new SKB we will pack at least one */
		new_skb = dev_alloc_skb(p_env->write_size);
		if (new_skb == NULL) {
			atomic_inc(&held_skb->users);
			skb_queue_head(&p_ch->collect_queue, held_skb);
			return NULL;
		}
		/* we have packed packet and a place to put it */
		pk_ind = 1;
		so_far = 0;
		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
		while ((pk_ind) && (held_skb != NULL)) {
			if (held_skb->len + so_far <= p_env->write_size - 8) {
				memcpy(skb_put(new_skb, held_skb->len),
					held_skb->data, held_skb->len);
				privptr->stats.tx_packets++;
				so_far += held_skb->len;
				pkt_cnt++;
				dev_kfree_skb_any(held_skb);
				held_skb = skb_dequeue(&p_ch->collect_queue);
				if (held_skb)
					atomic_dec(&held_skb->users);
			} else {
				pk_ind = 0;
				atomic_inc(&held_skb->users);
				skb_queue_head(&p_ch->collect_queue, held_skb);
			}
		}
	}
	CLAW_DBF_TEXT(4, trace, "PackSKBx");
	return new_skb;
}
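/*
 * Note (added for clarity): skbs that already carry a CLAW packing header
 * are tagged with cb[1] == 'P'; claw_hw_tx() prepends a struct clawph to
 * untagged skbs when packing is active, and claw_write_next() uses
 * claw_pack_skb() to coalesce queued skbs into a single write buffer.
 */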
/*-------------------------------------------------------------------*
 *   claw_change_mtu                                                  *
 *-------------------------------------------------------------------*/

static int
claw_change_mtu(struct net_device *dev, int new_mtu)
{
	struct claw_privbk *privptr = dev->ml_priv;
	int buff_size;

	CLAW_DBF_TEXT(4, trace, "setmtu");
	buff_size = privptr->p_env->write_size;
	if ((new_mtu < 60) || (new_mtu > buff_size)) {
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}  /* end of claw_change_mtu */
436 /*-------------------------------------------------------------------*
437 * claw_open *
439 *-------------------------------------------------------------------*/
440 static int
441 claw_open(struct net_device *dev)
444 int rc;
445 int i;
446 unsigned long saveflags=0;
447 unsigned long parm;
448 struct claw_privbk *privptr;
449 DECLARE_WAITQUEUE(wait, current);
450 struct timer_list timer;
451 struct ccwbk *p_buf;
453 CLAW_DBF_TEXT(4, trace, "open");
454 privptr = (struct claw_privbk *)dev->ml_priv;
455 /* allocate and initialize CCW blocks */
456 if (privptr->buffs_alloc == 0) {
457 rc=init_ccw_bk(dev);
458 if (rc) {
459 CLAW_DBF_TEXT(2, trace, "openmem");
460 return -ENOMEM;
463 privptr->system_validate_comp=0;
464 privptr->release_pend=0;
465 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
466 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
467 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
468 privptr->p_env->packing=PACKING_ASK;
469 } else {
470 privptr->p_env->packing=0;
471 privptr->p_env->read_size=CLAW_FRAME_SIZE;
472 privptr->p_env->write_size=CLAW_FRAME_SIZE;
474 claw_set_busy(dev);
475 tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
476 (unsigned long) &privptr->channel[READ]);
477 for ( i = 0; i < 2; i++) {
478 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
479 init_waitqueue_head(&privptr->channel[i].wait);
480 /* skb_queue_head_init(&p_ch->io_queue); */
481 if (i == WRITE)
482 skb_queue_head_init(
483 &privptr->channel[WRITE].collect_queue);
484 privptr->channel[i].flag_a = 0;
485 privptr->channel[i].IO_active = 0;
486 privptr->channel[i].flag &= ~CLAW_TIMER;
487 init_timer(&timer);
488 timer.function = (void *)claw_timer;
489 timer.data = (unsigned long)(&privptr->channel[i]);
490 timer.expires = jiffies + 15*HZ;
491 add_timer(&timer);
492 spin_lock_irqsave(get_ccwdev_lock(
493 privptr->channel[i].cdev), saveflags);
494 parm = (unsigned long) &privptr->channel[i];
495 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
496 rc = 0;
497 add_wait_queue(&privptr->channel[i].wait, &wait);
498 rc = ccw_device_halt(
499 (struct ccw_device *)privptr->channel[i].cdev,parm);
500 set_current_state(TASK_INTERRUPTIBLE);
501 spin_unlock_irqrestore(
502 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
503 schedule();
504 set_current_state(TASK_RUNNING);
505 remove_wait_queue(&privptr->channel[i].wait, &wait);
506 if(rc != 0)
507 ccw_check_return_code(privptr->channel[i].cdev, rc);
508 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
509 del_timer(&timer);
511 if ((((privptr->channel[READ].last_dstat |
512 privptr->channel[WRITE].last_dstat) &
513 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
514 (((privptr->channel[READ].flag |
515 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
516 dev_info(&privptr->channel[READ].cdev->dev,
517 "%s: remote side is not ready\n", dev->name);
518 CLAW_DBF_TEXT(2, trace, "notrdy");
520 for ( i = 0; i < 2; i++) {
521 spin_lock_irqsave(
522 get_ccwdev_lock(privptr->channel[i].cdev),
523 saveflags);
524 parm = (unsigned long) &privptr->channel[i];
525 privptr->channel[i].claw_state = CLAW_STOP;
526 rc = ccw_device_halt(
527 (struct ccw_device *)&privptr->channel[i].cdev,
528 parm);
529 spin_unlock_irqrestore(
530 get_ccwdev_lock(privptr->channel[i].cdev),
531 saveflags);
532 if (rc != 0) {
533 ccw_check_return_code(
534 privptr->channel[i].cdev, rc);
537 free_pages((unsigned long)privptr->p_buff_ccw,
538 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
539 if (privptr->p_env->read_size < PAGE_SIZE) {
540 free_pages((unsigned long)privptr->p_buff_read,
541 (int)pages_to_order_of_mag(
542 privptr->p_buff_read_num));
544 else {
545 p_buf=privptr->p_read_active_first;
546 while (p_buf!=NULL) {
547 free_pages((unsigned long)p_buf->p_buffer,
548 (int)pages_to_order_of_mag(
549 privptr->p_buff_pages_perread ));
550 p_buf=p_buf->next;
553 if (privptr->p_env->write_size < PAGE_SIZE ) {
554 free_pages((unsigned long)privptr->p_buff_write,
555 (int)pages_to_order_of_mag(
556 privptr->p_buff_write_num));
558 else {
559 p_buf=privptr->p_write_active_first;
560 while (p_buf!=NULL) {
561 free_pages((unsigned long)p_buf->p_buffer,
562 (int)pages_to_order_of_mag(
563 privptr->p_buff_pages_perwrite ));
564 p_buf=p_buf->next;
567 privptr->buffs_alloc = 0;
568 privptr->channel[READ].flag= 0x00;
569 privptr->channel[WRITE].flag = 0x00;
570 privptr->p_buff_ccw=NULL;
571 privptr->p_buff_read=NULL;
572 privptr->p_buff_write=NULL;
573 claw_clear_busy(dev);
574 CLAW_DBF_TEXT(2, trace, "open EIO");
575 return -EIO;
578 /* Send SystemValidate command */
580 claw_clear_busy(dev);
581 CLAW_DBF_TEXT(4, trace, "openok");
582 return 0;
583 } /* end of claw_open */
585 /*-------------------------------------------------------------------*
587 * claw_irq_handler *
589 *--------------------------------------------------------------------*/
590 static void
591 claw_irq_handler(struct ccw_device *cdev,
592 unsigned long intparm, struct irb *irb)
594 struct chbk *p_ch = NULL;
595 struct claw_privbk *privptr = NULL;
596 struct net_device *dev = NULL;
597 struct claw_env *p_env;
598 struct chbk *p_ch_r=NULL;
600 CLAW_DBF_TEXT(4, trace, "clawirq");
601 /* Bypass all 'unsolicited interrupts' */
602 privptr = dev_get_drvdata(&cdev->dev);
603 if (!privptr) {
604 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
605 " IRQ, c-%02x d-%02x\n",
606 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
607 CLAW_DBF_TEXT(2, trace, "badirq");
608 return;
611 /* Try to extract channel from driver data. */
612 if (privptr->channel[READ].cdev == cdev)
613 p_ch = &privptr->channel[READ];
614 else if (privptr->channel[WRITE].cdev == cdev)
615 p_ch = &privptr->channel[WRITE];
616 else {
617 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
618 CLAW_DBF_TEXT(2, trace, "badchan");
619 return;
621 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
623 dev = (struct net_device *) (p_ch->ndev);
624 p_env=privptr->p_env;
626 /* Copy interruption response block. */
627 memcpy(p_ch->irb, irb, sizeof(struct irb));
629 /* Check for good subchannel return code, otherwise info message */
630 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
631 dev_info(&cdev->dev,
632 "%s: subchannel check for device: %04x -"
633 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
634 dev->name, p_ch->devno,
635 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
636 irb->scsw.cmd.cpa);
637 CLAW_DBF_TEXT(2, trace, "chanchk");
638 /* return; */
641 /* Check the reason-code of a unit check */
642 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
643 ccw_check_unit_check(p_ch, irb->ecw[0]);
645 /* State machine to bring the connection up, down and to restart */
646 p_ch->last_dstat = irb->scsw.cmd.dstat;
648 switch (p_ch->claw_state) {
649 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
650 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
651 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
652 (p_ch->irb->scsw.cmd.stctl ==
653 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
654 return;
655 wake_up(&p_ch->wait); /* wake up claw_release */
656 CLAW_DBF_TEXT(4, trace, "stop");
657 return;
658 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
659 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
660 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
661 (p_ch->irb->scsw.cmd.stctl ==
662 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
663 CLAW_DBF_TEXT(4, trace, "haltio");
664 return;
666 if (p_ch->flag == CLAW_READ) {
667 p_ch->claw_state = CLAW_START_READ;
668 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
669 } else if (p_ch->flag == CLAW_WRITE) {
670 p_ch->claw_state = CLAW_START_WRITE;
671 /* send SYSTEM_VALIDATE */
672 claw_strt_read(dev, LOCK_NO);
673 claw_send_control(dev,
674 SYSTEM_VALIDATE_REQUEST,
675 0, 0, 0,
676 p_env->host_name,
677 p_env->adapter_name);
678 } else {
679 dev_warn(&cdev->dev, "The CLAW device received"
680 " an unexpected IRQ, "
681 "c-%02x d-%02x\n",
682 irb->scsw.cmd.cstat,
683 irb->scsw.cmd.dstat);
684 return;
686 CLAW_DBF_TEXT(4, trace, "haltio");
687 return;
688 case CLAW_START_READ:
689 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
690 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
691 clear_bit(0, (void *)&p_ch->IO_active);
692 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
693 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
694 (p_ch->irb->ecw[0]) == 0) {
695 privptr->stats.rx_errors++;
696 dev_info(&cdev->dev,
697 "%s: Restart is required after remote "
698 "side recovers \n",
699 dev->name);
701 CLAW_DBF_TEXT(4, trace, "notrdy");
702 return;
704 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
705 (p_ch->irb->scsw.cmd.dstat == 0)) {
706 if (test_and_set_bit(CLAW_BH_ACTIVE,
707 (void *)&p_ch->flag_a) == 0)
708 tasklet_schedule(&p_ch->tasklet);
709 else
710 CLAW_DBF_TEXT(4, trace, "PCINoBH");
711 CLAW_DBF_TEXT(4, trace, "PCI_read");
712 return;
714 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
715 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
716 (p_ch->irb->scsw.cmd.stctl ==
717 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
718 CLAW_DBF_TEXT(4, trace, "SPend_rd");
719 return;
721 clear_bit(0, (void *)&p_ch->IO_active);
722 claw_clearbit_busy(TB_RETRY, dev);
723 if (test_and_set_bit(CLAW_BH_ACTIVE,
724 (void *)&p_ch->flag_a) == 0)
725 tasklet_schedule(&p_ch->tasklet);
726 else
727 CLAW_DBF_TEXT(4, trace, "RdBHAct");
728 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
729 return;
730 case CLAW_START_WRITE:
731 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
732 dev_info(&cdev->dev,
733 "%s: Unit Check Occured in "
734 "write channel\n", dev->name);
735 clear_bit(0, (void *)&p_ch->IO_active);
736 if (p_ch->irb->ecw[0] & 0x80) {
737 dev_info(&cdev->dev,
738 "%s: Resetting Event "
739 "occurred:\n", dev->name);
740 init_timer(&p_ch->timer);
741 p_ch->timer.function =
742 (void *)claw_write_retry;
743 p_ch->timer.data = (unsigned long)p_ch;
744 p_ch->timer.expires = jiffies + 10*HZ;
745 add_timer(&p_ch->timer);
746 dev_info(&cdev->dev,
747 "%s: write connection "
748 "restarting\n", dev->name);
750 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
751 return;
753 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
754 clear_bit(0, (void *)&p_ch->IO_active);
755 dev_info(&cdev->dev,
756 "%s: Unit Exception "
757 "occurred in write channel\n",
758 dev->name);
760 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
761 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
762 (p_ch->irb->scsw.cmd.stctl ==
763 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
764 CLAW_DBF_TEXT(4, trace, "writeUE");
765 return;
767 clear_bit(0, (void *)&p_ch->IO_active);
768 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
769 claw_write_next(p_ch);
770 claw_clearbit_busy(TB_TX, dev);
771 claw_clear_busy(dev);
773 p_ch_r = (struct chbk *)&privptr->channel[READ];
774 if (test_and_set_bit(CLAW_BH_ACTIVE,
775 (void *)&p_ch_r->flag_a) == 0)
776 tasklet_schedule(&p_ch_r->tasklet);
777 CLAW_DBF_TEXT(4, trace, "StWtExit");
778 return;
779 default:
780 dev_warn(&cdev->dev,
781 "The CLAW device for %s received an unexpected IRQ\n",
782 dev->name);
783 CLAW_DBF_TEXT(2, trace, "badIRQ");
784 return;
787 } /* end of claw_irq_handler */
790 /*-------------------------------------------------------------------*
791 * claw_irq_tasklet *
793 *--------------------------------------------------------------------*/
794 static void
795 claw_irq_tasklet ( unsigned long data )
797 struct chbk * p_ch;
798 struct net_device *dev;
799 struct claw_privbk * privptr;
801 p_ch = (struct chbk *) data;
802 dev = (struct net_device *)p_ch->ndev;
803 CLAW_DBF_TEXT(4, trace, "IRQtask");
804 privptr = (struct claw_privbk *)dev->ml_priv;
805 unpack_read(dev);
806 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
807 CLAW_DBF_TEXT(4, trace, "TskletXt");
808 return;
809 } /* end of claw_irq_bh */
811 /*-------------------------------------------------------------------*
812 * claw_release *
814 *--------------------------------------------------------------------*/
815 static int
816 claw_release(struct net_device *dev)
818 int rc;
819 int i;
820 unsigned long saveflags;
821 unsigned long parm;
822 struct claw_privbk *privptr;
823 DECLARE_WAITQUEUE(wait, current);
824 struct ccwbk* p_this_ccw;
825 struct ccwbk* p_buf;
827 if (!dev)
828 return 0;
829 privptr = (struct claw_privbk *)dev->ml_priv;
830 if (!privptr)
831 return 0;
832 CLAW_DBF_TEXT(4, trace, "release");
833 privptr->release_pend=1;
834 claw_setbit_busy(TB_STOP,dev);
835 for ( i = 1; i >=0 ; i--) {
836 spin_lock_irqsave(
837 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
838 /* del_timer(&privptr->channel[READ].timer); */
839 privptr->channel[i].claw_state = CLAW_STOP;
840 privptr->channel[i].IO_active = 0;
841 parm = (unsigned long) &privptr->channel[i];
842 if (i == WRITE)
843 claw_purge_skb_queue(
844 &privptr->channel[WRITE].collect_queue);
845 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
846 if (privptr->system_validate_comp==0x00) /* never opened? */
847 init_waitqueue_head(&privptr->channel[i].wait);
848 add_wait_queue(&privptr->channel[i].wait, &wait);
849 set_current_state(TASK_INTERRUPTIBLE);
850 spin_unlock_irqrestore(
851 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
852 schedule();
853 set_current_state(TASK_RUNNING);
854 remove_wait_queue(&privptr->channel[i].wait, &wait);
855 if (rc != 0) {
856 ccw_check_return_code(privptr->channel[i].cdev, rc);
859 if (privptr->pk_skb != NULL) {
860 dev_kfree_skb_any(privptr->pk_skb);
861 privptr->pk_skb = NULL;
863 if(privptr->buffs_alloc != 1) {
864 CLAW_DBF_TEXT(4, trace, "none2fre");
865 return 0;
867 CLAW_DBF_TEXT(4, trace, "freebufs");
868 if (privptr->p_buff_ccw != NULL) {
869 free_pages((unsigned long)privptr->p_buff_ccw,
870 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
872 CLAW_DBF_TEXT(4, trace, "freeread");
873 if (privptr->p_env->read_size < PAGE_SIZE) {
874 if (privptr->p_buff_read != NULL) {
875 free_pages((unsigned long)privptr->p_buff_read,
876 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
879 else {
880 p_buf=privptr->p_read_active_first;
881 while (p_buf!=NULL) {
882 free_pages((unsigned long)p_buf->p_buffer,
883 (int)pages_to_order_of_mag(
884 privptr->p_buff_pages_perread ));
885 p_buf=p_buf->next;
888 CLAW_DBF_TEXT(4, trace, "freewrit");
889 if (privptr->p_env->write_size < PAGE_SIZE ) {
890 free_pages((unsigned long)privptr->p_buff_write,
891 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
893 else {
894 p_buf=privptr->p_write_active_first;
895 while (p_buf!=NULL) {
896 free_pages((unsigned long)p_buf->p_buffer,
897 (int)pages_to_order_of_mag(
898 privptr->p_buff_pages_perwrite ));
899 p_buf=p_buf->next;
902 CLAW_DBF_TEXT(4, trace, "clearptr");
903 privptr->buffs_alloc = 0;
904 privptr->p_buff_ccw=NULL;
905 privptr->p_buff_read=NULL;
906 privptr->p_buff_write=NULL;
907 privptr->system_validate_comp=0;
908 privptr->release_pend=0;
909 /* Remove any writes that were pending and reset all reads */
910 p_this_ccw=privptr->p_read_active_first;
911 while (p_this_ccw!=NULL) {
912 p_this_ccw->header.length=0xffff;
913 p_this_ccw->header.opcode=0xff;
914 p_this_ccw->header.flag=0x00;
915 p_this_ccw=p_this_ccw->next;
918 while (privptr->p_write_active_first!=NULL) {
919 p_this_ccw=privptr->p_write_active_first;
920 p_this_ccw->header.flag=CLAW_PENDING;
921 privptr->p_write_active_first=p_this_ccw->next;
922 p_this_ccw->next=privptr->p_write_free_chain;
923 privptr->p_write_free_chain=p_this_ccw;
924 ++privptr->write_free_count;
926 privptr->p_write_active_last=NULL;
927 privptr->mtc_logical_link = -1;
928 privptr->mtc_skipping = 1;
929 privptr->mtc_offset=0;
931 if (((privptr->channel[READ].last_dstat |
932 privptr->channel[WRITE].last_dstat) &
933 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
934 dev_warn(&privptr->channel[READ].cdev->dev,
935 "Deactivating %s completed with incorrect"
936 " subchannel status "
937 "(read %02x, write %02x)\n",
938 dev->name,
939 privptr->channel[READ].last_dstat,
940 privptr->channel[WRITE].last_dstat);
941 CLAW_DBF_TEXT(2, trace, "badclose");
943 CLAW_DBF_TEXT(4, trace, "rlsexit");
944 return 0;
945 } /* end of claw_release */
947 /*-------------------------------------------------------------------*
948 * claw_write_retry *
950 *--------------------------------------------------------------------*/
952 static void
953 claw_write_retry ( struct chbk *p_ch )
956 struct net_device *dev=p_ch->ndev;
958 CLAW_DBF_TEXT(4, trace, "w_retry");
959 if (p_ch->claw_state == CLAW_STOP) {
960 return;
962 claw_strt_out_IO( dev );
963 CLAW_DBF_TEXT(4, trace, "rtry_xit");
964 return;
965 } /* end of claw_write_retry */
968 /*-------------------------------------------------------------------*
969 * claw_write_next *
971 *--------------------------------------------------------------------*/
973 static void
974 claw_write_next ( struct chbk * p_ch )
977 struct net_device *dev;
978 struct claw_privbk *privptr=NULL;
979 struct sk_buff *pk_skb;
980 int rc;
982 CLAW_DBF_TEXT(4, trace, "claw_wrt");
983 if (p_ch->claw_state == CLAW_STOP)
984 return;
985 dev = (struct net_device *) p_ch->ndev;
986 privptr = (struct claw_privbk *) dev->ml_priv;
987 claw_free_wrt_buf( dev );
988 if ((privptr->write_free_count > 0) &&
989 !skb_queue_empty(&p_ch->collect_queue)) {
990 pk_skb = claw_pack_skb(privptr);
991 while (pk_skb != NULL) {
992 rc = claw_hw_tx( pk_skb, dev,1);
993 if (privptr->write_free_count > 0) {
994 pk_skb = claw_pack_skb(privptr);
995 } else
996 pk_skb = NULL;
999 if (privptr->p_write_active_first!=NULL) {
1000 claw_strt_out_IO(dev);
1002 return;
1003 } /* end of claw_write_next */
1005 /*-------------------------------------------------------------------*
1007 * claw_timer *
1008 *--------------------------------------------------------------------*/
1010 static void
1011 claw_timer ( struct chbk * p_ch )
1013 CLAW_DBF_TEXT(4, trace, "timer");
1014 p_ch->flag |= CLAW_TIMER;
1015 wake_up(&p_ch->wait);
1016 return;
1017 } /* end of claw_timer */
1021 * functions
/*-------------------------------------------------------------------*
 *   pages_to_order_of_mag                                            *
 *                                                                    *
 *    takes a number of pages from 1 to 512 and returns the base-2    *
 *    order of magnitude (log2(num_pages), rounded up) that           *
 *    get_free_pages() needs; get_free_pages() has an upper order     *
 *    of 9                                                            *
 *--------------------------------------------------------------------*/
static int
pages_to_order_of_mag(int num_of_pages)
{
	int order_of_mag = 1;	/* assume 2 pages */
	int nump;

	CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
	if (num_of_pages == 1)	/* magnitude of 0 = 1 page */
		return 0;
	/* 512 pages = 2Meg on 4k page systems */
	if (num_of_pages >= 512)
		return 9;
	/* we have two or more pages, order is at least 1 */
	for (nump = 2; nump <= 512; nump *= 2) {
		if (num_of_pages <= nump)
			break;
		order_of_mag += 1;
	}
	if (order_of_mag > 9)	/* I know it's paranoid */
		order_of_mag = 9;
	CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
	return order_of_mag;
}
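/*
 * Worked examples (added for illustration; the values follow directly from
 * the loop above):
 *   pages_to_order_of_mag(1)   returns 0  (2^0 = 1 page)
 *   pages_to_order_of_mag(5)   returns 3  (2^3 = 8 is the smallest power
 *                                          of two covering 5 pages)
 *   pages_to_order_of_mag(512) returns 9  (capped at the maximum order
 *                                          accepted here)
 */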
1055 /*-------------------------------------------------------------------*
1057 * add_claw_reads *
1059 *--------------------------------------------------------------------*/
1060 static int
1061 add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1062 struct ccwbk* p_last)
1064 struct claw_privbk *privptr;
1065 struct ccw1 temp_ccw;
1066 struct endccw * p_end;
1067 CLAW_DBF_TEXT(4, trace, "addreads");
1068 privptr = dev->ml_priv;
1069 p_end = privptr->p_end_ccw;
1071 /* first CCW and last CCW contain a new set of read channel programs
1072 * to append to the running channel programs */
1074 if ( p_first==NULL) {
1075 CLAW_DBF_TEXT(4, trace, "addexit");
1076 return 0;
1079 /* set up ending CCW sequence for this segment */
1080 if (p_end->read1) {
1081 p_end->read1=0x00; /* second ending CCW is now active */
1082 /* reset ending CCWs and setup TIC CCWs */
1083 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1084 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1085 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1086 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1087 p_end->read2_nop2.cda=0;
1088 p_end->read2_nop2.count=1;
1090 else {
1091 p_end->read1=0x01; /* first ending CCW is now active */
1092 /* reset ending CCWs and setup TIC CCWs */
1093 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1094 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1095 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1096 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1097 p_end->read1_nop2.cda=0;
1098 p_end->read1_nop2.count=1;
1101 if ( privptr-> p_read_active_first ==NULL ) {
1102 privptr->p_read_active_first = p_first; /* set new first */
1103 privptr->p_read_active_last = p_last; /* set new last */
1105 else {
1107 /* set up TIC ccw */
1108 temp_ccw.cda= (__u32)__pa(&p_first->read);
1109 temp_ccw.count=0;
1110 temp_ccw.flags=0;
1111 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1114 if (p_end->read1) {
1116 /* first set of CCW's is chained to the new read */
1117 /* chain, so the second set is chained to the active chain. */
1118 /* Therefore modify the second set to point to the new */
1119 /* read chain set up TIC CCWs */
1120 /* make sure we update the CCW so channel doesn't fetch it */
1121 /* when it's only half done */
1122 memcpy( &p_end->read2_nop2, &temp_ccw ,
1123 sizeof(struct ccw1));
1124 privptr->p_read_active_last->r_TIC_1.cda=
1125 (__u32)__pa(&p_first->read);
1126 privptr->p_read_active_last->r_TIC_2.cda=
1127 (__u32)__pa(&p_first->read);
1129 else {
1130 /* make sure we update the CCW so channel doesn't */
1131 /* fetch it when it is only half done */
1132 memcpy( &p_end->read1_nop2, &temp_ccw ,
1133 sizeof(struct ccw1));
1134 privptr->p_read_active_last->r_TIC_1.cda=
1135 (__u32)__pa(&p_first->read);
1136 privptr->p_read_active_last->r_TIC_2.cda=
1137 (__u32)__pa(&p_first->read);
1139 /* chain in new set of blocks */
1140 privptr->p_read_active_last->next = p_first;
1141 privptr->p_read_active_last=p_last;
1142 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1143 CLAW_DBF_TEXT(4, trace, "addexit");
1144 return 0;
1145 } /* end of add_claw_reads */
1147 /*-------------------------------------------------------------------*
1148 * ccw_check_return_code *
1150 *-------------------------------------------------------------------*/
1152 static void
1153 ccw_check_return_code(struct ccw_device *cdev, int return_code)
1155 CLAW_DBF_TEXT(4, trace, "ccwret");
1156 if (return_code != 0) {
1157 switch (return_code) {
1158 case -EBUSY: /* BUSY is a transient state no action needed */
1159 break;
1160 case -ENODEV:
1161 dev_err(&cdev->dev, "The remote channel adapter is not"
1162 " available\n");
1163 break;
1164 case -EINVAL:
1165 dev_err(&cdev->dev,
1166 "The status of the remote channel adapter"
1167 " is not valid\n");
1168 break;
1169 default:
1170 dev_err(&cdev->dev, "The common device layer"
1171 " returned error code %d\n",
1172 return_code);
1175 CLAW_DBF_TEXT(4, trace, "ccwret");
1176 } /* end of ccw_check_return_code */
1178 /*-------------------------------------------------------------------*
1179 * ccw_check_unit_check *
1180 *--------------------------------------------------------------------*/
1182 static void
1183 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1185 struct net_device *ndev = p_ch->ndev;
1186 struct device *dev = &p_ch->cdev->dev;
1188 CLAW_DBF_TEXT(4, trace, "unitchek");
1189 dev_warn(dev, "The communication peer of %s disconnected\n",
1190 ndev->name);
1192 if (sense & 0x40) {
1193 if (sense & 0x01) {
1194 dev_warn(dev, "The remote channel adapter for"
1195 " %s has been reset\n",
1196 ndev->name);
1198 } else if (sense & 0x20) {
1199 if (sense & 0x04) {
1200 dev_warn(dev, "A data streaming timeout occurred"
1201 " for %s\n",
1202 ndev->name);
1203 } else if (sense & 0x10) {
1204 dev_warn(dev, "The remote channel adapter for %s"
1205 " is faulty\n",
1206 ndev->name);
1207 } else {
1208 dev_warn(dev, "A data transfer parity error occurred"
1209 " for %s\n",
1210 ndev->name);
1212 } else if (sense & 0x10) {
1213 dev_warn(dev, "A read data parity error occurred"
1214 " for %s\n",
1215 ndev->name);
1218 } /* end of ccw_check_unit_check */
1220 /*-------------------------------------------------------------------*
1221 * find_link *
1222 *--------------------------------------------------------------------*/
1223 static int
1224 find_link(struct net_device *dev, char *host_name, char *ws_name )
1226 struct claw_privbk *privptr;
1227 struct claw_env *p_env;
1228 int rc=0;
1230 CLAW_DBF_TEXT(2, setup, "findlink");
1231 privptr = dev->ml_priv;
1232 p_env=privptr->p_env;
1233 switch (p_env->packing)
1235 case PACKING_ASK:
1236 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1237 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1238 rc = EINVAL;
1239 break;
1240 case DO_PACKED:
1241 case PACK_SEND:
1242 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1243 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1244 rc = EINVAL;
1245 break;
1246 default:
1247 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1248 (memcmp(p_env->api_type , ws_name, 8)!=0))
1249 rc = EINVAL;
1250 break;
1253 return rc;
1254 } /* end of find_link */
1256 /*-------------------------------------------------------------------*
1257 * claw_hw_tx *
1260 *-------------------------------------------------------------------*/
1262 static int
1263 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1265 int rc=0;
1266 struct claw_privbk *privptr;
1267 struct ccwbk *p_this_ccw;
1268 struct ccwbk *p_first_ccw;
1269 struct ccwbk *p_last_ccw;
1270 __u32 numBuffers;
1271 signed long len_of_data;
1272 unsigned long bytesInThisBuffer;
1273 unsigned char *pDataAddress;
1274 struct endccw *pEnd;
1275 struct ccw1 tempCCW;
1276 struct chbk *p_ch;
1277 struct claw_env *p_env;
1278 int lock;
1279 struct clawph *pk_head;
1280 struct chbk *ch;
1282 CLAW_DBF_TEXT(4, trace, "hw_tx");
1283 privptr = (struct claw_privbk *)(dev->ml_priv);
1284 p_ch=(struct chbk *)&privptr->channel[WRITE];
1285 p_env =privptr->p_env;
1286 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1287 /* scan the write queue to free any completed write packets */
1288 p_first_ccw=NULL;
1289 p_last_ccw=NULL;
1290 if ((p_env->packing >= PACK_SEND) &&
1291 (skb->cb[1] != 'P')) {
1292 skb_push(skb,sizeof(struct clawph));
1293 pk_head=(struct clawph *)skb->data;
1294 pk_head->len=skb->len-sizeof(struct clawph);
1295 if (pk_head->len%4) {
1296 pk_head->len+= 4-(pk_head->len%4);
1297 skb_pad(skb,4-(pk_head->len%4));
1298 skb_put(skb,4-(pk_head->len%4));
1300 if (p_env->packing == DO_PACKED)
1301 pk_head->link_num = linkid;
1302 else
1303 pk_head->link_num = 0;
1304 pk_head->flag = 0x00;
1305 skb_pad(skb,4);
1306 skb->cb[1] = 'P';
1308 if (linkid == 0) {
1309 if (claw_check_busy(dev)) {
1310 if (privptr->write_free_count!=0) {
1311 claw_clear_busy(dev);
1313 else {
1314 claw_strt_out_IO(dev );
1315 claw_free_wrt_buf( dev );
1316 if (privptr->write_free_count==0) {
1317 ch = &privptr->channel[WRITE];
1318 atomic_inc(&skb->users);
1319 skb_queue_tail(&ch->collect_queue, skb);
1320 goto Done;
1322 else {
1323 claw_clear_busy(dev);
1327 /* tx lock */
1328 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1329 ch = &privptr->channel[WRITE];
1330 atomic_inc(&skb->users);
1331 skb_queue_tail(&ch->collect_queue, skb);
1332 claw_strt_out_IO(dev );
1333 rc=-EBUSY;
1334 goto Done2;
1337 /* See how many write buffers are required to hold this data */
1338 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1340 /* If that number of buffers isn't available, give up for now */
1341 if (privptr->write_free_count < numBuffers ||
1342 privptr->p_write_free_chain == NULL ) {
1344 claw_setbit_busy(TB_NOBUFFER,dev);
1345 ch = &privptr->channel[WRITE];
1346 atomic_inc(&skb->users);
1347 skb_queue_tail(&ch->collect_queue, skb);
1348 CLAW_DBF_TEXT(2, trace, "clawbusy");
1349 goto Done2;
1351 pDataAddress=skb->data;
1352 len_of_data=skb->len;
1354 while (len_of_data > 0) {
1355 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1356 if (p_this_ccw == NULL) { /* lost the race */
1357 ch = &privptr->channel[WRITE];
1358 atomic_inc(&skb->users);
1359 skb_queue_tail(&ch->collect_queue, skb);
1360 goto Done2;
1362 privptr->p_write_free_chain=p_this_ccw->next;
1363 p_this_ccw->next=NULL;
1364 --privptr->write_free_count; /* -1 */
1365 if (len_of_data >= privptr->p_env->write_size)
1366 bytesInThisBuffer = privptr->p_env->write_size;
1367 else
1368 bytesInThisBuffer = len_of_data;
1369 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1370 len_of_data-=bytesInThisBuffer;
1371 pDataAddress+=(unsigned long)bytesInThisBuffer;
1372 /* setup write CCW */
1373 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1374 if (len_of_data>0) {
1375 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1377 p_this_ccw->write.count=bytesInThisBuffer;
1378 /* now add to end of this chain */
1379 if (p_first_ccw==NULL) {
1380 p_first_ccw=p_this_ccw;
1382 if (p_last_ccw!=NULL) {
1383 p_last_ccw->next=p_this_ccw;
1384 /* set up TIC ccws */
1385 p_last_ccw->w_TIC_1.cda=
1386 (__u32)__pa(&p_this_ccw->write);
1388 p_last_ccw=p_this_ccw; /* save new last block */
1391 /* FirstCCW and LastCCW now contain a new set of write channel
1392 * programs to append to the running channel program
1395 if (p_first_ccw!=NULL) {
1396 /* setup ending ccw sequence for this segment */
1397 pEnd=privptr->p_end_ccw;
1398 if (pEnd->write1) {
1399 pEnd->write1=0x00; /* second end ccw is now active */
1400 /* set up Tic CCWs */
1401 p_last_ccw->w_TIC_1.cda=
1402 (__u32)__pa(&pEnd->write2_nop1);
1403 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1404 pEnd->write2_nop2.flags =
1405 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1406 pEnd->write2_nop2.cda=0;
1407 pEnd->write2_nop2.count=1;
1409 else { /* end of if (pEnd->write1)*/
1410 pEnd->write1=0x01; /* first end ccw is now active */
1411 /* set up Tic CCWs */
1412 p_last_ccw->w_TIC_1.cda=
1413 (__u32)__pa(&pEnd->write1_nop1);
1414 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1415 pEnd->write1_nop2.flags =
1416 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1417 pEnd->write1_nop2.cda=0;
1418 pEnd->write1_nop2.count=1;
1419 } /* end if if (pEnd->write1) */
1421 if (privptr->p_write_active_first==NULL ) {
1422 privptr->p_write_active_first=p_first_ccw;
1423 privptr->p_write_active_last=p_last_ccw;
1425 else {
1426 /* set up Tic CCWs */
1428 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1429 tempCCW.count=0;
1430 tempCCW.flags=0;
1431 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1433 if (pEnd->write1) {
1436 * first set of ending CCW's is chained to the new write
1437 * chain, so the second set is chained to the active chain
1438 * Therefore modify the second set to point the new write chain.
1439 * make sure we update the CCW atomically
1440 * so channel does not fetch it when it's only half done
1442 memcpy( &pEnd->write2_nop2, &tempCCW ,
1443 sizeof(struct ccw1));
1444 privptr->p_write_active_last->w_TIC_1.cda=
1445 (__u32)__pa(&p_first_ccw->write);
1447 else {
1449 /*make sure we update the CCW atomically
1450 *so channel does not fetch it when it's only half done
1452 memcpy(&pEnd->write1_nop2, &tempCCW ,
1453 sizeof(struct ccw1));
1454 privptr->p_write_active_last->w_TIC_1.cda=
1455 (__u32)__pa(&p_first_ccw->write);
1457 } /* end if if (pEnd->write1) */
1459 privptr->p_write_active_last->next=p_first_ccw;
1460 privptr->p_write_active_last=p_last_ccw;
1463 } /* endif (p_first_ccw!=NULL) */
1464 dev_kfree_skb_any(skb);
1465 if (linkid==0) {
1466 lock=LOCK_NO;
1468 else {
1469 lock=LOCK_YES;
1471 claw_strt_out_IO(dev );
1472 /* if write free count is zero , set NOBUFFER */
1473 if (privptr->write_free_count==0) {
1474 claw_setbit_busy(TB_NOBUFFER,dev);
1476 Done2:
1477 claw_clearbit_busy(TB_TX,dev);
1478 Done:
1479 return(rc);
1480 } /* end of claw_hw_tx */
1482 /*-------------------------------------------------------------------*
1484 * init_ccw_bk *
1486 *--------------------------------------------------------------------*/
1488 static int
1489 init_ccw_bk(struct net_device *dev)
1492 __u32 ccw_blocks_required;
1493 __u32 ccw_blocks_perpage;
1494 __u32 ccw_pages_required;
1495 __u32 claw_reads_perpage=1;
1496 __u32 claw_read_pages;
1497 __u32 claw_writes_perpage=1;
1498 __u32 claw_write_pages;
1499 void *p_buff=NULL;
1500 struct ccwbk*p_free_chain;
1501 struct ccwbk*p_buf;
1502 struct ccwbk*p_last_CCWB;
1503 struct ccwbk*p_first_CCWB;
1504 struct endccw *p_endccw=NULL;
1505 addr_t real_address;
1506 struct claw_privbk *privptr = dev->ml_priv;
1507 struct clawh *pClawH=NULL;
1508 addr_t real_TIC_address;
1509 int i,j;
1510 CLAW_DBF_TEXT(4, trace, "init_ccw");
1512 /* initialize statistics field */
1513 privptr->active_link_ID=0;
1514 /* initialize ccwbk pointers */
1515 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1516 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1517 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1518 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1519 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1520 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1521 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1522 privptr->buffs_alloc = 0;
1523 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1524 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1525 /* initialize free write ccwbk counter */
1526 privptr->write_free_count=0; /* number of free bufs on write chain */
1527 p_last_CCWB = NULL;
1528 p_first_CCWB= NULL;
1530 * We need 1 CCW block for each read buffer, 1 for each
1531 * write buffer, plus 1 for ClawSignalBlock
1533 ccw_blocks_required =
1534 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1536 * compute number of CCW blocks that will fit in a page
1538 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1539 ccw_pages_required=
1540 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
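/*
 * Illustration (added comment, not original code): with the defaults set in
 * claw_probe() of 5 read buffers and 5 write buffers,
 * ccw_blocks_required = 5 + 5 + 1 = 11; ccw_pages_required is then that
 * count divided by how many struct ccwbk blocks (CCWBK_SIZE, from claw.h)
 * fit in one PAGE_SIZE page, rounded up.
 */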
1543 * read and write sizes are set by 2 constants in claw.h
1544 * 4k and 32k. Unpacked values other than 4k are not going to
1545 * provide good performance. With packing buffers support 32k
1546 * buffers are used.
1548 if (privptr->p_env->read_size < PAGE_SIZE) {
1549 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1550 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1551 claw_reads_perpage);
1553 else { /* > or equal */
1554 privptr->p_buff_pages_perread =
1555 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1556 claw_read_pages = privptr->p_env->read_buffers *
1557 privptr->p_buff_pages_perread;
1559 if (privptr->p_env->write_size < PAGE_SIZE) {
1560 claw_writes_perpage =
1561 PAGE_SIZE / privptr->p_env->write_size;
1562 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1563 claw_writes_perpage);
1566 else { /* > or equal */
1567 privptr->p_buff_pages_perwrite =
1568 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1569 claw_write_pages = privptr->p_env->write_buffers *
1570 privptr->p_buff_pages_perwrite;
1573 * allocate ccw_pages_required
1575 if (privptr->p_buff_ccw==NULL) {
1576 privptr->p_buff_ccw=
1577 (void *)__get_free_pages(__GFP_DMA,
1578 (int)pages_to_order_of_mag(ccw_pages_required ));
1579 if (privptr->p_buff_ccw==NULL) {
1580 return -ENOMEM;
1582 privptr->p_buff_ccw_num=ccw_pages_required;
1584 memset(privptr->p_buff_ccw, 0x00,
1585 privptr->p_buff_ccw_num * PAGE_SIZE);
1588 * obtain ending ccw block address
1591 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1592 real_address = (__u32)__pa(privptr->p_end_ccw);
1593 /* Initialize ending CCW block */
1594 p_endccw=privptr->p_end_ccw;
1595 p_endccw->real=real_address;
1596 p_endccw->write1=0x00;
1597 p_endccw->read1=0x00;
1599 /* write1_nop1 */
1600 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1601 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1602 p_endccw->write1_nop1.count = 1;
1603 p_endccw->write1_nop1.cda = 0;
1605 /* write1_nop2 */
1606 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1607 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1608 p_endccw->write1_nop2.count = 1;
1609 p_endccw->write1_nop2.cda = 0;
1611 /* write2_nop1 */
1612 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1613 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1614 p_endccw->write2_nop1.count = 1;
1615 p_endccw->write2_nop1.cda = 0;
1617 /* write2_nop2 */
1618 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1619 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1620 p_endccw->write2_nop2.count = 1;
1621 p_endccw->write2_nop2.cda = 0;
1623 /* read1_nop1 */
1624 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1625 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1626 p_endccw->read1_nop1.count = 1;
1627 p_endccw->read1_nop1.cda = 0;
1629 /* read1_nop2 */
1630 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1631 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1632 p_endccw->read1_nop2.count = 1;
1633 p_endccw->read1_nop2.cda = 0;
1635 /* read2_nop1 */
1636 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1637 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1638 p_endccw->read2_nop1.count = 1;
1639 p_endccw->read2_nop1.cda = 0;
1641 /* read2_nop2 */
1642 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1643 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1644 p_endccw->read2_nop2.count = 1;
1645 p_endccw->read2_nop2.cda = 0;
1648 * Build a chain of CCWs
1651 p_buff=privptr->p_buff_ccw;
1653 p_free_chain=NULL;
1654 for (i=0 ; i < ccw_pages_required; i++ ) {
1655 real_address = (__u32)__pa(p_buff);
1656 p_buf=p_buff;
1657 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1658 p_buf->next = p_free_chain;
1659 p_free_chain = p_buf;
1660 p_buf->real=(__u32)__pa(p_buf);
1661 ++p_buf;
1663 p_buff+=PAGE_SIZE;
1666 * Initialize ClawSignalBlock
1669 if (privptr->p_claw_signal_blk==NULL) {
1670 privptr->p_claw_signal_blk=p_free_chain;
1671 p_free_chain=p_free_chain->next;
1672 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1673 pClawH->length=0xffff;
1674 pClawH->opcode=0xff;
1675 pClawH->flag=CLAW_BUSY;
1679 * allocate write_pages_required and add to free chain
1681 if (privptr->p_buff_write==NULL) {
1682 if (privptr->p_env->write_size < PAGE_SIZE) {
1683 privptr->p_buff_write=
1684 (void *)__get_free_pages(__GFP_DMA,
1685 (int)pages_to_order_of_mag(claw_write_pages ));
1686 if (privptr->p_buff_write==NULL) {
1687 privptr->p_buff_ccw=NULL;
1688 return -ENOMEM;
1691 * Build CLAW write free chain
1695 memset(privptr->p_buff_write, 0x00,
1696 ccw_pages_required * PAGE_SIZE);
1697 privptr->p_write_free_chain=NULL;
1699 p_buff=privptr->p_buff_write;
1701 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1702 p_buf = p_free_chain; /* get a CCW */
1703 p_free_chain = p_buf->next;
1704 p_buf->next =privptr->p_write_free_chain;
1705 privptr->p_write_free_chain = p_buf;
1706 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1707 p_buf-> write.cda = (__u32)__pa(p_buff);
1708 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1709 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1710 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1711 p_buf-> w_read_FF.count = 1;
1712 p_buf-> w_read_FF.cda =
1713 (__u32)__pa(&p_buf-> header.flag);
1714 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1715 p_buf-> w_TIC_1.flags = 0;
1716 p_buf-> w_TIC_1.count = 0;
1718 if (((unsigned long)p_buff +
1719 privptr->p_env->write_size) >=
1720 ((unsigned long)(p_buff+2*
1721 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1722 p_buff = p_buff+privptr->p_env->write_size;
1726 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1728 privptr->p_write_free_chain=NULL;
1729 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1730 p_buff=(void *)__get_free_pages(__GFP_DMA,
1731 (int)pages_to_order_of_mag(
1732 privptr->p_buff_pages_perwrite) );
1733 if (p_buff==NULL) {
1734 free_pages((unsigned long)privptr->p_buff_ccw,
1735 (int)pages_to_order_of_mag(
1736 privptr->p_buff_ccw_num));
1737 privptr->p_buff_ccw=NULL;
1738 p_buf=privptr->p_buff_write;
1739 while (p_buf!=NULL) {
1740 free_pages((unsigned long)
1741 p_buf->p_buffer,
1742 (int)pages_to_order_of_mag(
1743 privptr->p_buff_pages_perwrite));
1744 p_buf=p_buf->next;
1746 return -ENOMEM;
1747 } /* Error on get_pages */
1748 memset(p_buff, 0x00, privptr->p_env->write_size );
1749 p_buf = p_free_chain;
1750 p_free_chain = p_buf->next;
1751 p_buf->next = privptr->p_write_free_chain;
1752 privptr->p_write_free_chain = p_buf;
1753 privptr->p_buff_write = p_buf;
1754 p_buf->p_buffer=(struct clawbuf *)p_buff;
1755 p_buf-> write.cda = (__u32)__pa(p_buff);
1756 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1757 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1758 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1759 p_buf-> w_read_FF.count = 1;
1760 p_buf-> w_read_FF.cda =
1761 (__u32)__pa(&p_buf-> header.flag);
1762 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1763 p_buf-> w_TIC_1.flags = 0;
1764 p_buf-> w_TIC_1.count = 0;
1765 } /* for all write_buffers */
1767 } /* else buffers are PAGE_SIZE or bigger */
1770 privptr->p_buff_write_num=claw_write_pages;
1771 privptr->write_free_count=privptr->p_env->write_buffers;
1775 * allocate read_pages_required and chain to free chain
1777 if (privptr->p_buff_read==NULL) {
1778 if (privptr->p_env->read_size < PAGE_SIZE) {
1779 privptr->p_buff_read=
1780 (void *)__get_free_pages(__GFP_DMA,
1781 (int)pages_to_order_of_mag(claw_read_pages) );
1782 if (privptr->p_buff_read==NULL) {
1783 free_pages((unsigned long)privptr->p_buff_ccw,
1784 (int)pages_to_order_of_mag(
1785 privptr->p_buff_ccw_num));
1786 /* free the write pages size is < page size */
1787 free_pages((unsigned long)privptr->p_buff_write,
1788 (int)pages_to_order_of_mag(
1789 privptr->p_buff_write_num));
1790 privptr->p_buff_ccw=NULL;
1791 privptr->p_buff_write=NULL;
1792 return -ENOMEM;
1794 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1795 privptr->p_buff_read_num=claw_read_pages;
1797 * Build CLAW read free chain
1800 p_buff=privptr->p_buff_read;
1801 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1802 p_buf = p_free_chain;
1803 p_free_chain = p_buf->next;
1805 if (p_last_CCWB==NULL) {
1806 p_buf->next=NULL;
1807 real_TIC_address=0;
1808 p_last_CCWB=p_buf;
1810 else {
1811 p_buf->next=p_first_CCWB;
1812 real_TIC_address=
1813 (__u32)__pa(&p_first_CCWB -> read );
1816 p_first_CCWB=p_buf;
1818 p_buf->p_buffer=(struct clawbuf *)p_buff;
1819 /* initialize read command */
1820 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1821 p_buf-> read.cda = (__u32)__pa(p_buff);
1822 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1823 p_buf-> read.count = privptr->p_env->read_size;
1825 /* initialize read_h command */
1826 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1827 p_buf-> read_h.cda =
1828 (__u32)__pa(&(p_buf->header));
1829 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1830 p_buf-> read_h.count = sizeof(struct clawh);
1832 /* initialize Signal command */
1833 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1834 p_buf-> signal.cda =
1835 (__u32)__pa(&(pClawH->flag));
1836 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1837 p_buf-> signal.count = 1;
1839 /* initialize r_TIC_1 command */
1840 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1841 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1842 p_buf-> r_TIC_1.flags = 0;
1843 p_buf-> r_TIC_1.count = 0;
1845 /* initialize r_read_FF command */
1846 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1847 p_buf-> r_read_FF.cda =
1848 (__u32)__pa(&(pClawH->flag));
1849 p_buf-> r_read_FF.flags =
1850 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1851 p_buf-> r_read_FF.count = 1;
1853 /* initialize r_TIC_2 */
1854 memcpy(&p_buf->r_TIC_2,
1855 &p_buf->r_TIC_1, sizeof(struct ccw1));
1857 /* initialize Header */
1858 p_buf->header.length=0xffff;
1859 p_buf->header.opcode=0xff;
1860 p_buf->header.flag=CLAW_PENDING;
1862 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1863 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1865 & PAGE_MASK)) {
1866 p_buff= p_buff+privptr->p_env->read_size;
1868 else {
1869 p_buff=
1870 (void *)((unsigned long)
1871 (p_buff+2*(privptr->p_env->read_size)-1)
1872 & PAGE_MASK) ;
1874 } /* for read_buffers */
1875 } /* read_size < PAGE_SIZE */
1876 else { /* read Size >= PAGE_SIZE */
1877 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1878 p_buff = (void *)__get_free_pages(__GFP_DMA,
1879 (int)pages_to_order_of_mag(
1880 privptr->p_buff_pages_perread));
1881 if (p_buff==NULL) {
1882 free_pages((unsigned long)privptr->p_buff_ccw,
1883 (int)pages_to_order_of_mag(privptr->
1884 p_buff_ccw_num));
1885 /* free the write pages */
1886 p_buf=privptr->p_buff_write;
1887 while (p_buf!=NULL) {
1888 free_pages(
1889 (unsigned long)p_buf->p_buffer,
1890 (int)pages_to_order_of_mag(
1891 privptr->p_buff_pages_perwrite));
1892 p_buf=p_buf->next;
1894 /* free any read pages already allocated */
1895 p_buf=privptr->p_buff_read;
1896 while (p_buf!=NULL) {
1897 free_pages(
1898 (unsigned long)p_buf->p_buffer,
1899 (int)pages_to_order_of_mag(
1900 privptr->p_buff_pages_perread));
1901 p_buf=p_buf->next;
1903 privptr->p_buff_ccw=NULL;
1904 privptr->p_buff_write=NULL;
1905 return -ENOMEM;
1907 memset(p_buff, 0x00, privptr->p_env->read_size);
1908 p_buf = p_free_chain;
1909 privptr->p_buff_read = p_buf;
1910 p_free_chain = p_buf->next;
1912 if (p_last_CCWB==NULL) {
1913 p_buf->next=NULL;
1914 real_TIC_address=0;
1915 p_last_CCWB=p_buf;
1917 else {
1918 p_buf->next=p_first_CCWB;
1919 real_TIC_address=
1920 (addr_t)__pa(
1921 &p_first_CCWB -> read );
1924 p_first_CCWB=p_buf;
1925 /* save buff address */
1926 p_buf->p_buffer=(struct clawbuf *)p_buff;
1927 /* initialize read command */
1928 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1929 p_buf-> read.cda = (__u32)__pa(p_buff);
1930 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1931 p_buf-> read.count = privptr->p_env->read_size;
1933 /* initialize read_h command */
1934 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1935 p_buf-> read_h.cda =
1936 (__u32)__pa(&(p_buf->header));
1937 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1938 p_buf-> read_h.count = sizeof(struct clawh);
1940 /* initialize Signal command */
1941 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1942 p_buf-> signal.cda =
1943 (__u32)__pa(&(pClawH->flag));
1944 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1945 p_buf-> signal.count = 1;
1947 /* initialize r_TIC_1 command */
1948 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1949 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1950 p_buf-> r_TIC_1.flags = 0;
1951 p_buf-> r_TIC_1.count = 0;
1953 /* initialize r_read_FF command */
1954 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1955 p_buf-> r_read_FF.cda =
1956 (__u32)__pa(&(pClawH->flag));
1957 p_buf-> r_read_FF.flags =
1958 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1959 p_buf-> r_read_FF.count = 1;
1961 /* initialize r_TIC_2 */
1962 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1963 sizeof(struct ccw1));
1965 /* initialize Header */
1966 p_buf->header.length=0xffff;
1967 p_buf->header.opcode=0xff;
1968 p_buf->header.flag=CLAW_PENDING;
1970 } /* For read_buffers */
1971 } /* read_size >= PAGE_SIZE */
1972 } /* pBuffread = NULL */
1973 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
1974 privptr->buffs_alloc = 1;
1976 return 0;
1977 } /* end of init_ccw_bk */
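/*
 * Overview of what init_ccw_bk() leaves behind (summary comment, not part
 * of the original flow): write buffers end up on p_write_free_chain, each
 * with its write / w_read_FF / w_TIC_1 CCWs filled in; every read buffer
 * is described by a ccwbk whose channel program is read -> read_h ->
 * signal -> r_TIC_1 / r_read_FF / r_TIC_2, with the CLAW header preset to
 * length 0xffff, opcode 0xff and flag CLAW_PENDING.  Buffers smaller than
 * PAGE_SIZE are carved out of shared page allocations; larger ones get
 * their own __get_free_pages() block.  The finished read chain is queued
 * with add_claw_reads() and buffs_alloc marks the work as done once.
 */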
1979 /*-------------------------------------------------------------------*
1981 * probe_error *
1983 *--------------------------------------------------------------------*/
1985 static void
1986 probe_error( struct ccwgroup_device *cgdev)
1988 struct claw_privbk *privptr;
1990 CLAW_DBF_TEXT(4, trace, "proberr");
1991 privptr = dev_get_drvdata(&cgdev->dev);
1992 if (privptr != NULL) {
1993 dev_set_drvdata(&cgdev->dev, NULL);
1994 kfree(privptr->p_env);
1995 kfree(privptr->p_mtc_envelope);
1996 kfree(privptr);
1998 } /* probe_error */
2000 /*-------------------------------------------------------------------*
2001 * claw_process_control *
2004 *--------------------------------------------------------------------*/
2006 static int
2007 claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2010 struct clawbuf *p_buf;
2011 struct clawctl ctlbk;
2012 struct clawctl *p_ctlbk;
2013 char temp_host_name[8];
2014 char temp_ws_name[8];
2015 struct claw_privbk *privptr;
2016 struct claw_env *p_env;
2017 struct sysval *p_sysval;
2018 struct conncmd *p_connect=NULL;
2019 int rc;
2020 struct chbk *p_ch = NULL;
2021 struct device *tdev;
2022 CLAW_DBF_TEXT(2, setup, "clw_cntl");
2023 udelay(1000); /* Wait a ms for the control packets to
2024 * catch up to each other */
2025 privptr = dev->ml_priv;
2026 p_env=privptr->p_env;
2027 tdev = &privptr->channel[READ].cdev->dev;
2028 memcpy( &temp_host_name, p_env->host_name, 8);
2029 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2030 dev_info(tdev, "%s: CLAW device %.8s: "
2031 "Received Control Packet\n",
2032 dev->name, temp_ws_name);
2033 if (privptr->release_pend==1) {
2034 return 0;
2036 p_buf=p_ccw->p_buffer;
2037 p_ctlbk=&ctlbk;
2038 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2039 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2040 } else {
2041 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2043 switch (p_ctlbk->command)
2045 case SYSTEM_VALIDATE_REQUEST:
2046 if (p_ctlbk->version != CLAW_VERSION_ID) {
2047 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2048 CLAW_RC_WRONG_VERSION);
2049 dev_warn(tdev, "The communication peer of %s"
2050 " uses an incorrect API version %d\n",
2051 dev->name, p_ctlbk->version);
2053 p_sysval = (struct sysval *)&(p_ctlbk->data);
2054 dev_info(tdev, "%s: Recv Sys Validate Request: "
2055 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2056 "Host name=%.8s\n",
2057 dev->name, p_ctlbk->version,
2058 p_ctlbk->linkid,
2059 p_ctlbk->correlator,
2060 p_sysval->WS_name,
2061 p_sysval->host_name);
2062 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2063 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2064 CLAW_RC_NAME_MISMATCH);
2065 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2066 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2067 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2068 dev_warn(tdev,
2069 "Host name %s for %s does not match the"
2070 " remote adapter name %s\n",
2071 p_sysval->host_name,
2072 dev->name,
2073 temp_host_name);
2075 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2076 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2077 CLAW_RC_NAME_MISMATCH);
2078 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2079 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2080 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2081 dev_warn(tdev, "Adapter name %s for %s does not match"
2082 " the remote host name %s\n",
2083 p_sysval->WS_name,
2084 dev->name,
2085 temp_ws_name);
2087 if ((p_sysval->write_frame_size < p_env->write_size) &&
2088 (p_env->packing == 0)) {
2089 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2090 CLAW_RC_HOST_RCV_TOO_SMALL);
2091 dev_warn(tdev,
2092 "The local write buffer is smaller than the"
2093 " remote read buffer\n");
2094 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2096 if ((p_sysval->read_frame_size < p_env->read_size) &&
2097 (p_env->packing == 0)) {
2098 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2099 CLAW_RC_HOST_RCV_TOO_SMALL);
2100 dev_warn(tdev,
2101 "The local read buffer is smaller than the"
2102 " remote write buffer\n");
2103 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2105 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2106 dev_info(tdev,
2107 "CLAW device %.8s: System validate"
2108 " completed.\n", temp_ws_name);
2109 dev_info(tdev,
2110 "%s: sys Validate Rsize:%d Wsize:%d\n",
2111 dev->name, p_sysval->read_frame_size,
2112 p_sysval->write_frame_size);
2113 privptr->system_validate_comp = 1;
2114 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2115 p_env->packing = PACKING_ASK;
2116 claw_strt_conn_req(dev);
2117 break;
2118 case SYSTEM_VALIDATE_RESPONSE:
2119 p_sysval = (struct sysval *)&(p_ctlbk->data);
2120 dev_info(tdev,
2121 "Settings for %s validated (version=%d, "
2122 "remote device=%d, rc=%d, adapter name=%.8s, "
2123 "host name=%.8s)\n",
2124 dev->name,
2125 p_ctlbk->version,
2126 p_ctlbk->correlator,
2127 p_ctlbk->rc,
2128 p_sysval->WS_name,
2129 p_sysval->host_name);
2130 switch (p_ctlbk->rc) {
2131 case 0:
2132 dev_info(tdev, "%s: CLAW device "
2133 "%.8s: System validate completed.\n",
2134 dev->name, temp_ws_name);
2135 if (privptr->system_validate_comp == 0)
2136 claw_strt_conn_req(dev);
2137 privptr->system_validate_comp = 1;
2138 break;
2139 case CLAW_RC_NAME_MISMATCH:
2140 dev_warn(tdev, "Validating %s failed because of"
2141 " a host or adapter name mismatch\n",
2142 dev->name);
2143 break;
2144 case CLAW_RC_WRONG_VERSION:
2145 dev_warn(tdev, "Validating %s failed because of a"
2146 " version conflict\n",
2147 dev->name);
2148 break;
2149 case CLAW_RC_HOST_RCV_TOO_SMALL:
2150 dev_warn(tdev, "Validating %s failed because of a"
2151 " frame size conflict\n",
2152 dev->name);
2153 break;
2154 default:
2155 dev_warn(tdev, "The communication peer of %s rejected"
2156 " the connection\n",
2157 dev->name);
2158 break;
2160 break;
2162 case CONNECTION_REQUEST:
2163 p_connect = (struct conncmd *)&(p_ctlbk->data);
2164 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2165 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2166 dev->name,
2167 p_ctlbk->version,
2168 p_ctlbk->linkid,
2169 p_ctlbk->correlator,
2170 p_connect->host_name,
2171 p_connect->WS_name);
2172 if (privptr->active_link_ID != 0) {
2173 claw_snd_disc(dev, p_ctlbk);
2174 dev_info(tdev, "%s rejected a connection request"
2175 " because it is already active\n",
2176 dev->name);
2178 if (p_ctlbk->linkid != 1) {
2179 claw_snd_disc(dev, p_ctlbk);
2180 dev_info(tdev, "%s rejected a request to open multiple"
2181 " connections\n",
2182 dev->name);
2184 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2185 if (rc != 0) {
2186 claw_snd_disc(dev, p_ctlbk);
2187 dev_info(tdev, "%s rejected a connection request"
2188 " because of a type mismatch\n",
2189 dev->name);
2191 claw_send_control(dev,
2192 CONNECTION_CONFIRM, p_ctlbk->linkid,
2193 p_ctlbk->correlator,
2194 0, p_connect->host_name,
2195 p_connect->WS_name);
2196 if (p_env->packing == PACKING_ASK) {
2197 p_env->packing = PACK_SEND;
2198 claw_snd_conn_req(dev, 0);
2200 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2201 "completed link_id=%d.\n",
2202 dev->name, temp_ws_name,
2203 p_ctlbk->linkid);
2204 privptr->active_link_ID = p_ctlbk->linkid;
2205 p_ch = &privptr->channel[WRITE];
2206 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2207 break;
2208 case CONNECTION_RESPONSE:
2209 p_connect = (struct conncmd *)&(p_ctlbk->data);
2210 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2211 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2212 dev->name,
2213 p_ctlbk->version,
2214 p_ctlbk->linkid,
2215 p_ctlbk->correlator,
2216 p_ctlbk->rc,
2217 p_connect->host_name,
2218 p_connect->WS_name);
2220 if (p_ctlbk->rc != 0) {
2221 dev_warn(tdev, "The communication peer of %s rejected"
2222 " a connection request\n",
2223 dev->name);
2224 return 1;
2226 rc = find_link(dev,
2227 p_connect->host_name, p_connect->WS_name);
2228 if (rc != 0) {
2229 claw_snd_disc(dev, p_ctlbk);
2230 dev_warn(tdev, "The communication peer of %s"
2231 " rejected a connection "
2232 "request because of a type mismatch\n",
2233 dev->name);
2235 /* remember the negated link id until CONNECTION_CONFIRM arrives */
2236 privptr->active_link_ID = -(p_ctlbk->linkid);
2237 break;
2238 case CONNECTION_CONFIRM:
2239 p_connect = (struct conncmd *)&(p_ctlbk->data);
2240 dev_info(tdev,
2241 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2242 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2243 dev->name,
2244 p_ctlbk->version,
2245 p_ctlbk->linkid,
2246 p_ctlbk->correlator,
2247 p_connect->host_name,
2248 p_connect->WS_name);
2249 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2250 privptr->active_link_ID = p_ctlbk->linkid;
2251 if (p_env->packing > PACKING_ASK) {
2252 dev_info(tdev,
2253 "%s: Confirmed Now packing\n", dev->name);
2254 p_env->packing = DO_PACKED;
2256 p_ch = &privptr->channel[WRITE];
2257 wake_up(&p_ch->wait);
2258 } else {
2259 dev_warn(tdev, "Activating %s failed because of"
2260 " an incorrect link ID=%d\n",
2261 dev->name, p_ctlbk->linkid);
2262 claw_snd_disc(dev, p_ctlbk);
2264 break;
2265 case DISCONNECT:
2266 dev_info(tdev, "%s: Disconnect: "
2267 "Vers=%d,link_id=%d,Corr=%d\n",
2268 dev->name, p_ctlbk->version,
2269 p_ctlbk->linkid, p_ctlbk->correlator);
2270 if ((p_ctlbk->linkid == 2) &&
2271 (p_env->packing == PACK_SEND)) {
2272 privptr->active_link_ID = 1;
2273 p_env->packing = DO_PACKED;
2274 } else
2275 privptr->active_link_ID = 0;
2276 break;
2277 case CLAW_ERROR:
2278 dev_warn(tdev, "The communication peer of %s failed\n",
2279 dev->name);
2280 break;
2281 default:
2282 dev_warn(tdev, "The communication peer of %s sent"
2283 " an unknown command code\n",
2284 dev->name);
2285 break;
2288 return 0;
2289 } /* end of claw_process_control */
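/*
 * Note on the control exchange handled above:
 *   SYSTEM_VALIDATE_REQUEST/RESPONSE   - compare version, host/adapter
 *                                        names and read/write frame sizes
 *   CONNECTION_REQUEST/RESPONSE/CONFIRM - bring up the single data link;
 *                                        when WS_APPL_NAME_PACKED was
 *                                        negotiated, packing moves from
 *                                        PACKING_ASK via PACK_SEND to
 *                                        DO_PACKED
 *   DISCONNECT / CLAW_ERROR            - drop the link or log a peer error
 */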
2292 /*-------------------------------------------------------------------*
2293 * claw_send_control *
2295 *--------------------------------------------------------------------*/
2297 static int
2298 claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2299 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2301 struct claw_privbk *privptr;
2302 struct clawctl *p_ctl;
2303 struct sysval *p_sysval;
2304 struct conncmd *p_connect;
2305 struct sk_buff *skb;
2307 CLAW_DBF_TEXT(2, setup, "sndcntl");
2308 privptr = dev->ml_priv;
2309 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2311 p_ctl->command=type;
2312 p_ctl->version=CLAW_VERSION_ID;
2313 p_ctl->linkid=link;
2314 p_ctl->correlator=correlator;
2315 p_ctl->rc=rc;
2317 p_sysval=(struct sysval *)&p_ctl->data;
2318 p_connect=(struct conncmd *)&p_ctl->data;
2320 switch (p_ctl->command) {
2321 case SYSTEM_VALIDATE_REQUEST:
2322 case SYSTEM_VALIDATE_RESPONSE:
2323 memcpy(&p_sysval->host_name, local_name, 8);
2324 memcpy(&p_sysval->WS_name, remote_name, 8);
2325 if (privptr->p_env->packing > 0) {
2326 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2327 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2328 } else {
2329 /* how big is the biggest group of packets */
2330 p_sysval->read_frame_size =
2331 privptr->p_env->read_size;
2332 p_sysval->write_frame_size =
2333 privptr->p_env->write_size;
2335 memset(&p_sysval->reserved, 0x00, 4);
2336 break;
2337 case CONNECTION_REQUEST:
2338 case CONNECTION_RESPONSE:
2339 case CONNECTION_CONFIRM:
2340 case DISCONNECT:
2341 memcpy(&p_sysval->host_name, local_name, 8);
2342 memcpy(&p_sysval->WS_name, remote_name, 8);
2343 if (privptr->p_env->packing > 0) {
2344 /* How big is the biggest packet */
2345 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2346 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2347 } else {
2348 memset(&p_connect->reserved1, 0x00, 4);
2349 memset(&p_connect->reserved2, 0x00, 4);
2351 break;
2352 default:
2353 break;
2356 /* write Control Record to the device */
2359 skb = dev_alloc_skb(sizeof(struct clawctl));
2360 if (!skb) {
2361 return -ENOMEM;
2363 memcpy(skb_put(skb, sizeof(struct clawctl)),
2364 p_ctl, sizeof(struct clawctl));
2365 if (privptr->p_env->packing >= PACK_SEND)
2366 claw_hw_tx(skb, dev, 1);
2367 else
2368 claw_hw_tx(skb, dev, 0);
2369 return 0;
2370 } /* end of claw_send_control */
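/*
 * Note: the control record is built in privptr->ctl_bk, copied into a
 * freshly allocated skb and handed to claw_hw_tx(); once packing has
 * reached PACK_SEND the last argument is 1, otherwise 0.
 */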
2372 /*-------------------------------------------------------------------*
2373 * claw_snd_conn_req *
2375 *--------------------------------------------------------------------*/
2376 static int
2377 claw_snd_conn_req(struct net_device *dev, __u8 link)
2379 int rc;
2380 struct claw_privbk *privptr = dev->ml_priv;
2381 struct clawctl *p_ctl;
2383 CLAW_DBF_TEXT(2, setup, "snd_conn");
2384 rc = 1;
2385 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2386 p_ctl->linkid = link;
2387 if ( privptr->system_validate_comp==0x00 ) {
2388 return rc;
2390 if (privptr->p_env->packing == PACKING_ASK )
2391 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2392 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2393 if (privptr->p_env->packing == PACK_SEND) {
2394 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2395 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2397 if (privptr->p_env->packing == 0)
2398 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2399 HOST_APPL_NAME, privptr->p_env->api_type);
2400 return rc;
2402 } /* end of claw_snd_conn_req */
2405 /*-------------------------------------------------------------------*
2406 * claw_snd_disc *
2408 *--------------------------------------------------------------------*/
2410 static int
2411 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2413 int rc;
2414 struct conncmd * p_connect;
2416 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2417 p_connect=(struct conncmd *)&p_ctl->data;
2419 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2420 p_ctl->correlator, 0,
2421 p_connect->host_name, p_connect->WS_name);
2422 return rc;
2423 } /* end of claw_snd_disc */
2426 /*-------------------------------------------------------------------*
2427 * claw_snd_sys_validate_rsp *
2429 *--------------------------------------------------------------------*/
2431 static int
2432 claw_snd_sys_validate_rsp(struct net_device *dev,
2433 struct clawctl *p_ctl, __u32 return_code)
2435 struct claw_env * p_env;
2436 struct claw_privbk *privptr;
2437 int rc;
2439 CLAW_DBF_TEXT(2, setup, "chkresp");
2440 privptr = dev->ml_priv;
2441 p_env=privptr->p_env;
2442 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2443 p_ctl->linkid,
2444 p_ctl->correlator,
2445 return_code,
2446 p_env->host_name,
2447 p_env->adapter_name );
2448 return rc;
2449 } /* end of claw_snd_sys_validate_rsp */
2451 /*-------------------------------------------------------------------*
2452 * claw_strt_conn_req *
2454 *--------------------------------------------------------------------*/
2456 static int
2457 claw_strt_conn_req(struct net_device *dev )
2459 int rc;
2461 CLAW_DBF_TEXT(2, setup, "conn_req");
2462 rc=claw_snd_conn_req(dev, 1);
2463 return rc;
2464 } /* end of claw_strt_conn_req */
2468 /*-------------------------------------------------------------------*
2469 * claw_stats *
2470 *-------------------------------------------------------------------*/
2472 static struct
2473 net_device_stats *claw_stats(struct net_device *dev)
2475 struct claw_privbk *privptr;
2477 CLAW_DBF_TEXT(4, trace, "stats");
2478 privptr = dev->ml_priv;
2479 return &privptr->stats;
2480 } /* end of claw_stats */
2483 /*-------------------------------------------------------------------*
2484 * unpack_read *
2486 *--------------------------------------------------------------------*/
2487 static void
2488 unpack_read(struct net_device *dev )
2490 struct sk_buff *skb;
2491 struct claw_privbk *privptr;
2492 struct claw_env *p_env;
2493 struct ccwbk *p_this_ccw;
2494 struct ccwbk *p_first_ccw;
2495 struct ccwbk *p_last_ccw;
2496 struct clawph *p_packh;
2497 void *p_packd;
2498 struct clawctl *p_ctlrec=NULL;
2499 struct device *p_dev;
2501 __u32 len_of_data;
2502 __u32 pack_off;
2503 __u8 link_num;
2504 __u8 mtc_this_frm=0;
2505 __u32 bytes_to_mov;
2506 int i=0;
2507 int p=0;
2509 CLAW_DBF_TEXT(4, trace, "unpkread");
2510 p_first_ccw=NULL;
2511 p_last_ccw=NULL;
2512 p_packh=NULL;
2513 p_packd=NULL;
2514 privptr = dev->ml_priv;
2516 p_dev = &privptr->channel[READ].cdev->dev;
2517 p_env = privptr->p_env;
2518 p_this_ccw=privptr->p_read_active_first;
2519 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2520 pack_off = 0;
2521 p = 0;
2522 p_this_ccw->header.flag=CLAW_PENDING;
2523 privptr->p_read_active_first=p_this_ccw->next;
2524 p_this_ccw->next=NULL;
2525 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2526 if ((p_env->packing == PACK_SEND) &&
2527 (p_packh->len == 32) &&
2528 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2529 p_packh++; /* peek past pack header */
2530 p_ctlrec = (struct clawctl *)p_packh;
2531 p_packh--; /* un peek */
2532 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2533 (p_ctlrec->command == CONNECTION_CONFIRM))
2534 p_env->packing = DO_PACKED;
2536 if (p_env->packing == DO_PACKED)
2537 link_num=p_packh->link_num;
2538 else
2539 link_num=p_this_ccw->header.opcode / 8;
2540 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2541 mtc_this_frm=1;
2542 if (p_this_ccw->header.length!=
2543 privptr->p_env->read_size ) {
2544 dev_warn(p_dev,
2545 "The communication peer of %s"
2546 " sent a faulty"
2547 " frame of length %02x\n",
2548 dev->name, p_this_ccw->header.length);
2552 if (privptr->mtc_skipping) {
2554 /* We're in the mode of skipping past a
2555 * multi-frame message
2556 * that we can't process for some reason or other.
2557 * The first frame without the More-To-Come flag is
2558 * the last frame of the skipped message. */
2560 /* More-To-Come not set: this is the last frame of the skipped message */
2561 if (mtc_this_frm==0) {
2562 privptr->mtc_skipping=0; /* Ok, the end */
2563 privptr->mtc_logical_link=-1;
2565 goto NextFrame;
2568 if (link_num==0) {
2569 claw_process_control(dev, p_this_ccw);
2570 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2571 goto NextFrame;
2573 unpack_next:
2574 if (p_env->packing == DO_PACKED) {
2575 if (pack_off > p_env->read_size)
2576 goto NextFrame;
2577 p_packd = p_this_ccw->p_buffer+pack_off;
2578 p_packh = (struct clawph *) p_packd;
2579 if ((p_packh->len == 0) || /* done with this frame? */
2580 (p_packh->flag != 0))
2581 goto NextFrame;
2582 bytes_to_mov = p_packh->len;
2583 pack_off += bytes_to_mov+sizeof(struct clawph);
2584 p++;
2585 } else {
2586 bytes_to_mov=p_this_ccw->header.length;
2588 if (privptr->mtc_logical_link<0) {
2591 /* if More-To-Come is set in this frame then we don't know the
2592 * length of the entire message, and hence have to allocate a
2593 * large buffer */
2595 /* We are starting a new envelope */
2596 privptr->mtc_offset=0;
2597 privptr->mtc_logical_link=link_num;
2600 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2601 /* error */
2602 privptr->stats.rx_frame_errors++;
2603 goto NextFrame;
2605 if (p_env->packing == DO_PACKED) {
2606 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2607 p_packd+sizeof(struct clawph), bytes_to_mov);
2609 } else {
2610 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2611 p_this_ccw->p_buffer, bytes_to_mov);
2613 if (mtc_this_frm==0) {
2614 len_of_data=privptr->mtc_offset+bytes_to_mov;
2615 skb=dev_alloc_skb(len_of_data);
2616 if (skb) {
2617 memcpy(skb_put(skb,len_of_data),
2618 privptr->p_mtc_envelope,
2619 len_of_data);
2620 skb->dev=dev;
2621 skb_reset_mac_header(skb);
2622 skb->protocol=htons(ETH_P_IP);
2623 skb->ip_summed=CHECKSUM_UNNECESSARY;
2624 privptr->stats.rx_packets++;
2625 privptr->stats.rx_bytes+=len_of_data;
2626 netif_rx(skb);
2628 else {
2629 dev_info(p_dev, "Allocating a buffer for"
2630 " incoming data failed\n");
2631 privptr->stats.rx_dropped++;
2633 privptr->mtc_offset=0;
2634 privptr->mtc_logical_link=-1;
2636 else {
2637 privptr->mtc_offset+=bytes_to_mov;
2639 if (p_env->packing == DO_PACKED)
2640 goto unpack_next;
2641 NextFrame:
2643 /* Remove ThisCCWblock from active read queue, and add it
2644 * to queue of free blocks to be reused. */
2646 i++;
2647 p_this_ccw->header.length=0xffff;
2648 p_this_ccw->header.opcode=0xff;
2650 /* add this one to the free queue for later reuse */
2652 if (p_first_ccw==NULL) {
2653 p_first_ccw = p_this_ccw;
2655 else {
2656 p_last_ccw->next = p_this_ccw;
2658 p_last_ccw = p_this_ccw;
2660 /* chain to next block on active read queue */
2662 p_this_ccw = privptr->p_read_active_first;
2663 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2664 } /* end of while */
2666 /* check validity */
2668 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2669 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2670 claw_strt_read(dev, LOCK_YES);
2671 return;
2672 } /* end of unpack_read */
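/*
 * Summary of unpack_read(): frames on logical link 0 are control records
 * and go to claw_process_control(); packed frames are split on their
 * clawph headers; frames with the More-to-Come flag set are collected in
 * p_mtc_envelope until the final frame arrives, at which point an skb is
 * built and passed to netif_rx().  Drained ccwbk blocks are re-queued
 * with add_claw_reads() and the read channel is restarted.
 */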
2674 /*-------------------------------------------------------------------*
2675 * claw_strt_read *
2677 *--------------------------------------------------------------------*/
2678 static void
2679 claw_strt_read (struct net_device *dev, int lock )
2681 int rc = 0;
2682 __u32 parm;
2683 unsigned long saveflags = 0;
2684 struct claw_privbk *privptr = dev->ml_priv;
2685 struct ccwbk*p_ccwbk;
2686 struct chbk *p_ch;
2687 struct clawh *p_clawh;
2688 p_ch=&privptr->channel[READ];
2690 CLAW_DBF_TEXT(4, trace, "StRdNter");
2691 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2692 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2694 if ((privptr->p_write_active_first!=NULL &&
2695 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2696 (privptr->p_read_active_first!=NULL &&
2697 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2698 p_clawh->flag=CLAW_BUSY; /* 0xff */
2700 if (lock==LOCK_YES) {
2701 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2703 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2704 CLAW_DBF_TEXT(4, trace, "HotRead");
2705 p_ccwbk=privptr->p_read_active_first;
2706 parm = (unsigned long) p_ch;
2707 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2708 0xff, 0);
2709 if (rc != 0) {
2710 ccw_check_return_code(p_ch->cdev, rc);
2713 else {
2714 CLAW_DBF_TEXT(2, trace, "ReadAct");
2717 if (lock==LOCK_YES) {
2718 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2720 CLAW_DBF_TEXT(4, trace, "StRdExit");
2721 return;
2722 } /* end of claw_strt_read */
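/*
 * Note: the read channel program is only (re)started when the channel is
 * not already active (test_and_set_bit on IO_active); the signal block
 * flag is set to CLAW_BUSY while reads or writes are outstanding and to
 * CLAW_IDLE otherwise, and the lock argument decides whether the ccw
 * device lock is taken here.
 */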
2724 /*-------------------------------------------------------------------*
2725 * claw_strt_out_IO *
2727 *--------------------------------------------------------------------*/
2729 static void
2730 claw_strt_out_IO( struct net_device *dev )
2732 int rc = 0;
2733 unsigned long parm;
2734 struct claw_privbk *privptr;
2735 struct chbk *p_ch;
2736 struct ccwbk *p_first_ccw;
2738 if (!dev) {
2739 return;
2741 privptr = (struct claw_privbk *)dev->ml_priv;
2742 p_ch=&privptr->channel[WRITE];
2744 CLAW_DBF_TEXT(4, trace, "strt_io");
2745 p_first_ccw=privptr->p_write_active_first;
2747 if (p_ch->claw_state == CLAW_STOP)
2748 return;
2749 if (p_first_ccw == NULL) {
2750 return;
2752 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2753 parm = (unsigned long) p_ch;
2754 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2755 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2756 0xff, 0);
2757 if (rc != 0) {
2758 ccw_check_return_code(p_ch->cdev, rc);
2761 dev->trans_start = jiffies;
2762 return;
2763 } /* end of claw_strt_out_IO */
2765 /*-------------------------------------------------------------------*
2766 * Free write buffers *
2768 *--------------------------------------------------------------------*/
2770 static void
2771 claw_free_wrt_buf( struct net_device *dev )
2774 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2775 struct ccwbk*p_first_ccw;
2776 struct ccwbk*p_last_ccw;
2777 struct ccwbk*p_this_ccw;
2778 struct ccwbk*p_next_ccw;
2780 CLAW_DBF_TEXT(4, trace, "freewrtb");
2781 /* scan the write queue to free any completed write packets */
2782 p_first_ccw=NULL;
2783 p_last_ccw=NULL;
2784 p_this_ccw=privptr->p_write_active_first;
2785 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2787 p_next_ccw = p_this_ccw->next;
2788 if (((p_next_ccw!=NULL) &&
2789 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2790 ((p_this_ccw == privptr->p_write_active_last) &&
2791 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2792 /* The next CCW is OK or this is */
2793 /* the last CCW...free it @A1A */
2794 privptr->p_write_active_first=p_this_ccw->next;
2795 p_this_ccw->header.flag=CLAW_PENDING;
2796 p_this_ccw->next=privptr->p_write_free_chain;
2797 privptr->p_write_free_chain=p_this_ccw;
2798 ++privptr->write_free_count;
2799 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2800 p_this_ccw=privptr->p_write_active_first;
2801 privptr->stats.tx_packets++;
2803 else {
2804 break;
2807 if (privptr->write_free_count!=0) {
2808 claw_clearbit_busy(TB_NOBUFFER,dev);
2810 /* whole chain removed? */
2811 if (privptr->p_write_active_first==NULL) {
2812 privptr->p_write_active_last=NULL;
2814 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2815 return;
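/*
 * Note: completed write ccwbk blocks (header.flag no longer CLAW_PENDING)
 * are returned to p_write_free_chain, the tx statistics are updated and
 * the TB_NOBUFFER busy bit is cleared as soon as free write buffers are
 * available again.
 */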
2818 /*-------------------------------------------------------------------*
2819 * claw free netdevice *
2821 *--------------------------------------------------------------------*/
2822 static void
2823 claw_free_netdevice(struct net_device * dev, int free_dev)
2825 struct claw_privbk *privptr;
2827 CLAW_DBF_TEXT(2, setup, "free_dev");
2828 if (!dev)
2829 return;
2830 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2831 privptr = dev->ml_priv;
2832 if (dev->flags & IFF_RUNNING)
2833 claw_release(dev);
2834 if (privptr) {
2835 privptr->channel[READ].ndev = NULL; /* say it's free */
2837 dev->ml_priv = NULL;
2838 #ifdef MODULE
2839 if (free_dev) {
2840 free_netdev(dev);
2842 #endif
2843 CLAW_DBF_TEXT(2, setup, "free_ok");
2847 /* Claw init netdevice
2848 * Initialize everything of the net device except the name and the
2849 * channel structs. */
2851 static const struct net_device_ops claw_netdev_ops = {
2852 .ndo_open = claw_open,
2853 .ndo_stop = claw_release,
2854 .ndo_get_stats = claw_stats,
2855 .ndo_start_xmit = claw_tx,
2856 .ndo_change_mtu = claw_change_mtu,
2859 static void
2860 claw_init_netdevice(struct net_device * dev)
2862 CLAW_DBF_TEXT(2, setup, "init_dev");
2863 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2864 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2865 dev->hard_header_len = 0;
2866 dev->addr_len = 0;
2867 dev->type = ARPHRD_SLIP;
2868 dev->tx_queue_len = 1300;
2869 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2870 dev->netdev_ops = &claw_netdev_ops;
2871 CLAW_DBF_TEXT(2, setup, "initok");
2872 return;
2876 /* Init a new channel in the privptr->channel[i].
2878 * @param cdev The ccw_device to be added.
2880 * @return 0 on success, !0 on error. */
2882 static int
2883 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2885 struct chbk *p_ch;
2886 struct ccw_dev_id dev_id;
2888 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2889 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2890 p_ch = &privptr->channel[i];
2891 p_ch->cdev = cdev;
2892 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2893 ccw_device_get_id(cdev, &dev_id);
2894 p_ch->devno = dev_id.devno;
2895 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2896 return -ENOMEM;
2898 return 0;
2904 /* Setup an interface.
2906 * @param cgdev Device to be setup.
2908 * @returns 0 on success, !0 on failure. */
2910 static int
2911 claw_new_device(struct ccwgroup_device *cgdev)
2913 struct claw_privbk *privptr;
2914 struct claw_env *p_env;
2915 struct net_device *dev;
2916 int ret;
2917 struct ccw_dev_id dev_id;
2919 dev_info(&cgdev->dev, "add for %s\n",
2920 dev_name(&cgdev->cdev[READ]->dev));
2921 CLAW_DBF_TEXT(2, setup, "new_dev");
2922 privptr = dev_get_drvdata(&cgdev->dev);
2923 dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
2924 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
2925 if (!privptr)
2926 return -ENODEV;
2927 p_env = privptr->p_env;
2928 ccw_device_get_id(cgdev->cdev[READ], &dev_id);
2929 p_env->devno[READ] = dev_id.devno;
2930 ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
2931 p_env->devno[WRITE] = dev_id.devno;
2932 ret = add_channel(cgdev->cdev[0],0,privptr);
2933 if (ret == 0)
2934 ret = add_channel(cgdev->cdev[1],1,privptr);
2935 if (ret != 0) {
2936 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2937 " failed with error code %d\n", ret);
2938 goto out;
2940 ret = ccw_device_set_online(cgdev->cdev[READ]);
2941 if (ret != 0) {
2942 dev_warn(&cgdev->dev,
2943 "Setting the read subchannel online"
2944 " failed with error code %d\n", ret);
2945 goto out;
2947 ret = ccw_device_set_online(cgdev->cdev[WRITE]);
2948 if (ret != 0) {
2949 dev_warn(&cgdev->dev,
2950 "Setting the write subchannel online "
2951 "failed with error code %d\n", ret);
2952 goto out;
2954 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2955 if (!dev) {
2956 dev_warn(&cgdev->dev,
2957 "Activating the CLAW device failed\n");
2958 goto out;
2960 dev->ml_priv = privptr;
2961 dev_set_drvdata(&cgdev->dev, privptr);
2962 dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
2963 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
2964 /* sysfs magic */
2965 SET_NETDEV_DEV(dev, &cgdev->dev);
2966 if (register_netdev(dev) != 0) {
2967 claw_free_netdevice(dev, 1);
2968 CLAW_DBF_TEXT(2, trace, "regfail");
2969 goto out;
2971 dev->flags &=~IFF_RUNNING;
2972 if (privptr->buffs_alloc == 0) {
2973 ret=init_ccw_bk(dev);
2974 if (ret !=0) {
2975 unregister_netdev(dev);
2976 claw_free_netdevice(dev,1);
2977 CLAW_DBF_TEXT(2, trace, "ccwmem");
2978 goto out;
2981 privptr->channel[READ].ndev = dev;
2982 privptr->channel[WRITE].ndev = dev;
2983 privptr->p_env->ndev = dev;
2985 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
2986 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2987 dev->name, p_env->read_size,
2988 p_env->write_size, p_env->read_buffers,
2989 p_env->write_buffers, p_env->devno[READ],
2990 p_env->devno[WRITE]);
2991 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
2992 ":%.8s api_type: %.8s\n",
2993 dev->name, p_env->host_name,
2994 p_env->adapter_name , p_env->api_type);
2995 return 0;
2996 out:
2997 ccw_device_set_offline(cgdev->cdev[1]);
2998 ccw_device_set_offline(cgdev->cdev[0]);
2999 return -ENODEV;
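/*
 * Note on the bring-up order above: both channels are added and set
 * online, a "claw%d" net_device is allocated and registered, and the CCW
 * buffer chains are built by init_ccw_bk() on first use; any failure
 * sets both subchannels offline again and returns -ENODEV.
 */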
3002 static void
3003 claw_purge_skb_queue(struct sk_buff_head *q)
3005 struct sk_buff *skb;
3007 CLAW_DBF_TEXT(4, trace, "purgque");
3008 while ((skb = skb_dequeue(q))) {
3009 atomic_dec(&skb->users);
3010 dev_kfree_skb_any(skb);
3015 /* Shutdown an interface.
3017 * @param cgdev Device to be shut down.
3019 * @returns 0 on success, !0 on failure. */
3021 static int
3022 claw_shutdown_device(struct ccwgroup_device *cgdev)
3024 struct claw_privbk *priv;
3025 struct net_device *ndev;
3026 int ret;
3028 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3029 priv = dev_get_drvdata(&cgdev->dev);
3030 if (!priv)
3031 return -ENODEV;
3032 ndev = priv->channel[READ].ndev;
3033 if (ndev) {
3034 /* Close the device */
3035 dev_info(&cgdev->dev, "%s: shutting down\n",
3036 ndev->name);
3037 if (ndev->flags & IFF_RUNNING)
3038 ret = claw_release(ndev);
3039 ndev->flags &=~IFF_RUNNING;
3040 unregister_netdev(ndev);
3041 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3042 claw_free_netdevice(ndev, 1);
3043 priv->channel[READ].ndev = NULL;
3044 priv->channel[WRITE].ndev = NULL;
3045 priv->p_env->ndev = NULL;
3047 ccw_device_set_offline(cgdev->cdev[1]);
3048 ccw_device_set_offline(cgdev->cdev[0]);
3049 return 0;
3052 static void
3053 claw_remove_device(struct ccwgroup_device *cgdev)
3055 struct claw_privbk *priv;
3057 BUG_ON(!cgdev);
3058 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3059 priv = dev_get_drvdata(&cgdev->dev);
3060 BUG_ON(!priv);
3061 dev_info(&cgdev->dev, "will be removed.\n");
3062 if (cgdev->state == CCWGROUP_ONLINE)
3063 claw_shutdown_device(cgdev);
3064 claw_remove_files(&cgdev->dev);
3065 kfree(priv->p_mtc_envelope);
3066 priv->p_mtc_envelope=NULL;
3067 kfree(priv->p_env);
3068 priv->p_env=NULL;
3069 kfree(priv->channel[0].irb);
3070 priv->channel[0].irb=NULL;
3071 kfree(priv->channel[1].irb);
3072 priv->channel[1].irb=NULL;
3073 kfree(priv);
3074 dev_set_drvdata(&cgdev->dev, NULL);
3075 dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
3076 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
3077 put_device(&cgdev->dev);
3079 return;
3084 /* sysfs attributes */
3086 static ssize_t
3087 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3089 struct claw_privbk *priv;
3090 struct claw_env * p_env;
3092 priv = dev_get_drvdata(dev);
3093 if (!priv)
3094 return -ENODEV;
3095 p_env = priv->p_env;
3096 return sprintf(buf, "%s\n",p_env->host_name);
3099 static ssize_t
3100 claw_hname_write(struct device *dev, struct device_attribute *attr,
3101 const char *buf, size_t count)
3103 struct claw_privbk *priv;
3104 struct claw_env * p_env;
3106 priv = dev_get_drvdata(dev);
3107 if (!priv)
3108 return -ENODEV;
3109 p_env = priv->p_env;
3110 if (count > MAX_NAME_LEN+1)
3111 return -EINVAL;
3112 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3113 strncpy(p_env->host_name,buf, count);
3114 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3115 p_env->host_name[MAX_NAME_LEN] = 0x00;
3116 CLAW_DBF_TEXT(2, setup, "HstnSet");
3117 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3119 return count;
3122 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3124 static ssize_t
3125 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3127 struct claw_privbk *priv;
3128 struct claw_env * p_env;
3130 priv = dev_get_drvdata(dev);
3131 if (!priv)
3132 return -ENODEV;
3133 p_env = priv->p_env;
3134 return sprintf(buf, "%s\n", p_env->adapter_name);
3137 static ssize_t
3138 claw_adname_write(struct device *dev, struct device_attribute *attr,
3139 const char *buf, size_t count)
3141 struct claw_privbk *priv;
3142 struct claw_env * p_env;
3144 priv = dev_get_drvdata(dev);
3145 if (!priv)
3146 return -ENODEV;
3147 p_env = priv->p_env;
3148 if (count > MAX_NAME_LEN+1)
3149 return -EINVAL;
3150 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3151 strncpy(p_env->adapter_name,buf, count);
3152 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3153 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3154 CLAW_DBF_TEXT(2, setup, "AdnSet");
3155 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3157 return count;
3160 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3162 static ssize_t
3163 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3165 struct claw_privbk *priv;
3166 struct claw_env * p_env;
3168 priv = dev_get_drvdata(dev);
3169 if (!priv)
3170 return -ENODEV;
3171 p_env = priv->p_env;
3172 return sprintf(buf, "%s\n",
3173 p_env->api_type);
3176 static ssize_t
3177 claw_apname_write(struct device *dev, struct device_attribute *attr,
3178 const char *buf, size_t count)
3180 struct claw_privbk *priv;
3181 struct claw_env * p_env;
3183 priv = dev_get_drvdata(dev);
3184 if (!priv)
3185 return -ENODEV;
3186 p_env = priv->p_env;
3187 if (count > MAX_NAME_LEN+1)
3188 return -EINVAL;
3189 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3190 strncpy(p_env->api_type,buf, count);
3191 p_env->api_type[count-1] = 0x20; /* clear extra 0x0a */
3192 p_env->api_type[MAX_NAME_LEN] = 0x00;
3193 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3194 p_env->read_size=DEF_PACK_BUFSIZE;
3195 p_env->write_size=DEF_PACK_BUFSIZE;
3196 p_env->packing=PACKING_ASK;
3197 CLAW_DBF_TEXT(2, setup, "PACKING");
3199 else {
3200 p_env->packing=0;
3201 p_env->read_size=CLAW_FRAME_SIZE;
3202 p_env->write_size=CLAW_FRAME_SIZE;
3203 CLAW_DBF_TEXT(2, setup, "ApiSet");
3205 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3206 return count;
3209 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3211 static ssize_t
3212 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3214 struct claw_privbk *priv;
3215 struct claw_env * p_env;
3217 priv = dev_get_drvdata(dev);
3218 if (!priv)
3219 return -ENODEV;
3220 p_env = priv->p_env;
3221 return sprintf(buf, "%d\n", p_env->write_buffers);
3224 static ssize_t
3225 claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3226 const char *buf, size_t count)
3228 struct claw_privbk *priv;
3229 struct claw_env * p_env;
3230 int nnn,max;
3232 priv = dev_get_drvdata(dev);
3233 if (!priv)
3234 return -ENODEV;
3235 p_env = priv->p_env;
3236 sscanf(buf, "%i", &nnn);
3237 if (p_env->packing) {
3238 max = 64;
3240 else {
3241 max = 512;
3243 if ((nnn > max ) || (nnn < 2))
3244 return -EINVAL;
3245 p_env->write_buffers = nnn;
3246 CLAW_DBF_TEXT(2, setup, "Wbufset");
3247 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3248 return count;
3251 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3253 static ssize_t
3254 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3256 struct claw_privbk *priv;
3257 struct claw_env * p_env;
3259 priv = dev_get_drvdata(dev);
3260 if (!priv)
3261 return -ENODEV;
3262 p_env = priv->p_env;
3263 return sprintf(buf, "%d\n", p_env->read_buffers);
3266 static ssize_t
3267 claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3268 const char *buf, size_t count)
3270 struct claw_privbk *priv;
3271 struct claw_env *p_env;
3272 int nnn,max;
3274 priv = dev_get_drvdata(dev);
3275 if (!priv)
3276 return -ENODEV;
3277 p_env = priv->p_env;
3278 sscanf(buf, "%i", &nnn);
3279 if (p_env->packing) {
3280 max = 64;
3282 else {
3283 max = 512;
3285 if ((nnn > max ) || (nnn < 2))
3286 return -EINVAL;
3287 p_env->read_buffers = nnn;
3288 CLAW_DBF_TEXT(2, setup, "Rbufset");
3289 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3290 return count;
3293 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3295 static struct attribute *claw_attr[] = {
3296 &dev_attr_read_buffer.attr,
3297 &dev_attr_write_buffer.attr,
3298 &dev_attr_adapter_name.attr,
3299 &dev_attr_api_type.attr,
3300 &dev_attr_host_name.attr,
3301 NULL,
3304 static struct attribute_group claw_attr_group = {
3305 .attrs = claw_attr,
3308 static int
3309 claw_add_files(struct device *dev)
3311 CLAW_DBF_TEXT(2, setup, "add_file");
3312 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3315 static void
3316 claw_remove_files(struct device *dev)
3318 CLAW_DBF_TEXT(2, setup, "rem_file");
3319 sysfs_remove_group(&dev->kobj, &claw_attr_group);
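/*
 * Illustrative use of the attributes defined above (bus ID and values are
 * examples only):
 *   echo <host name>    > /sys/bus/ccwgroup/devices/<busid>/host_name
 *   echo <adapter name> > /sys/bus/ccwgroup/devices/<busid>/adapter_name
 *   echo <api type>     > /sys/bus/ccwgroup/devices/<busid>/api_type
 *   echo 15             > /sys/bus/ccwgroup/devices/<busid>/read_buffer
 *   echo 15             > /sys/bus/ccwgroup/devices/<busid>/write_buffer
 * Writing the packed application name to api_type switches the driver to
 * packing mode and resets the frame sizes to DEF_PACK_BUFSIZE; read_buffer
 * and write_buffer accept 2..512 buffers (2..64 once packing is selected).
 */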
3322 /*--------------------------------------------------------------------*
3323 * claw_init and cleanup *
3324 *---------------------------------------------------------------------*/
3326 static void __exit
3327 claw_cleanup(void)
3329 unregister_cu3088_discipline(&claw_group_driver);
3330 claw_unregister_debug_facility();
3331 pr_info("Driver unloaded\n");
3336 /* Initialize module.
3337 * This is called just after the module is loaded.
3339 * @return 0 on success, !0 on error. */
3341 static int __init
3342 claw_init(void)
3344 int ret = 0;
3346 pr_info("Loading %s\n", version);
3347 ret = claw_register_debug_facility();
3348 if (ret) {
3349 pr_err("Registering with the S/390 debug feature"
3350 " failed with error code %d\n", ret);
3351 return ret;
3353 CLAW_DBF_TEXT(2, setup, "init_mod");
3354 ret = register_cu3088_discipline(&claw_group_driver);
3355 if (ret) {
3356 CLAW_DBF_TEXT(2, setup, "init_bad");
3357 claw_unregister_debug_facility();
3358 pr_err("Registering with the cu3088 device driver failed "
3359 "with error code %d\n", ret);
3361 return ret;
3364 module_init(claw_init);
3365 module_exit(claw_cleanup);
3367 MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3368 MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3369 "Copyright 2000,2008 IBM Corporation\n");
3370 MODULE_LICENSE("GPL");