/*
 *  drivers/s390/net/claw.c
 *    ESCON CLAW network driver
 *
 *  Linux for zSeries version
 *    Copyright (C) 2002,2005 IBM Corporation
 *  Author(s) Original code written by:
 *	Kazuo Iimura (iimura@jp.ibm.com)
 *	Andy Richter (richtera@us.ibm.com)
 *	Marc Price (mwprice@us.ibm.com)
 *
 *  Configuration parameters:
 *	group		x.x.rrrr,x.x.wwww
 *	adapter_name	aaaaaaaa
 *
 *  e.g.
 *	group 0.0.0200 0.0.0201
 *
 *  The device id is determined by the order in which entries are added to
 *  the group: the first is claw0, the second claw1, and so on.
 *
 *  rrrr - the first of 2 consecutive device addresses used for the link.
 *	   The specified address is always used as the input (Read)
 *	   channel and the next address is used as the output channel.
 *
 *  wwww - the second of 2 consecutive device addresses used for the link.
 *	   The specified address is always used as the output
 *	   channel and the previous address is used as the input channel.
 *
 *  read_buffer  - specifies the number of input buffers to allocate.
 *  write_buffer - specifies the number of output buffers to allocate.
 *  host_name    - host name
 *  adaptor_name - adaptor name
 *  api_type     - API type; TCPIP or API will be sent and expected
 *
 *  Note the following requirements:
 *	1) host_name must match the configured adapter_name on the remote side
 *	2) adaptor_name must match the configured host name on the remote side
 *
 *  Change history:
 *	1.00  Initial release shipped
 *	1.10  Changes for buffer allocation
 *	1.15  Changed for 2.6 kernel; no longer compiles on 2.4 or lower
 *	1.25  Added packing support
 */
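/*
 * Illustrative configuration sketch (not part of this file; the attribute
 * names follow the parameters described above, but the exact sysfs paths are
 * an assumption and depend on the running kernel):
 *
 *	# group the read/write subchannels into one CLAW interface
 *	echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *	# configure the link, then bring the grouped device online
 *	echo LINUXHST > .../host_name
 *	echo REMOTEWS > .../adapter_name
 *	echo TCPIP    > .../api_type
 *	echo 1        > .../online
 */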
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/debug.h>
#include <asm/idals.h>

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "claw.h"
/*
 * CLAW uses the s390dbf file system; see claw_trace and claw_setup.
 */
static char debug_buffer[255];

/*
 * Debug Facility Stuff
 */
static debug_info_t *claw_dbf_setup;
static debug_info_t *claw_dbf_trace;

/*
 * CLAW Debug Facility functions
 */
static void
claw_unregister_debug_facility(void)
{
	debug_unregister(claw_dbf_setup);
	debug_unregister(claw_dbf_trace);
}

static int
claw_register_debug_facility(void)
{
	claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
	claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
		claw_unregister_debug_facility();
		return -ENOMEM;
	}
	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_setup, 2);
	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_trace, 2);
	return 0;
}
static inline void
claw_set_busy(struct net_device *dev)
{
	((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
}

static inline void
claw_clear_busy(struct net_device *dev)
{
	clear_bit(0, &(((struct claw_privbk *)dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}

static inline int
claw_check_busy(struct net_device *dev)
{
	return ((struct claw_privbk *)dev->ml_priv)->tbusy;
}

static inline void
claw_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}

static inline void
claw_clearbit_busy(int nr, struct net_device *dev)
{
	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}

static inline int
claw_test_and_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(nr,
		(void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}
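/*
 * Illustrative call pattern (sketch only, not an additional code path in the
 * driver): the transmit side typically brackets its work with these helpers,
 * for example
 *
 *	if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
 *		... queue the next write channel program ...
 *		claw_clearbit_busy(TB_TX, dev);
 *	}
 *
 * so the netif queue is stopped while any busy bit is set and woken again
 * when the bit is cleared.
 */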
/* Functions for the DEV methods */

static int claw_probe(struct ccwgroup_device *cgdev);
static void claw_remove_device(struct ccwgroup_device *cgdev);
static void claw_purge_skb_queue(struct sk_buff_head *q);
static int claw_new_device(struct ccwgroup_device *cgdev);
static int claw_shutdown_device(struct ccwgroup_device *cgdev);
static int claw_tx(struct sk_buff *skb, struct net_device *dev);
static int claw_change_mtu(struct net_device *dev, int new_mtu);
static int claw_open(struct net_device *dev);
static void claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb);
static void claw_irq_tasklet(unsigned long data);
static int claw_release(struct net_device *dev);
static void claw_write_retry(struct chbk *p_ch);
static void claw_write_next(struct chbk *p_ch);
static void claw_timer(struct chbk *p_ch);

static int add_claw_reads(struct net_device *dev,
	struct ccwbk *p_first, struct ccwbk *p_last);
static void ccw_check_return_code(struct ccw_device *cdev, int return_code);
static void ccw_check_unit_check(struct chbk *p_ch, unsigned char sense);
static int find_link(struct net_device *dev, char *host_name, char *ws_name);
static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
static int init_ccw_bk(struct net_device *dev);
static void probe_error(struct ccwgroup_device *cgdev);
static struct net_device_stats *claw_stats(struct net_device *dev);
static int pages_to_order_of_mag(int num_of_pages);
static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);

/* sysfs Functions */
static ssize_t claw_hname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_hname_write(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static ssize_t claw_adname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_adname_write(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static ssize_t claw_apname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_apname_write(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static ssize_t claw_wbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_wbuff_write(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static ssize_t claw_rbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_rbuff_write(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static int claw_add_files(struct device *dev);
static void claw_remove_files(struct device *dev);

/* Functions for System Validate */
static int claw_process_control(struct net_device *dev, struct ccwbk *p_ccw);
static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
	__u8 correlator, __u8 rc, char *local_name, char *remote_name);
static int claw_snd_conn_req(struct net_device *dev, __u8 link);
static int claw_snd_disc(struct net_device *dev, struct clawctl *p_ctl);
static int claw_snd_sys_validate_rsp(struct net_device *dev,
	struct clawctl *p_ctl, __u32 return_code);
static int claw_strt_conn_req(struct net_device *dev);
static void claw_strt_read(struct net_device *dev, int lock);
static void claw_strt_out_IO(struct net_device *dev);
static void claw_free_wrt_buf(struct net_device *dev);

/* Functions for unpack reads */
static void unpack_read(struct net_device *dev);
static struct ccwgroup_driver claw_group_driver = {
	.owner		= THIS_MODULE,
	.name		= "claw",
	.max_slaves	= 2,
	.driver_id	= 0xC3D3C1E6,
	.probe		= claw_probe,
	.remove		= claw_remove_device,
	.set_online	= claw_new_device,
	.set_offline	= claw_shutdown_device,
};
/*----------------------------------------------------------------*
 *   claw_probe                                                    *
 *	this function is called for each CLAW device.              *
 *----------------------------------------------------------------*/
static int
claw_probe(struct ccwgroup_device *cgdev)
{
	int rc;
	struct claw_privbk *privptr = NULL;

	CLAW_DBF_TEXT(2, setup, "probe");
	if (!get_device(&cgdev->dev))
		return -ENODEV;
	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
	cgdev->dev.driver_data = privptr;
	if (privptr == NULL) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
	if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
	privptr->p_env->packing = 0;
	privptr->p_env->write_buffers = 5;
	privptr->p_env->read_buffers = 5;
	privptr->p_env->read_size = CLAW_FRAME_SIZE;
	privptr->p_env->write_size = CLAW_FRAME_SIZE;
	rc = claw_add_files(&cgdev->dev);
	if (rc) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		printk(KERN_WARNING "add_files failed %s %s Exit Line %d\n",
			dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__);
		CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
		return rc;
	}
	privptr->p_env->p_priv = privptr;
	cgdev->cdev[0]->handler = claw_irq_handler;
	cgdev->cdev[1]->handler = claw_irq_handler;
	CLAW_DBF_TEXT(2, setup, "prbext 0");
	return 0;
}  /* end of claw_probe */
/*-------------------------------------------------------------------*
 *   claw_tx                                                          *
 *-------------------------------------------------------------------*/
static int
claw_tx(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct claw_privbk *privptr = dev->ml_priv;
	unsigned long saveflags;
	struct chbk *p_ch;

	CLAW_DBF_TEXT(4, trace, "claw_tx");
	p_ch = &privptr->channel[WRITE];
	if (skb == NULL) {
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		CLAW_DBF_TEXT_(2, trace, "clawtx%d", -EIO);
		return -EIO;
	}
	spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
	rc = claw_hw_tx(skb, dev, 1);
	spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
	CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
	return rc;
}  /* end of claw_tx */
/*------------------------------------------------------------------*
 *  pack the collect queue into an skb and return it                 *
 *  If not packing, just return the top skb from the queue           *
 *------------------------------------------------------------------*/
static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
	struct sk_buff *new_skb, *held_skb;
	struct chbk *p_ch = &privptr->channel[WRITE];
	struct claw_env *p_env = privptr->p_env;
	int pkt_cnt, pk_ind, so_far;

	new_skb = NULL;		/* assume no dice */
	pkt_cnt = 0;
	CLAW_DBF_TEXT(4, trace, "PackSKBe");
	if (!skb_queue_empty(&p_ch->collect_queue)) {
		/* some data */
		held_skb = skb_dequeue(&p_ch->collect_queue);
		if (held_skb)
			dev_kfree_skb_any(held_skb);
		else
			return NULL;
		if (p_env->packing != DO_PACKED)
			return held_skb;
		/* get a new SKB we will pack at least one */
		new_skb = dev_alloc_skb(p_env->write_size);
		if (new_skb == NULL) {
			atomic_inc(&held_skb->users);
			skb_queue_head(&p_ch->collect_queue, held_skb);
			return NULL;
		}
		/* we have packed packet and a place to put it */
		pk_ind = 1;
		so_far = 0;
		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
		while ((pk_ind) && (held_skb != NULL)) {
			if (held_skb->len + so_far <= p_env->write_size - 8) {
				memcpy(skb_put(new_skb, held_skb->len),
					held_skb->data, held_skb->len);
				privptr->stats.tx_packets++;
				so_far += held_skb->len;
				pkt_cnt++;
				dev_kfree_skb_any(held_skb);
				held_skb = skb_dequeue(&p_ch->collect_queue);
				if (held_skb)
					atomic_dec(&held_skb->users);
			} else {
				pk_ind = 0;
				atomic_inc(&held_skb->users);
				skb_queue_head(&p_ch->collect_queue, held_skb);
			}
		}
	}
	CLAW_DBF_TEXT(4, trace, "PackSKBx");
	return new_skb;
}  /* end of claw_pack_skb */
/*-------------------------------------------------------------------*
 *   claw_change_mtu                                                  *
 *-------------------------------------------------------------------*/
static int
claw_change_mtu(struct net_device *dev, int new_mtu)
{
	struct claw_privbk *privptr = dev->ml_priv;
	int buff_size;

	CLAW_DBF_TEXT(4, trace, "setmtu");
	buff_size = privptr->p_env->write_size;
	if ((new_mtu < 60) || (new_mtu > buff_size))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}  /* end of claw_change_mtu */
/*-------------------------------------------------------------------*
 *   claw_open                                                        *
 *	open the device                                               *
 *-------------------------------------------------------------------*/
static int
claw_open(struct net_device *dev)
{
	int rc;
	int i;
	unsigned long saveflags = 0;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct timer_list timer;
	struct ccwbk *p_buf;

	CLAW_DBF_TEXT(4, trace, "open");
	privptr = (struct claw_privbk *)dev->ml_priv;
	/* allocate and initialize CCW blocks */
	if (privptr->buffs_alloc == 0) {
		rc = init_ccw_bk(dev);
		if (rc) {
			CLAW_DBF_TEXT(2, trace, "openmem");
			return -ENOMEM;
		}
	}
	privptr->system_validate_comp = 0;
	privptr->release_pend = 0;
	if (strncmp(privptr->p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) {
		privptr->p_env->read_size = DEF_PACK_BUFSIZE;
		privptr->p_env->write_size = DEF_PACK_BUFSIZE;
		privptr->p_env->packing = PACKING_ASK;
	} else {
		privptr->p_env->packing = 0;
		privptr->p_env->read_size = CLAW_FRAME_SIZE;
		privptr->p_env->write_size = CLAW_FRAME_SIZE;
	}
	claw_set_busy(dev);
	tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
		(unsigned long) &privptr->channel[READ]);
	for (i = 0; i < 2; i++) {
		CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
		init_waitqueue_head(&privptr->channel[i].wait);
		/* skb_queue_head_init(&p_ch->io_queue); */
		if (i == WRITE)
			skb_queue_head_init(
				&privptr->channel[WRITE].collect_queue);
		privptr->channel[i].flag_a = 0;
		privptr->channel[i].IO_active = 0;
		privptr->channel[i].flag &= ~CLAW_TIMER;
		init_timer(&timer);
		timer.function = (void *)claw_timer;
		timer.data = (unsigned long)(&privptr->channel[i]);
		timer.expires = jiffies + 15*HZ;
		add_timer(&timer);
		spin_lock_irqsave(get_ccwdev_lock(
			privptr->channel[i].cdev), saveflags);
		parm = (unsigned long) &privptr->channel[i];
		privptr->channel[i].claw_state = CLAW_START_HALT_IO;
		rc = 0;
		add_wait_queue(&privptr->channel[i].wait, &wait);
		rc = ccw_device_halt(
			(struct ccw_device *)privptr->channel[i].cdev, parm);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0)
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		if ((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
			del_timer(&timer);
	}
	if ((((privptr->channel[READ].last_dstat |
		privptr->channel[WRITE].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
	    (((privptr->channel[READ].flag |
		privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
		printk(KERN_INFO "%s: remote side is not ready\n", dev->name);
		CLAW_DBF_TEXT(2, trace, "notrdy");
		for (i = 0; i < 2; i++) {
			spin_lock_irqsave(
				get_ccwdev_lock(privptr->channel[i].cdev),
				saveflags);
			parm = (unsigned long) &privptr->channel[i];
			privptr->channel[i].claw_state = CLAW_STOP;
			rc = ccw_device_halt(
				(struct ccw_device *)&privptr->channel[i].cdev,
				parm);
			spin_unlock_irqrestore(
				get_ccwdev_lock(privptr->channel[i].cdev),
				saveflags);
			if (rc != 0)
				ccw_check_return_code(
					privptr->channel[i].cdev, rc);
		}
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
		if (privptr->p_env->read_size < PAGE_SIZE) {
			free_pages((unsigned long)privptr->p_buff_read,
				(int)pages_to_order_of_mag(
					privptr->p_buff_read_num));
		} else {
			p_buf = privptr->p_read_active_first;
			while (p_buf != NULL) {
				free_pages((unsigned long)p_buf->p_buffer,
					(int)pages_to_order_of_mag(
						privptr->p_buff_pages_perread));
				p_buf = p_buf->next;
			}
		}
		if (privptr->p_env->write_size < PAGE_SIZE) {
			free_pages((unsigned long)privptr->p_buff_write,
				(int)pages_to_order_of_mag(
					privptr->p_buff_write_num));
		} else {
			p_buf = privptr->p_write_active_first;
			while (p_buf != NULL) {
				free_pages((unsigned long)p_buf->p_buffer,
					(int)pages_to_order_of_mag(
						privptr->p_buff_pages_perwrite));
				p_buf = p_buf->next;
			}
		}
		privptr->buffs_alloc = 0;
		privptr->channel[READ].flag = 0x00;
		privptr->channel[WRITE].flag = 0x00;
		privptr->p_buff_ccw = NULL;
		privptr->p_buff_read = NULL;
		privptr->p_buff_write = NULL;
		claw_clear_busy(dev);
		CLAW_DBF_TEXT(2, trace, "open EIO");
		return -EIO;
	}

	/* Send SystemValidate command */

	claw_clear_busy(dev);
	CLAW_DBF_TEXT(4, trace, "openok");
	return 0;
}  /* end of claw_open */
/*-------------------------------------------------------------------*
 *   claw_irq_handler                                                 *
 *--------------------------------------------------------------------*/
static void
claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb)
{
	struct chbk *p_ch = NULL;
	struct claw_privbk *privptr = NULL;
	struct net_device *dev = NULL;
	struct claw_env *p_env;
	struct chbk *p_ch_r = NULL;

	CLAW_DBF_TEXT(4, trace, "clawirq");
	/* Bypass all 'unsolicited interrupts' */
	if (!cdev->dev.driver_data) {
		printk(KERN_WARNING "claw: unsolicited interrupt for device:"
			"%s received c-%02x d-%02x\n",
			dev_name(&cdev->dev), irb->scsw.cmd.cstat,
			irb->scsw.cmd.dstat);
		CLAW_DBF_TEXT(2, trace, "badirq");
		return;
	}
	privptr = (struct claw_privbk *)cdev->dev.driver_data;

	/* Try to extract channel from driver data. */
	if (privptr->channel[READ].cdev == cdev)
		p_ch = &privptr->channel[READ];
	else if (privptr->channel[WRITE].cdev == cdev)
		p_ch = &privptr->channel[WRITE];
	else {
		printk(KERN_WARNING "claw: Can't determine channel for "
			"interrupt, device %s\n", dev_name(&cdev->dev));
		CLAW_DBF_TEXT(2, trace, "badchan");
		return;
	}
	CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);

	dev = (struct net_device *) (p_ch->ndev);
	p_env = privptr->p_env;

	/* Copy interruption response block. */
	memcpy(p_ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise info message */
	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
		printk(KERN_INFO "%s: subchannel check for device: %04x -"
			" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
			dev->name, p_ch->devno,
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
			irb->scsw.cmd.cpa);
		CLAW_DBF_TEXT(2, trace, "chanchk");
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
		ccw_check_unit_check(p_ch, irb->ecw[0]);

	/* State machine to bring the connection up, down and to restart */
	p_ch->last_dstat = irb->scsw.cmd.dstat;

	switch (p_ch->claw_state) {
	case CLAW_STOP:		/* HALT_IO by claw_release (halt sequence) */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
			return;
		wake_up(&p_ch->wait);	/* wake up claw_release */
		CLAW_DBF_TEXT(4, trace, "stop");
		return;
	case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "haltio");
			return;
		}
		if (p_ch->flag == CLAW_READ) {
			p_ch->claw_state = CLAW_START_READ;
			wake_up(&p_ch->wait);	/* wake claw_open (READ) */
		} else if (p_ch->flag == CLAW_WRITE) {
			p_ch->claw_state = CLAW_START_WRITE;
			/* send SYSTEM_VALIDATE */
			claw_strt_read(dev, LOCK_NO);
			claw_send_control(dev,
				SYSTEM_VALIDATE_REQUEST,
				0, 0, 0,
				p_env->host_name,
				p_env->adapter_name);
		} else {
			printk(KERN_WARNING "claw: unsolicited "
				"interrupt for device:"
				"%s received c-%02x d-%02x\n",
				dev_name(&cdev->dev),
				irb->scsw.cmd.cstat,
				irb->scsw.cmd.dstat);
			return;
		}
		CLAW_DBF_TEXT(4, trace, "haltio");
		return;
	case CLAW_START_READ:
		CLAW_DBF_TEXT(4, trace, "ReadIRQ");
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			clear_bit(0, (void *)&p_ch->IO_active);
			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
			    (p_ch->irb->ecw[0]) == 0) {
				privptr->stats.rx_errors++;
				printk(KERN_INFO "%s: Restart is "
					"required after remote "
					"side recovers\n",
					dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "notrdy");
			return;
		}
		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
		    (p_ch->irb->scsw.cmd.dstat == 0)) {
			if (test_and_set_bit(CLAW_BH_ACTIVE,
				(void *)&p_ch->flag_a) == 0)
				tasklet_schedule(&p_ch->tasklet);
			else
				CLAW_DBF_TEXT(4, trace, "PCINoBH");
			CLAW_DBF_TEXT(4, trace, "PCI_read");
			return;
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "SPend_rd");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		claw_clearbit_busy(TB_RETRY, dev);
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch->flag_a) == 0)
			tasklet_schedule(&p_ch->tasklet);
		else
			CLAW_DBF_TEXT(4, trace, "RdBHAct");
		CLAW_DBF_TEXT(4, trace, "RdIRQXit");
		return;
	case CLAW_START_WRITE:
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			printk(KERN_INFO "%s: Unit Check Occurred in "
				"write channel\n", dev->name);
			clear_bit(0, (void *)&p_ch->IO_active);
			if (p_ch->irb->ecw[0] & 0x80) {
				printk(KERN_INFO "%s: Resetting Event "
					"occurred:\n", dev->name);
				init_timer(&p_ch->timer);
				p_ch->timer.function =
					(void *)claw_write_retry;
				p_ch->timer.data = (unsigned long)p_ch;
				p_ch->timer.expires = jiffies + 10*HZ;
				add_timer(&p_ch->timer);
				printk(KERN_INFO "%s: write connection "
					"restarting\n", dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "rstrtwrt");
			return;
		}
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
			clear_bit(0, (void *)&p_ch->IO_active);
			printk(KERN_INFO "%s: Unit Exception "
				"Occurred in write channel\n",
				dev->name);
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		    (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		    (p_ch->irb->scsw.cmd.stctl ==
		    (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "writeUE");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
			claw_write_next(p_ch);
			claw_clearbit_busy(TB_TX, dev);
			claw_clear_busy(dev);
		}
		p_ch_r = (struct chbk *)&privptr->channel[READ];
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch_r->flag_a) == 0)
			tasklet_schedule(&p_ch_r->tasklet);
		CLAW_DBF_TEXT(4, trace, "StWtExit");
		return;
	default:
		printk(KERN_WARNING "%s: wrong selection code - irq "
			"state=%d\n", dev->name, p_ch->claw_state);
		CLAW_DBF_TEXT(2, trace, "badIRQ");
		return;
	}
}  /* end of claw_irq_handler */
/*-------------------------------------------------------------------*
 *   claw_irq_tasklet                                                 *
 *--------------------------------------------------------------------*/
static void
claw_irq_tasklet(unsigned long data)
{
	struct chbk *p_ch;
	struct net_device *dev;
	struct claw_privbk *privptr;

	p_ch = (struct chbk *) data;
	dev = (struct net_device *)p_ch->ndev;
	CLAW_DBF_TEXT(4, trace, "IRQtask");
	privptr = (struct claw_privbk *)dev->ml_priv;
	unpack_read(dev);
	clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
	CLAW_DBF_TEXT(4, trace, "TskletXt");
	return;
}  /* end of claw_irq_bh */
/*-------------------------------------------------------------------*
 *   claw_release                                                     *
 *--------------------------------------------------------------------*/
static int
claw_release(struct net_device *dev)
{
	int rc;
	int i;
	unsigned long saveflags;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct ccwbk *p_this_ccw;
	struct ccwbk *p_buf;

	privptr = (struct claw_privbk *)dev->ml_priv;
	CLAW_DBF_TEXT(4, trace, "release");
	privptr->release_pend = 1;
	claw_setbit_busy(TB_STOP, dev);
	for (i = 1; i >= 0; i--) {
		spin_lock_irqsave(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		/* del_timer(&privptr->channel[READ].timer); */
		privptr->channel[i].claw_state = CLAW_STOP;
		privptr->channel[i].IO_active = 0;
		parm = (unsigned long) &privptr->channel[i];
		if (i == WRITE)
			claw_purge_skb_queue(
				&privptr->channel[WRITE].collect_queue);
		rc = ccw_device_halt(privptr->channel[i].cdev, parm);
		if (privptr->system_validate_comp == 0x00)  /* never opened? */
			init_waitqueue_head(&privptr->channel[i].wait);
		add_wait_queue(&privptr->channel[i].wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0)
			ccw_check_return_code(privptr->channel[i].cdev, rc);
	}
	if (privptr->pk_skb != NULL) {
		dev_kfree_skb_any(privptr->pk_skb);
		privptr->pk_skb = NULL;
	}
	if (privptr->buffs_alloc != 1) {
		CLAW_DBF_TEXT(4, trace, "none2fre");
		return 0;
	}
	CLAW_DBF_TEXT(4, trace, "freebufs");
	if (privptr->p_buff_ccw != NULL) {
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
	}
	CLAW_DBF_TEXT(4, trace, "freeread");
	if (privptr->p_env->read_size < PAGE_SIZE) {
		if (privptr->p_buff_read != NULL) {
			free_pages((unsigned long)privptr->p_buff_read,
				(int)pages_to_order_of_mag(
					privptr->p_buff_read_num));
		}
	} else {
		p_buf = privptr->p_read_active_first;
		while (p_buf != NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perread));
			p_buf = p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "freewrit");
	if (privptr->p_env->write_size < PAGE_SIZE) {
		free_pages((unsigned long)privptr->p_buff_write,
			(int)pages_to_order_of_mag(privptr->p_buff_write_num));
	} else {
		p_buf = privptr->p_write_active_first;
		while (p_buf != NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perwrite));
			p_buf = p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "clearptr");
	privptr->buffs_alloc = 0;
	privptr->p_buff_ccw = NULL;
	privptr->p_buff_read = NULL;
	privptr->p_buff_write = NULL;
	privptr->system_validate_comp = 0;
	privptr->release_pend = 0;
	/* Remove any writes that were pending and reset all reads */
	p_this_ccw = privptr->p_read_active_first;
	while (p_this_ccw != NULL) {
		p_this_ccw->header.length = 0xffff;
		p_this_ccw->header.opcode = 0xff;
		p_this_ccw->header.flag = 0x00;
		p_this_ccw = p_this_ccw->next;
	}

	while (privptr->p_write_active_first != NULL) {
		p_this_ccw = privptr->p_write_active_first;
		p_this_ccw->header.flag = CLAW_PENDING;
		privptr->p_write_active_first = p_this_ccw->next;
		p_this_ccw->next = privptr->p_write_free_chain;
		privptr->p_write_free_chain = p_this_ccw;
		++privptr->write_free_count;
	}
	privptr->p_write_active_last = NULL;
	privptr->mtc_logical_link = -1;
	privptr->mtc_skipping = 1;
	privptr->mtc_offset = 0;

	if (((privptr->channel[READ].last_dstat |
		privptr->channel[WRITE].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
		printk(KERN_WARNING "%s: channel problems during close - "
			"read: %02x - write: %02x\n",
			dev->name,
			privptr->channel[READ].last_dstat,
			privptr->channel[WRITE].last_dstat);
		CLAW_DBF_TEXT(2, trace, "badclose");
	}
	CLAW_DBF_TEXT(4, trace, "rlsexit");
	return 0;
}  /* end of claw_release */
/*-------------------------------------------------------------------*
 *   claw_write_retry                                                 *
 *--------------------------------------------------------------------*/
static void
claw_write_retry(struct chbk *p_ch)
{
	struct net_device *dev = p_ch->ndev;

	CLAW_DBF_TEXT(4, trace, "w_retry");
	if (p_ch->claw_state == CLAW_STOP)
		return;
	claw_strt_out_IO(dev);
	CLAW_DBF_TEXT(4, trace, "rtry_xit");
	return;
}  /* end of claw_write_retry */
/*-------------------------------------------------------------------*
 *   claw_write_next                                                  *
 *--------------------------------------------------------------------*/
static void
claw_write_next(struct chbk *p_ch)
{
	struct net_device *dev;
	struct claw_privbk *privptr = NULL;
	struct sk_buff *pk_skb;
	int rc;

	CLAW_DBF_TEXT(4, trace, "claw_wrt");
	if (p_ch->claw_state == CLAW_STOP)
		return;
	dev = (struct net_device *) p_ch->ndev;
	privptr = (struct claw_privbk *) dev->ml_priv;
	claw_free_wrt_buf(dev);
	if ((privptr->write_free_count > 0) &&
	    !skb_queue_empty(&p_ch->collect_queue)) {
		pk_skb = claw_pack_skb(privptr);
		while (pk_skb != NULL) {
			rc = claw_hw_tx(pk_skb, dev, 1);
			if (privptr->write_free_count > 0)
				pk_skb = claw_pack_skb(privptr);
			else
				pk_skb = NULL;
		}
	}
	if (privptr->p_write_active_first != NULL)
		claw_strt_out_IO(dev);
	return;
}  /* end of claw_write_next */
/*-------------------------------------------------------------------*
 *   claw_timer                                                       *
 *--------------------------------------------------------------------*/
static void
claw_timer(struct chbk *p_ch)
{
	CLAW_DBF_TEXT(4, trace, "timer");
	p_ch->flag |= CLAW_TIMER;
	wake_up(&p_ch->wait);
	return;
}  /* end of claw_timer */
/*-------------------------------------------------------------------*
 *   pages_to_order_of_mag                                            *
 *                                                                    *
 *   takes a number of pages from 1 to 512 and returns                *
 *   log(num_pages)/log(2); get_free_pages() needs a base-2 order     *
 *   of magnitude, and get_free_pages() has an upper order of 9       *
 *--------------------------------------------------------------------*/
static int
pages_to_order_of_mag(int num_of_pages)
{
	int order_of_mag = 1;	/* assume 2 pages */
	int nump;

	CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
	if (num_of_pages == 1)	/* magnitude of 0 = 1 page */
		return 0;
	/* 512 pages = 2Meg on 4k page systems */
	if (num_of_pages >= 512)
		return 9;
	/* we have two or more pages, order is at least 1 */
	for (nump = 2; nump <= 512; nump *= 2) {
		if (num_of_pages <= nump)
			break;
		order_of_mag++;
	}
	if (order_of_mag > 9)
		order_of_mag = 9;	/* I know it's paranoid */
	CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
	return order_of_mag;
}
/*-------------------------------------------------------------------*
 *   add_claw_reads                                                   *
 *--------------------------------------------------------------------*/
static int
add_claw_reads(struct net_device *dev, struct ccwbk *p_first,
	struct ccwbk *p_last)
{
	struct claw_privbk *privptr;
	struct ccw1 temp_ccw;
	struct endccw *p_end;

	CLAW_DBF_TEXT(4, trace, "addreads");
	privptr = dev->ml_priv;
	p_end = privptr->p_end_ccw;

	/* first CCW and last CCW contain a new set of read channel programs
	 * to append to the running channel programs
	 */
	if (p_first == NULL) {
		CLAW_DBF_TEXT(4, trace, "addexit");
		return 0;
	}

	/* set up ending CCW sequence for this segment */
	if (p_end->read1) {
		p_end->read1 = 0x00;	/* second ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read2_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read2_nop1);
		p_end->read2_nop2.cda = 0;
		p_end->read2_nop2.count = 1;
	} else {
		p_end->read1 = 0x01;	/* first ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
		p_end->read1_nop2.cda = 0;
		p_end->read1_nop2.count = 1;
	}

	if (privptr->p_read_active_first == NULL) {
		privptr->p_read_active_first = p_first;	/* set new first */
		privptr->p_read_active_last = p_last;	/* set new last  */
	} else {
		/* set up TIC ccw */
		temp_ccw.cda = (__u32)__pa(&p_first->read);
		temp_ccw.count = 0;
		temp_ccw.flags = 0;
		temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;

		if (p_end->read1) {
			/* first set of CCWs is chained to the new read      */
			/* chain, so the second set is chained to the active */
			/* chain.  Therefore modify the second set to point  */
			/* to the new read chain.  Set up TIC CCWs and make  */
			/* sure we update the CCW so the channel doesn't     */
			/* fetch it when it's only half done.                */
			memcpy(&p_end->read2_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		} else {
			/* make sure we update the CCW so the channel        */
			/* doesn't fetch it when it is only half done        */
			memcpy(&p_end->read1_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		}
		/* chain in new set of blocks */
		privptr->p_read_active_last->next = p_first;
		privptr->p_read_active_last = p_last;
	}  /* end of if (privptr->p_read_active_first == NULL) */
	CLAW_DBF_TEXT(4, trace, "addexit");
	return 0;
}  /* end of add_claw_reads */
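/*
 * Illustrative sketch (not additional driver logic): add_claw_reads() appends
 * a new read program to the one the channel may already be executing by
 * rewriting the currently active ending NOP into a TIC (transfer-in-channel)
 * that points at the new chain, roughly:
 *
 *	running chain:  ... -> last read ccwbk -> readX_nop1 -> readX_nop2
 *	after append:   ... -> last read ccwbk -> readX_nop1 -> TIC ---+
 *	                                                               |
 *	                new chain:  first read ccwbk -> ... -----------+
 *
 * The two ending sequences (read1/read2) alternate so that one can be
 * rewritten in place while the other still terminates the active program.
 */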
/*-------------------------------------------------------------------*
 *   ccw_check_return_code                                            *
 *-------------------------------------------------------------------*/
static void
ccw_check_return_code(struct ccw_device *cdev, int return_code)
{
	CLAW_DBF_TEXT(4, trace, "ccwret");
	if (return_code != 0) {
		switch (return_code) {
		case -EBUSY:	/* BUSY is a transient state, no action needed */
			break;
		case -ENODEV:
			printk(KERN_EMERG "%s: Missing device called "
				"for IO ENODEV\n", dev_name(&cdev->dev));
			break;
		case -EIO:
			printk(KERN_EMERG "%s: Status pending... EIO\n",
				dev_name(&cdev->dev));
			break;
		case -EINVAL:
			printk(KERN_EMERG "%s: Invalid Dev State EINVAL\n",
				dev_name(&cdev->dev));
			break;
		default:
			printk(KERN_EMERG "%s: Unknown error in "
				"Do_IO %d\n", dev_name(&cdev->dev),
				return_code);
		}
	}
	CLAW_DBF_TEXT(4, trace, "ccwret");
}  /* end of ccw_check_return_code */
/*-------------------------------------------------------------------*
 *   ccw_check_unit_check                                             *
 *--------------------------------------------------------------------*/
static void
ccw_check_unit_check(struct chbk *p_ch, unsigned char sense)
{
	struct net_device *ndev = p_ch->ndev;

	CLAW_DBF_TEXT(4, trace, "unitchek");
	printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n",
		ndev->name, sense);

	if (sense & 0x40) {
		if (sense & 0x01) {
			printk(KERN_WARNING "%s: Interface disconnect or "
				"Selective reset "
				"occurred (remote side)\n", ndev->name);
		} else {
			printk(KERN_WARNING "%s: System reset occurred"
				" (remote side)\n", ndev->name);
		}
	} else if (sense & 0x20) {
		if (sense & 0x04) {
			printk(KERN_WARNING "%s: Data-streaming "
				"timeout\n", ndev->name);
		} else {
			printk(KERN_WARNING "%s: Data-transfer parity"
				" error\n", ndev->name);
		}
	} else if (sense & 0x10) {
		if (sense & 0x20) {
			printk(KERN_WARNING "%s: Hardware malfunction "
				"(remote side)\n", ndev->name);
		} else {
			printk(KERN_WARNING "%s: read-data parity error "
				"(remote side)\n", ndev->name);
		}
	}
}  /* end of ccw_check_unit_check */
/*-------------------------------------------------------------------*
 *   find_link                                                        *
 *--------------------------------------------------------------------*/
static int
find_link(struct net_device *dev, char *host_name, char *ws_name)
{
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	int rc = 0;

	CLAW_DBF_TEXT(2, setup, "findlink");
	privptr = dev->ml_priv;
	p_env = privptr->p_env;
	switch (p_env->packing)
	{
	case PACKING_ASK:
		if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8) != 0) ||
		    (memcmp(WS_APPL_NAME_PACKED, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	case DO_PACKED:
	case PACK_SEND:
		if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8) != 0) ||
		    (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	default:
		if ((memcmp(HOST_APPL_NAME, host_name, 8) != 0) ||
		    (memcmp(p_env->api_type, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	}

	return rc;
}  /* end of find_link */
/*-------------------------------------------------------------------*
 *   claw_hw_tx                                                       *
 *-------------------------------------------------------------------*/
static int
claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
{
	int rc = 0;
	struct claw_privbk *privptr;
	struct ccwbk *p_this_ccw;
	struct ccwbk *p_first_ccw;
	struct ccwbk *p_last_ccw;
	__u32 numBuffers;
	signed long len_of_data;
	unsigned long bytesInThisBuffer;
	unsigned char *pDataAddress;
	struct endccw *pEnd;
	struct ccw1 tempCCW;
	struct chbk *p_ch;
	struct claw_env *p_env;
	struct clawph *pk_head;
	struct chbk *ch;

	CLAW_DBF_TEXT(4, trace, "hw_tx");
	privptr = (struct claw_privbk *)(dev->ml_priv);
	p_ch = (struct chbk *)&privptr->channel[WRITE];
	p_env = privptr->p_env;
	claw_free_wrt_buf(dev);	/* Clean up free chain if possible */
	/* scan the write queue to free any completed write packets */
	p_first_ccw = NULL;
	p_last_ccw = NULL;
	if ((p_env->packing >= PACK_SEND) &&
	    (skb->cb[1] != 'P')) {
		skb_push(skb, sizeof(struct clawph));
		pk_head = (struct clawph *)skb->data;
		pk_head->len = skb->len - sizeof(struct clawph);
		if (pk_head->len % 4) {
			pk_head->len += 4 - (pk_head->len % 4);
			skb_pad(skb, 4 - (pk_head->len % 4));
			skb_put(skb, 4 - (pk_head->len % 4));
		}
		if (p_env->packing == DO_PACKED)
			pk_head->link_num = linkid;
		else
			pk_head->link_num = 0;
		pk_head->flag = 0x00;
		skb->cb[1] = 'P';
	}
	if (claw_check_busy(dev)) {
		if (privptr->write_free_count != 0) {
			claw_clear_busy(dev);
		} else {
			claw_strt_out_IO(dev);
			claw_free_wrt_buf(dev);
			if (privptr->write_free_count == 0) {
				ch = &privptr->channel[WRITE];
				atomic_inc(&skb->users);
				skb_queue_tail(&ch->collect_queue, skb);
				goto Done;
			} else {
				claw_clear_busy(dev);
			}
		}
	}
	/* tx lock */
	if (claw_test_and_setbit_busy(TB_TX, dev)) {	/* set to busy */
		ch = &privptr->channel[WRITE];
		atomic_inc(&skb->users);
		skb_queue_tail(&ch->collect_queue, skb);
		claw_strt_out_IO(dev);
		rc = -EBUSY;
		goto Done2;
	}

	/* See how many write buffers are required to hold this data */
	numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);

	/* If that number of buffers isn't available, give up for now */
	if (privptr->write_free_count < numBuffers ||
	    privptr->p_write_free_chain == NULL) {
		claw_setbit_busy(TB_NOBUFFER, dev);
		ch = &privptr->channel[WRITE];
		atomic_inc(&skb->users);
		skb_queue_tail(&ch->collect_queue, skb);
		CLAW_DBF_TEXT(2, trace, "clawbusy");
		rc = -EBUSY;
		goto Done2;
	}
	pDataAddress = skb->data;
	len_of_data = skb->len;

	while (len_of_data > 0) {
		p_this_ccw = privptr->p_write_free_chain;  /* get a block */
		if (p_this_ccw == NULL) {	/* lost the race */
			ch = &privptr->channel[WRITE];
			atomic_inc(&skb->users);
			skb_queue_tail(&ch->collect_queue, skb);
			goto Done2;
		}
		privptr->p_write_free_chain = p_this_ccw->next;
		p_this_ccw->next = NULL;
		--privptr->write_free_count;	/* -1 */
		bytesInThisBuffer = len_of_data;
		memcpy(p_this_ccw->p_buffer, pDataAddress, bytesInThisBuffer);
		len_of_data -= bytesInThisBuffer;
		pDataAddress += (unsigned long)bytesInThisBuffer;
		/* setup write CCW */
		p_this_ccw->write.cmd_code = (linkid * 8) + 1;
		if (len_of_data > 0) {
			p_this_ccw->write.cmd_code += MORE_to_COME_FLAG;
		}
		p_this_ccw->write.count = bytesInThisBuffer;
		/* now add to end of this chain */
		if (p_first_ccw == NULL) {
			p_first_ccw = p_this_ccw;
		}
		if (p_last_ccw != NULL) {
			p_last_ccw->next = p_this_ccw;
			/* set up TIC ccws */
			p_last_ccw->w_TIC_1.cda =
				(__u32)__pa(&p_this_ccw->write);
		}
		p_last_ccw = p_this_ccw;	/* save new last block */
	}

	/* FirstCCW and LastCCW now contain a new set of write channel
	 * programs to append to the running channel program
	 */

	if (p_first_ccw != NULL) {
		/* setup ending ccw sequence for this segment */
		pEnd = privptr->p_end_ccw;
		if (pEnd->write1) {
			pEnd->write1 = 0x00;	/* second end ccw is now active */
			/* set up Tic CCWs */
			p_last_ccw->w_TIC_1.cda =
				(__u32)__pa(&pEnd->write2_nop1);
			pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
			pEnd->write2_nop2.flags =
				CCW_FLAG_SLI | CCW_FLAG_SKIP;
			pEnd->write2_nop2.cda = 0;
			pEnd->write2_nop2.count = 1;
		} else {  /* end of if (pEnd->write1) */
			pEnd->write1 = 0x01;	/* first end ccw is now active */
			/* set up Tic CCWs */
			p_last_ccw->w_TIC_1.cda =
				(__u32)__pa(&pEnd->write1_nop1);
			pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
			pEnd->write1_nop2.flags =
				CCW_FLAG_SLI | CCW_FLAG_SKIP;
			pEnd->write1_nop2.cda = 0;
			pEnd->write1_nop2.count = 1;
		}  /* end if (pEnd->write1) */

		if (privptr->p_write_active_first == NULL) {
			privptr->p_write_active_first = p_first_ccw;
			privptr->p_write_active_last = p_last_ccw;
		} else {
			/* set up Tic CCWs */

			tempCCW.cda = (__u32)__pa(&p_first_ccw->write);
			tempCCW.count = 0;
			tempCCW.flags = 0;
			tempCCW.cmd_code = CCW_CLAW_CMD_TIC;

			if (pEnd->write1) {
				/*
				 * first set of ending CCW's is chained to the new write
				 * chain, so the second set is chained to the active chain
				 * Therefore modify the second set to point to the new write chain.
				 * make sure we update the CCW atomically
				 * so channel does not fetch it when it's only half done
				 */
				memcpy(&pEnd->write2_nop2, &tempCCW,
					sizeof(struct ccw1));
				privptr->p_write_active_last->w_TIC_1.cda =
					(__u32)__pa(&p_first_ccw->write);
			} else {
				/*
				 * make sure we update the CCW atomically
				 * so channel does not fetch it when it's only half done
				 */
				memcpy(&pEnd->write1_nop2, &tempCCW,
					sizeof(struct ccw1));
				privptr->p_write_active_last->w_TIC_1.cda =
					(__u32)__pa(&p_first_ccw->write);
			}  /* end if (pEnd->write1) */

			privptr->p_write_active_last->next = p_first_ccw;
			privptr->p_write_active_last = p_last_ccw;
		}

	}  /* endif (p_first_ccw != NULL) */
	dev_kfree_skb_any(skb);
	claw_strt_out_IO(dev);
	/* if write free count is zero, set NOBUFFER */
	if (privptr->write_free_count == 0) {
		claw_setbit_busy(TB_NOBUFFER, dev);
	}
Done2:
	claw_clearbit_busy(TB_TX, dev);
Done:
	return rc;
}  /* end of claw_hw_tx */
1464 /*-------------------------------------------------------------------*
1468 *--------------------------------------------------------------------*/
1471 init_ccw_bk(struct net_device
*dev
)
1474 __u32 ccw_blocks_required
;
1475 __u32 ccw_blocks_perpage
;
1476 __u32 ccw_pages_required
;
1477 __u32 claw_reads_perpage
=1;
1478 __u32 claw_read_pages
;
1479 __u32 claw_writes_perpage
=1;
1480 __u32 claw_write_pages
;
1482 struct ccwbk
*p_free_chain
;
1484 struct ccwbk
*p_last_CCWB
;
1485 struct ccwbk
*p_first_CCWB
;
1486 struct endccw
*p_endccw
=NULL
;
1487 addr_t real_address
;
1488 struct claw_privbk
*privptr
= dev
->ml_priv
;
1489 struct clawh
*pClawH
=NULL
;
1490 addr_t real_TIC_address
;
1492 CLAW_DBF_TEXT(4, trace
, "init_ccw");
1494 /* initialize statistics field */
1495 privptr
->active_link_ID
=0;
1496 /* initialize ccwbk pointers */
1497 privptr
->p_write_free_chain
=NULL
; /* pointer to free ccw chain*/
1498 privptr
->p_write_active_first
=NULL
; /* pointer to the first write ccw*/
1499 privptr
->p_write_active_last
=NULL
; /* pointer to the last write ccw*/
1500 privptr
->p_read_active_first
=NULL
; /* pointer to the first read ccw*/
1501 privptr
->p_read_active_last
=NULL
; /* pointer to the last read ccw */
1502 privptr
->p_end_ccw
=NULL
; /* pointer to ending ccw */
1503 privptr
->p_claw_signal_blk
=NULL
; /* pointer to signal block */
1504 privptr
->buffs_alloc
= 0;
1505 memset(&privptr
->end_ccw
, 0x00, sizeof(struct endccw
));
1506 memset(&privptr
->ctl_bk
, 0x00, sizeof(struct clawctl
));
1507 /* initialize free write ccwbk counter */
1508 privptr
->write_free_count
=0; /* number of free bufs on write chain */
1512 * We need 1 CCW block for each read buffer, 1 for each
1513 * write buffer, plus 1 for ClawSignalBlock
1515 ccw_blocks_required
=
1516 privptr
->p_env
->read_buffers
+privptr
->p_env
->write_buffers
+1;
1518 * compute number of CCW blocks that will fit in a page
1520 ccw_blocks_perpage
= PAGE_SIZE
/ CCWBK_SIZE
;
1522 DIV_ROUND_UP(ccw_blocks_required
, ccw_blocks_perpage
);
1525 * read and write sizes are set by 2 constants in claw.h
1526 * 4k and 32k. Unpacked values other than 4k are not going to
1527 * provide good performance. With packing buffers support 32k
1530 if (privptr
->p_env
->read_size
< PAGE_SIZE
) {
1531 claw_reads_perpage
= PAGE_SIZE
/ privptr
->p_env
->read_size
;
1532 claw_read_pages
= DIV_ROUND_UP(privptr
->p_env
->read_buffers
,
1533 claw_reads_perpage
);
1535 else { /* > or equal */
1536 privptr
->p_buff_pages_perread
=
1537 DIV_ROUND_UP(privptr
->p_env
->read_size
, PAGE_SIZE
);
1538 claw_read_pages
= privptr
->p_env
->read_buffers
*
1539 privptr
->p_buff_pages_perread
;
1541 if (privptr
->p_env
->write_size
< PAGE_SIZE
) {
1542 claw_writes_perpage
=
1543 PAGE_SIZE
/ privptr
->p_env
->write_size
;
1544 claw_write_pages
= DIV_ROUND_UP(privptr
->p_env
->write_buffers
,
1545 claw_writes_perpage
);
1548 else { /* > or equal */
1549 privptr
->p_buff_pages_perwrite
=
1550 DIV_ROUND_UP(privptr
->p_env
->read_size
, PAGE_SIZE
);
1551 claw_write_pages
= privptr
->p_env
->write_buffers
*
1552 privptr
->p_buff_pages_perwrite
;
1555 * allocate ccw_pages_required
1557 if (privptr
->p_buff_ccw
==NULL
) {
1558 privptr
->p_buff_ccw
=
1559 (void *)__get_free_pages(__GFP_DMA
,
1560 (int)pages_to_order_of_mag(ccw_pages_required
));
1561 if (privptr
->p_buff_ccw
==NULL
) {
1564 privptr
->p_buff_ccw_num
=ccw_pages_required
;
1566 memset(privptr
->p_buff_ccw
, 0x00,
1567 privptr
->p_buff_ccw_num
* PAGE_SIZE
);
1570 * obtain ending ccw block address
1573 privptr
->p_end_ccw
= (struct endccw
*)&privptr
->end_ccw
;
1574 real_address
= (__u32
)__pa(privptr
->p_end_ccw
);
1575 /* Initialize ending CCW block */
1576 p_endccw
=privptr
->p_end_ccw
;
1577 p_endccw
->real
=real_address
;
1578 p_endccw
->write1
=0x00;
1579 p_endccw
->read1
=0x00;
1582 p_endccw
->write1_nop1
.cmd_code
= CCW_CLAW_CMD_NOP
;
1583 p_endccw
->write1_nop1
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1584 p_endccw
->write1_nop1
.count
= 1;
1585 p_endccw
->write1_nop1
.cda
= 0;
1588 p_endccw
->write1_nop2
.cmd_code
= CCW_CLAW_CMD_READFF
;
1589 p_endccw
->write1_nop2
.flags
= CCW_FLAG_SLI
| CCW_FLAG_SKIP
;
1590 p_endccw
->write1_nop2
.count
= 1;
1591 p_endccw
->write1_nop2
.cda
= 0;
1594 p_endccw
->write2_nop1
.cmd_code
= CCW_CLAW_CMD_NOP
;
1595 p_endccw
->write2_nop1
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1596 p_endccw
->write2_nop1
.count
= 1;
1597 p_endccw
->write2_nop1
.cda
= 0;
1600 p_endccw
->write2_nop2
.cmd_code
= CCW_CLAW_CMD_READFF
;
1601 p_endccw
->write2_nop2
.flags
= CCW_FLAG_SLI
| CCW_FLAG_SKIP
;
1602 p_endccw
->write2_nop2
.count
= 1;
1603 p_endccw
->write2_nop2
.cda
= 0;
1606 p_endccw
->read1_nop1
.cmd_code
= CCW_CLAW_CMD_NOP
;
1607 p_endccw
->read1_nop1
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1608 p_endccw
->read1_nop1
.count
= 1;
1609 p_endccw
->read1_nop1
.cda
= 0;
1612 p_endccw
->read1_nop2
.cmd_code
= CCW_CLAW_CMD_READFF
;
1613 p_endccw
->read1_nop2
.flags
= CCW_FLAG_SLI
| CCW_FLAG_SKIP
;
1614 p_endccw
->read1_nop2
.count
= 1;
1615 p_endccw
->read1_nop2
.cda
= 0;
1618 p_endccw
->read2_nop1
.cmd_code
= CCW_CLAW_CMD_NOP
;
1619 p_endccw
->read2_nop1
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1620 p_endccw
->read2_nop1
.count
= 1;
1621 p_endccw
->read2_nop1
.cda
= 0;
1624 p_endccw
->read2_nop2
.cmd_code
= CCW_CLAW_CMD_READFF
;
1625 p_endccw
->read2_nop2
.flags
= CCW_FLAG_SLI
| CCW_FLAG_SKIP
;
1626 p_endccw
->read2_nop2
.count
= 1;
1627 p_endccw
->read2_nop2
.cda
= 0;
1630 * Build a chain of CCWs
1633 p_buff
=privptr
->p_buff_ccw
;
1636 for (i
=0 ; i
< ccw_pages_required
; i
++ ) {
1637 real_address
= (__u32
)__pa(p_buff
);
1639 for (j
=0 ; j
< ccw_blocks_perpage
; j
++) {
1640 p_buf
->next
= p_free_chain
;
1641 p_free_chain
= p_buf
;
1642 p_buf
->real
=(__u32
)__pa(p_buf
);
1648 * Initialize ClawSignalBlock
1651 if (privptr
->p_claw_signal_blk
==NULL
) {
1652 privptr
->p_claw_signal_blk
=p_free_chain
;
1653 p_free_chain
=p_free_chain
->next
;
1654 pClawH
=(struct clawh
*)privptr
->p_claw_signal_blk
;
1655 pClawH
->length
=0xffff;
1656 pClawH
->opcode
=0xff;
1657 pClawH
->flag
=CLAW_BUSY
;
1661 * allocate write_pages_required and add to free chain
1663 if (privptr
->p_buff_write
==NULL
) {
1664 if (privptr
->p_env
->write_size
< PAGE_SIZE
) {
1665 privptr
->p_buff_write
=
1666 (void *)__get_free_pages(__GFP_DMA
,
1667 (int)pages_to_order_of_mag(claw_write_pages
));
1668 if (privptr
->p_buff_write
==NULL
) {
1669 privptr
->p_buff_ccw
=NULL
;
1673 * Build CLAW write free chain
1677 memset(privptr
->p_buff_write
, 0x00,
1678 ccw_pages_required
* PAGE_SIZE
);
1679 privptr
->p_write_free_chain
=NULL
;
1681 p_buff
=privptr
->p_buff_write
;
1683 for (i
=0 ; i
< privptr
->p_env
->write_buffers
; i
++) {
1684 p_buf
= p_free_chain
; /* get a CCW */
1685 p_free_chain
= p_buf
->next
;
1686 p_buf
->next
=privptr
->p_write_free_chain
;
1687 privptr
->p_write_free_chain
= p_buf
;
1688 p_buf
-> p_buffer
= (struct clawbuf
*)p_buff
;
1689 p_buf
-> write
.cda
= (__u32
)__pa(p_buff
);
1690 p_buf
-> write
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1691 p_buf
-> w_read_FF
.cmd_code
= CCW_CLAW_CMD_READFF
;
1692 p_buf
-> w_read_FF
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1693 p_buf
-> w_read_FF
.count
= 1;
1694 p_buf
-> w_read_FF
.cda
=
1695 (__u32
)__pa(&p_buf
-> header
.flag
);
1696 p_buf
-> w_TIC_1
.cmd_code
= CCW_CLAW_CMD_TIC
;
1697 p_buf
-> w_TIC_1
.flags
= 0;
1698 p_buf
-> w_TIC_1
.count
= 0;
1700 if (((unsigned long)p_buff
+privptr
->p_env
->write_size
) >=
1701 ((unsigned long)(p_buff
+2*
1702 (privptr
->p_env
->write_size
) -1) & PAGE_MASK
)) {
1703 p_buff
= p_buff
+privptr
->p_env
->write_size
;
1707 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1709 privptr
->p_write_free_chain
=NULL
;
1710 for (i
= 0; i
< privptr
->p_env
->write_buffers
; i
++) {
1711 p_buff
=(void *)__get_free_pages(__GFP_DMA
,
1712 (int)pages_to_order_of_mag(
1713 privptr
->p_buff_pages_perwrite
) );
1715 free_pages((unsigned long)privptr
->p_buff_ccw
,
1716 (int)pages_to_order_of_mag(
1717 privptr
->p_buff_ccw_num
));
1718 privptr
->p_buff_ccw
=NULL
;
1719 p_buf
=privptr
->p_buff_write
;
1720 while (p_buf
!=NULL
) {
1721 free_pages((unsigned long)
1723 (int)pages_to_order_of_mag(
1724 privptr
->p_buff_pages_perwrite
));
1728 } /* Error on get_pages */
1729 memset(p_buff
, 0x00, privptr
->p_env
->write_size
);
1730 p_buf
= p_free_chain
;
1731 p_free_chain
= p_buf
->next
;
1732 p_buf
->next
= privptr
->p_write_free_chain
;
1733 privptr
->p_write_free_chain
= p_buf
;
1734 privptr
->p_buff_write
= p_buf
;
1735 p_buf
->p_buffer
=(struct clawbuf
*)p_buff
;
1736 p_buf
-> write
.cda
= (__u32
)__pa(p_buff
);
1737 p_buf
-> write
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1738 p_buf
-> w_read_FF
.cmd_code
= CCW_CLAW_CMD_READFF
;
1739 p_buf
-> w_read_FF
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1740 p_buf
-> w_read_FF
.count
= 1;
1741 p_buf
-> w_read_FF
.cda
=
1742 (__u32
)__pa(&p_buf
-> header
.flag
);
1743 p_buf
-> w_TIC_1
.cmd_code
= CCW_CLAW_CMD_TIC
;
1744 p_buf
-> w_TIC_1
.flags
= 0;
1745 p_buf
-> w_TIC_1
.count
= 0;
1746 } /* for all write_buffers */
1748 } /* else buffers are PAGE_SIZE or bigger */
1751 privptr
->p_buff_write_num
=claw_write_pages
;
1752 privptr
->write_free_count
=privptr
->p_env
->write_buffers
;
1756 * allocate read_pages_required and chain to free chain
1758 if (privptr
->p_buff_read
==NULL
) {
1759 if (privptr
->p_env
->read_size
< PAGE_SIZE
) {
1760 privptr
->p_buff_read
=
1761 (void *)__get_free_pages(__GFP_DMA
,
1762 (int)pages_to_order_of_mag(claw_read_pages
) );
1763 if (privptr
->p_buff_read
==NULL
) {
1764 free_pages((unsigned long)privptr
->p_buff_ccw
,
1765 (int)pages_to_order_of_mag(
1766 privptr
->p_buff_ccw_num
));
1767 /* free the write pages size is < page size */
1768 free_pages((unsigned long)privptr
->p_buff_write
,
1769 (int)pages_to_order_of_mag(
1770 privptr
->p_buff_write_num
));
1771 privptr
->p_buff_ccw
=NULL
;
1772 privptr
->p_buff_write
=NULL
;
1775 memset(privptr
->p_buff_read
, 0x00, claw_read_pages
* PAGE_SIZE
);
1776 privptr
->p_buff_read_num
=claw_read_pages
;
1778 * Build CLAW read free chain
1781 p_buff
=privptr
->p_buff_read
;
1782 for (i
=0 ; i
< privptr
->p_env
->read_buffers
; i
++) {
1783 p_buf
= p_free_chain
;
1784 p_free_chain
= p_buf
->next
;
1786 if (p_last_CCWB
==NULL
) {
1792 p_buf
->next
=p_first_CCWB
;
1794 (__u32
)__pa(&p_first_CCWB
-> read
);
1799 p_buf
->p_buffer
=(struct clawbuf
*)p_buff
;
1800 /* initialize read command */
1801 p_buf
-> read
.cmd_code
= CCW_CLAW_CMD_READ
;
1802 p_buf
-> read
.cda
= (__u32
)__pa(p_buff
);
1803 p_buf
-> read
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1804 p_buf
-> read
.count
= privptr
->p_env
->read_size
;
1806 /* initialize read_h command */
1807 p_buf
-> read_h
.cmd_code
= CCW_CLAW_CMD_READHEADER
;
1808 p_buf
-> read_h
.cda
=
1809 (__u32
)__pa(&(p_buf
->header
));
1810 p_buf
-> read_h
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1811 p_buf
-> read_h
.count
= sizeof(struct clawh
);
1813 /* initialize Signal command */
1814 p_buf
-> signal
.cmd_code
= CCW_CLAW_CMD_SIGNAL_SMOD
;
1815 p_buf
-> signal
.cda
=
1816 (__u32
)__pa(&(pClawH
->flag
));
1817 p_buf
-> signal
.flags
= CCW_FLAG_SLI
| CCW_FLAG_CC
;
1818 p_buf
-> signal
.count
= 1;
1820 /* initialize r_TIC_1 command */
1821 p_buf
-> r_TIC_1
.cmd_code
= CCW_CLAW_CMD_TIC
;
1822 p_buf
-> r_TIC_1
.cda
= (__u32
)real_TIC_address
;
1823 p_buf
-> r_TIC_1
.flags
= 0;
1824 p_buf
-> r_TIC_1
.count
= 0;
1826 /* initialize r_read_FF command */
1827 p_buf
-> r_read_FF
.cmd_code
= CCW_CLAW_CMD_READFF
;
1828 p_buf
-> r_read_FF
.cda
=
1829 (__u32
)__pa(&(pClawH
->flag
));
1830 p_buf
-> r_read_FF
.flags
=
1831 CCW_FLAG_SLI
| CCW_FLAG_CC
| CCW_FLAG_PCI
;
1832 p_buf
-> r_read_FF
.count
= 1;
1834 /* initialize r_TIC_2 */
1835 memcpy(&p_buf
->r_TIC_2
,
1836 &p_buf
->r_TIC_1
, sizeof(struct ccw1
));
1838 /* initialize Header */
1839 p_buf
->header
.length
=0xffff;
1840 p_buf
->header
.opcode
=0xff;
1841 p_buf
->header
.flag
=CLAW_PENDING
;
1843 if (((unsigned long)p_buff
+privptr
->p_env
->read_size
) >=
1844 ((unsigned long)(p_buff
+2*(privptr
->p_env
->read_size
) -1)
1846 p_buff
= p_buff
+privptr
->p_env
->read_size
;
1850 (void *)((unsigned long)
1851 (p_buff
+2*(privptr
->p_env
->read_size
) -1)
1854 } /* for read_buffers */
1855 } /* read_size < PAGE_SIZE */
1856 else { /* read Size >= PAGE_SIZE */
1857 for (i
=0 ; i
< privptr
->p_env
->read_buffers
; i
++) {
1858 p_buff
= (void *)__get_free_pages(__GFP_DMA
,
1859 (int)pages_to_order_of_mag(privptr
->p_buff_pages_perread
) );
1861 free_pages((unsigned long)privptr
->p_buff_ccw
,
1862 (int)pages_to_order_of_mag(privptr
->p_buff_ccw_num
));
1863 /* free the write pages */
1864 p_buf
=privptr
->p_buff_write
;
1865 while (p_buf
!=NULL
) {
1866 free_pages((unsigned long)p_buf
->p_buffer
,
1867 (int)pages_to_order_of_mag(
1868 privptr
->p_buff_pages_perwrite
));
1871 /* free any read pages already alloc */
1872 p_buf
=privptr
->p_buff_read
;
1873 while (p_buf
!=NULL
) {
1874 free_pages((unsigned long)p_buf
->p_buffer
,
1875 (int)pages_to_order_of_mag(
1876 privptr
->p_buff_pages_perread
));
1879 privptr
->p_buff_ccw
=NULL
;
1880 privptr
->p_buff_write
=NULL
;
1883 memset(p_buff
, 0x00, privptr
->p_env
->read_size
);
1884 p_buf
= p_free_chain
;
1885 privptr
->p_buff_read
= p_buf
;
1886 p_free_chain
= p_buf
->next
;
1888 if (p_last_CCWB
==NULL
) {
1894 p_buf
->next
=p_first_CCWB
;
1897 &p_first_CCWB
-> read
);
            /* save buff address */
            p_buf->p_buffer = (struct clawbuf *)p_buff;
            /* initialize read command */
            p_buf->read.cmd_code = CCW_CLAW_CMD_READ;
            p_buf->read.cda = (__u32)__pa(p_buff);
            p_buf->read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
            p_buf->read.count = privptr->p_env->read_size;

            /* initialize read_h command */
            p_buf->read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
            p_buf->read_h.cda =
                (__u32)__pa(&(p_buf->header));
            p_buf->read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
            p_buf->read_h.count = sizeof(struct clawh);

            /* initialize Signal command */
            p_buf->signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
            p_buf->signal.cda =
                (__u32)__pa(&(pClawH->flag));
            p_buf->signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
            p_buf->signal.count = 1;

            /* initialize r_TIC_1 command */
            p_buf->r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
            p_buf->r_TIC_1.cda = (__u32)real_TIC_address;
            p_buf->r_TIC_1.flags = 0;
            p_buf->r_TIC_1.count = 0;

            /* initialize r_read_FF command */
            p_buf->r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
            p_buf->r_read_FF.cda =
                (__u32)__pa(&(pClawH->flag));
            p_buf->r_read_FF.flags =
                CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
            p_buf->r_read_FF.count = 1;

            /* initialize r_TIC_2 */
            memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
                sizeof(struct ccw1));

            /* initialize Header */
            p_buf->header.length = 0xffff;
            p_buf->header.opcode = 0xff;
            p_buf->header.flag = CLAW_PENDING;
        }   /* for read_buffers */
    }   /* read_size >= PAGE_SIZE */
    }   /* p_buff_read == NULL */
    add_claw_reads(dev, p_first_CCWB, p_last_CCWB);
    privptr->buffs_alloc = 1;

    return 0;
}   /* end of init_ccw_bk */
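/*
 * Summary of the read-side channel program built above: every read buffer
 * gets a chain of read -> read_h -> signal -> r_TIC_1 / r_read_FF CCWs,
 * with r_TIC_2 kept as a copy of r_TIC_1 (presumably so the TIC target can
 * be re-pointed when buffers are re-queued), and the CLAW header primed to
 * length 0xffff / opcode 0xff / CLAW_PENDING to mark the buffer as empty.
 */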
/*-------------------------------------------------------------------*
*   probe_error                                                      *
*                                                                    *
*--------------------------------------------------------------------*/
static void
probe_error(struct ccwgroup_device *cgdev)
{
    struct claw_privbk *privptr;

    CLAW_DBF_TEXT(4, trace, "proberr");
    privptr = (struct claw_privbk *)cgdev->dev.driver_data;
    if (privptr != NULL) {
        cgdev->dev.driver_data = NULL;
        kfree(privptr->p_env);
        kfree(privptr->p_mtc_envelope);
        kfree(privptr);
    }
}   /* end of probe_error */
/*-------------------------------------------------------------------*
*   claw_process_control                                             *
*                                                                    *
*--------------------------------------------------------------------*/
static int
claw_process_control(struct net_device *dev, struct ccwbk *p_ccw)
{
    struct clawbuf *p_buf;
    struct clawctl ctlbk;
    struct clawctl *p_ctlbk;
    char temp_host_name[8];
    char temp_ws_name[8];
    struct claw_privbk *privptr;
    struct claw_env *p_env;
    struct sysval *p_sysval;
    struct conncmd *p_connect = NULL;
    int rc;
    struct chbk *p_ch = NULL;
    struct device *tdev;

    CLAW_DBF_TEXT(2, setup, "clw_cntl");
    udelay(1000);   /* Wait a ms for the control packets to
                     * catch up to each other */
    privptr = dev->ml_priv;
    p_env = privptr->p_env;
    tdev = &privptr->channel[READ].cdev->dev;
    memcpy(&temp_host_name, p_env->host_name, 8);
    memcpy(&temp_ws_name, p_env->adapter_name, 8);
    printk(KERN_INFO "%s: CLAW device %.8s: "
        "Received Control Packet\n",
        dev->name, temp_ws_name);
    if (privptr->release_pend == 1)
        return 0;
    p_buf = p_ccw->p_buffer;
    p_ctlbk = &ctlbk;
    if (p_env->packing == DO_PACKED) {   /* packing in progress? */
        memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
    } else {
        memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
    }
    switch (p_ctlbk->command) {
    case SYSTEM_VALIDATE_REQUEST:
        if (p_ctlbk->version != CLAW_VERSION_ID) {
            claw_snd_sys_validate_rsp(dev, p_ctlbk,
                CLAW_RC_WRONG_VERSION);
            printk("%s: %d is wrong version id. "
                "Expected %d\n",
                dev->name, p_ctlbk->version,
                CLAW_VERSION_ID);
        }
        p_sysval = (struct sysval *)&(p_ctlbk->data);
        printk("%s: Recv Sys Validate Request: "
            "Vers=%d,link_id=%d,Corr=%d,WS name=%."
            "8s,Host name=%.8s\n",
            dev->name, p_ctlbk->version,
            p_ctlbk->linkid,
            p_ctlbk->correlator,
            p_sysval->WS_name,
            p_sysval->host_name);
        if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
            claw_snd_sys_validate_rsp(dev, p_ctlbk,
                CLAW_RC_NAME_MISMATCH);
            CLAW_DBF_TEXT(2, setup, "HSTBAD");
            CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
            CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
            printk(KERN_INFO "%s: Host name mismatch\n",
                dev->name);
            printk(KERN_INFO "%s: Received :%s: "
                "expected :%s:\n",
                dev->name,
                p_sysval->host_name,
                temp_host_name);
        }
        if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
            claw_snd_sys_validate_rsp(dev, p_ctlbk,
                CLAW_RC_NAME_MISMATCH);
            CLAW_DBF_TEXT(2, setup, "WSNBAD");
            CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
            CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
            printk(KERN_INFO "%s: WS name mismatch\n",
                dev->name);
            printk(KERN_INFO "%s: Received :%s: "
                "expected :%s:\n",
                dev->name,
                p_sysval->WS_name,
                temp_ws_name);
        }
        if ((p_sysval->write_frame_size < p_env->write_size) &&
            (p_env->packing == 0)) {
            claw_snd_sys_validate_rsp(dev, p_ctlbk,
                CLAW_RC_HOST_RCV_TOO_SMALL);
            printk(KERN_INFO "%s: host write size is too "
                "small\n", dev->name);
            CLAW_DBF_TEXT(2, setup, "wrtszbad");
        }
        if ((p_sysval->read_frame_size < p_env->read_size) &&
            (p_env->packing == 0)) {
            claw_snd_sys_validate_rsp(dev, p_ctlbk,
                CLAW_RC_HOST_RCV_TOO_SMALL);
            printk(KERN_INFO "%s: host read size is too "
                "small\n", dev->name);
            CLAW_DBF_TEXT(2, setup, "rdsizbad");
        }
        claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
        printk(KERN_INFO "%s: CLAW device %.8s: System validate "
            "completed.\n", dev->name, temp_ws_name);
        printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name,
            p_sysval->read_frame_size, p_sysval->write_frame_size);
        privptr->system_validate_comp = 1;
        if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
            p_env->packing = PACKING_ASK;
        claw_strt_conn_req(dev);
        break;
    case SYSTEM_VALIDATE_RESPONSE:
        p_sysval = (struct sysval *)&(p_ctlbk->data);
        printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d,"
            "WS name=%.8s,Host name=%.8s\n",
            dev->name,
            p_ctlbk->version,
            p_ctlbk->correlator,
            p_ctlbk->rc,
            p_sysval->WS_name,
            p_sysval->host_name);
        switch (p_ctlbk->rc) {
        case 0:
            printk(KERN_INFO "%s: CLAW device "
                "%.8s: System validate "
                "completed.\n",
                dev->name, temp_ws_name);
            if (privptr->system_validate_comp == 0)
                claw_strt_conn_req(dev);
            privptr->system_validate_comp = 1;
            break;
        case CLAW_RC_NAME_MISMATCH:
            printk(KERN_INFO "%s: Sys Validate "
                "Resp : Host, WS name "
                "mismatch\n",
                dev->name);
            break;
        case CLAW_RC_WRONG_VERSION:
            printk(KERN_INFO "%s: Sys Validate "
                "Resp : Wrong version\n",
                dev->name);
            break;
        case CLAW_RC_HOST_RCV_TOO_SMALL:
            printk(KERN_INFO "%s: Sys Validate "
                "Resp : bad frame size\n",
                dev->name);
            break;
        default:
            printk(KERN_INFO "%s: Sys Validate "
                "error code=%d\n",
                dev->name, p_ctlbk->rc);
            break;
        }
        break;
    case CONNECTION_REQUEST:
        p_connect = (struct conncmd *)&(p_ctlbk->data);
        printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d,"
            "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
            dev->name,
            p_ctlbk->version,
            p_ctlbk->linkid,
            p_ctlbk->correlator,
            p_connect->host_name,
            p_connect->WS_name);
        if (privptr->active_link_ID != 0) {
            claw_snd_disc(dev, p_ctlbk);
            printk(KERN_INFO "%s: Conn Req error : "
                "a logical link is already active\n",
                dev->name);
        }
        if (p_ctlbk->linkid != 1) {
            claw_snd_disc(dev, p_ctlbk);
            printk(KERN_INFO "%s: Conn Req error : "
                "req logical link id is not 1\n",
                dev->name);
        }
        rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
        if (rc != 0) {
            claw_snd_disc(dev, p_ctlbk);
            printk(KERN_INFO "%s: Conn Resp error: "
                "req appl name does not match\n",
                dev->name);
        }
        claw_send_control(dev,
            CONNECTION_CONFIRM, p_ctlbk->linkid,
            p_ctlbk->correlator,
            0, p_connect->host_name,
            p_connect->WS_name);
        if (p_env->packing == PACKING_ASK) {
            p_env->packing = PACK_SEND;
            claw_snd_conn_req(dev, 0);
        }
        printk(KERN_INFO "%s: CLAW device %.8s: Connection "
            "completed link_id=%d.\n",
            dev->name, temp_ws_name,
            p_ctlbk->linkid);
        privptr->active_link_ID = p_ctlbk->linkid;
        p_ch = &privptr->channel[WRITE];
        wake_up(&p_ch->wait);   /* wake up claw_open (WRITE) */
        break;
    case CONNECTION_RESPONSE:
        p_connect = (struct conncmd *)&(p_ctlbk->data);
        printk(KERN_INFO "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
            "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
            dev->name,
            p_ctlbk->version,
            p_ctlbk->linkid,
            p_ctlbk->correlator,
            p_ctlbk->rc,
            p_connect->host_name,
            p_connect->WS_name);

        if (p_ctlbk->rc != 0) {
            printk(KERN_INFO "%s: Conn Resp error: rc=%d\n",
                dev->name, p_ctlbk->rc);
            return 1;
        }
        rc = find_link(dev,
            p_connect->host_name, p_connect->WS_name);
        if (rc != 0) {
            claw_snd_disc(dev, p_ctlbk);
            printk(KERN_INFO "%s: Conn Resp error: "
                "req appl name does not match\n",
                dev->name);
        }
        /* should be until CONNECTION_CONFIRM */
        privptr->active_link_ID = -(p_ctlbk->linkid);
        break;
    case CONNECTION_CONFIRM:
        p_connect = (struct conncmd *)&(p_ctlbk->data);
        printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
            "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
            dev->name,
            p_ctlbk->version,
            p_ctlbk->linkid,
            p_ctlbk->correlator,
            p_connect->host_name,
            p_connect->WS_name);
        if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
            privptr->active_link_ID = p_ctlbk->linkid;
            if (p_env->packing > PACKING_ASK) {
                printk(KERN_INFO "%s: Confirmed Now packing\n",
                    dev->name);
                p_env->packing = DO_PACKED;
            }
            p_ch = &privptr->channel[WRITE];
            wake_up(&p_ch->wait);
        } else {
            printk(KERN_INFO "%s: Conn confirm: "
                "unexpected linkid=%d\n",
                dev->name, p_ctlbk->linkid);
            claw_snd_disc(dev, p_ctlbk);
        }
        break;
    case DISCONNECT:
        printk(KERN_INFO "%s: Disconnect: "
            "Vers=%d,link_id=%d,Corr=%d\n",
            dev->name, p_ctlbk->version,
            p_ctlbk->linkid, p_ctlbk->correlator);
        if ((p_ctlbk->linkid == 2) &&
            (p_env->packing == PACK_SEND)) {
            privptr->active_link_ID = 1;
            p_env->packing = DO_PACKED;
        } else
            privptr->active_link_ID = 0;
        break;
    case CLAW_ERROR:
        printk(KERN_INFO "%s: CLAW ERROR detected\n",
            dev->name);
        break;
    default:
        printk(KERN_INFO "%s: Unexpected command code=%d\n",
            dev->name, p_ctlbk->command);
        break;
    }

    return 0;
}   /* end of claw_process_control */
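/*
 * The handler above implements the CLAW handshake as coded here:
 * SYSTEM_VALIDATE_REQUEST/RESPONSE exchange and check host/adapter names
 * and frame sizes, CONNECTION_REQUEST/RESPONSE/CONFIRM bring the logical
 * link up (a second link id appears while packed mode is being negotiated,
 * see the DISCONNECT case), and DISCONNECT either drops back to link 1 for
 * packed mode or clears the active link.  Waiters on the WRITE channel
 * (claw_open) are woken once a link becomes active.
 */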
/*-------------------------------------------------------------------*
*   claw_send_control                                                *
*                                                                    *
*--------------------------------------------------------------------*/
static int
claw_send_control(struct net_device *dev, __u8 type, __u8 link,
    __u8 correlator, __u8 rc, char *local_name, char *remote_name)
{
    struct claw_privbk *privptr;
    struct clawctl *p_ctl;
    struct sysval *p_sysval;
    struct conncmd *p_connect;
    struct sk_buff *skb;

    CLAW_DBF_TEXT(2, setup, "sndcntl");
    privptr = dev->ml_priv;
    p_ctl = (struct clawctl *)&privptr->ctl_bk;

    p_ctl->command = type;
    p_ctl->version = CLAW_VERSION_ID;
    p_ctl->linkid = link;
    p_ctl->correlator = correlator;
    p_ctl->rc = rc;

    p_sysval = (struct sysval *)&p_ctl->data;
    p_connect = (struct conncmd *)&p_ctl->data;

    switch (p_ctl->command) {
    case SYSTEM_VALIDATE_REQUEST:
    case SYSTEM_VALIDATE_RESPONSE:
        memcpy(&p_sysval->host_name, local_name, 8);
        memcpy(&p_sysval->WS_name, remote_name, 8);
        if (privptr->p_env->packing > 0) {
            p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
            p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
        } else {
            /* how big is the biggest group of packets */
            p_sysval->read_frame_size = privptr->p_env->read_size;
            p_sysval->write_frame_size = privptr->p_env->write_size;
        }
        memset(&p_sysval->reserved, 0x00, 4);
        break;
    case CONNECTION_REQUEST:
    case CONNECTION_RESPONSE:
    case CONNECTION_CONFIRM:
    case DISCONNECT:
        memcpy(&p_sysval->host_name, local_name, 8);
        memcpy(&p_sysval->WS_name, remote_name, 8);
        if (privptr->p_env->packing > 0) {
            /* how big is the biggest packet */
            p_connect->reserved1[0] = CLAW_FRAME_SIZE;
            p_connect->reserved1[1] = CLAW_FRAME_SIZE;
        } else {
            memset(&p_connect->reserved1, 0x00, 4);
            memset(&p_connect->reserved2, 0x00, 4);
        }
        break;
    default:
        break;
    }

    /* write Control Record to the device */
    skb = dev_alloc_skb(sizeof(struct clawctl));
    if (!skb)
        return -ENOMEM;
    memcpy(skb_put(skb, sizeof(struct clawctl)),
        p_ctl, sizeof(struct clawctl));
    if (privptr->p_env->packing >= PACK_SEND)
        claw_hw_tx(skb, dev, 1);
    else
        claw_hw_tx(skb, dev, 0);
    return 0;
}   /* end of claw_send_control */
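/*
 * Control records are not sent on a separate path: claw_send_control()
 * simply copies the clawctl block into an skb and pushes it through
 * claw_hw_tx(), with the pack flag set once packed mode is in effect.
 */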
/*-------------------------------------------------------------------*
*   claw_snd_conn_req                                                *
*                                                                    *
*--------------------------------------------------------------------*/
static int
claw_snd_conn_req(struct net_device *dev, __u8 link)
{
    int rc;
    struct claw_privbk *privptr = dev->ml_priv;
    struct clawctl *p_ctl;

    CLAW_DBF_TEXT(2, setup, "snd_conn");
    rc = 1;
    p_ctl = (struct clawctl *)&privptr->ctl_bk;
    p_ctl->linkid = link;
    if (privptr->system_validate_comp == 0x00)
        return rc;
    if (privptr->p_env->packing == PACKING_ASK)
        rc = claw_send_control(dev, CONNECTION_REQUEST, 0, 0, 0,
            WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
    if (privptr->p_env->packing == PACK_SEND) {
        rc = claw_send_control(dev, CONNECTION_REQUEST, 0, 0, 0,
            WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
    }
    if (privptr->p_env->packing == 0)
        rc = claw_send_control(dev, CONNECTION_REQUEST, 0, 0, 0,
            HOST_APPL_NAME, privptr->p_env->api_type);
    return rc;
}   /* end of claw_snd_conn_req */
/*-------------------------------------------------------------------*
*   claw_snd_disc                                                    *
*                                                                    *
*--------------------------------------------------------------------*/
static int
claw_snd_disc(struct net_device *dev, struct clawctl *p_ctl)
{
    int rc;
    struct conncmd *p_connect;

    CLAW_DBF_TEXT(2, setup, "snd_dsc");
    p_connect = (struct conncmd *)&p_ctl->data;

    rc = claw_send_control(dev, DISCONNECT, p_ctl->linkid,
        p_ctl->correlator, 0,
        p_connect->host_name, p_connect->WS_name);
    return rc;
}   /* end of claw_snd_disc */

/*-------------------------------------------------------------------*
*   claw_snd_sys_validate_rsp                                        *
*                                                                    *
*--------------------------------------------------------------------*/
static int
claw_snd_sys_validate_rsp(struct net_device *dev,
    struct clawctl *p_ctl, __u32 return_code)
{
    struct claw_env *p_env;
    struct claw_privbk *privptr;
    int rc;

    CLAW_DBF_TEXT(2, setup, "chkresp");
    privptr = dev->ml_priv;
    p_env = privptr->p_env;
    rc = claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
        p_ctl->linkid,
        p_ctl->correlator,
        return_code,
        p_env->host_name,
        p_env->adapter_name);
    return rc;
}   /* end of claw_snd_sys_validate_rsp */

/*-------------------------------------------------------------------*
*   claw_strt_conn_req                                               *
*                                                                    *
*--------------------------------------------------------------------*/
static int
claw_strt_conn_req(struct net_device *dev)
{
    int rc;

    CLAW_DBF_TEXT(2, setup, "conn_req");
    rc = claw_snd_conn_req(dev, 1);
    return rc;
}   /* end of claw_strt_conn_req */

/*-------------------------------------------------------------------*
*   claw_stats                                                       *
*-------------------------------------------------------------------*/
static struct net_device_stats *claw_stats(struct net_device *dev)
{
    struct claw_privbk *privptr;

    CLAW_DBF_TEXT(4, trace, "stats");
    privptr = dev->ml_priv;
    return &privptr->stats;
}   /* end of claw_stats */
/*-------------------------------------------------------------------*
*   unpack_read                                                      *
*                                                                    *
*--------------------------------------------------------------------*/
static void
unpack_read(struct net_device *dev)
{
    struct sk_buff *skb;
    struct claw_privbk *privptr;
    struct claw_env *p_env;
    struct ccwbk *p_this_ccw;
    struct ccwbk *p_first_ccw;
    struct ccwbk *p_last_ccw;
    struct clawph *p_packh;
    void *p_packd;
    struct clawctl *p_ctlrec = NULL;
    struct device *p_dev;

    __u32 len_of_data;
    __u32 pack_off;
    __u8 link_num;
    __u8 mtc_this_frm = 0;
    __u32 bytes_to_mov;
    int i = 0;
    int p = 0;

    CLAW_DBF_TEXT(4, trace, "unpkread");
    p_first_ccw = NULL;
    p_last_ccw = NULL;
    p_packh = NULL;
    p_packd = NULL;
    privptr = dev->ml_priv;

    p_dev = &privptr->channel[READ].cdev->dev;
    p_env = privptr->p_env;
    p_this_ccw = privptr->p_read_active_first;
    while (p_this_ccw != NULL && p_this_ccw->header.flag != CLAW_PENDING) {
        pack_off = 0;
        p = 0;
        p_this_ccw->header.flag = CLAW_PENDING;
        privptr->p_read_active_first = p_this_ccw->next;
        p_this_ccw->next = NULL;
        p_packh = (struct clawph *)p_this_ccw->p_buffer;
        if ((p_env->packing == PACK_SEND) &&
            (p_packh->len == 32) &&
            (p_packh->link_num == 0)) {   /* is it a packed ctl rec? */
            p_packh++;   /* peek past pack header */
            p_ctlrec = (struct clawctl *)p_packh;
            p_packh--;   /* un peek */
            if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
                (p_ctlrec->command == CONNECTION_CONFIRM))
                p_env->packing = DO_PACKED;
        }
        if (p_env->packing == DO_PACKED)
            link_num = p_packh->link_num;
        else
            link_num = p_this_ccw->header.opcode / 8;
        if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG) != 0) {
            mtc_this_frm = 1;
            if (p_this_ccw->header.length !=
                privptr->p_env->read_size) {
                printk(KERN_INFO " %s: Invalid frame detected, "
                    "length is %02x\n",
                    dev->name, p_this_ccw->header.length);
            }
        }

        if (privptr->mtc_skipping) {
            /*
             * We're in the mode of skipping past a
             * multi-frame message
             * that we can't process for some reason or other.
             * The first frame without the More-To-Come flag is
             * the last frame of the skipped message.
             */
            /* in case More-To-Come is not set in this frame */
            if (mtc_this_frm == 0) {
                privptr->mtc_skipping = 0;   /* Ok, the end */
                privptr->mtc_logical_link = -1;
            }
            goto NextFrame;
        }

        if (link_num == 0) {
            claw_process_control(dev, p_this_ccw);
            CLAW_DBF_TEXT(4, trace, "UnpkCntl");
            goto NextFrame;
        }
unpack_next:
        if (p_env->packing == DO_PACKED) {
            if (pack_off > p_env->read_size)
                goto NextFrame;
            p_packd = p_this_ccw->p_buffer + pack_off;
            p_packh = (struct clawph *)p_packd;
            if ((p_packh->len == 0) ||   /* all done with this frame? */
                (p_packh->flag != 0))
                goto NextFrame;
            bytes_to_mov = p_packh->len;
            pack_off += bytes_to_mov + sizeof(struct clawph);
            p++;
        } else {
            bytes_to_mov = p_this_ccw->header.length;
        }
        if (privptr->mtc_logical_link < 0) {
            /*
             * if More-To-Come is set in this frame then we don't know
             * the length of the entire message, and hence have to
             * accumulate it in a large buffer
             */
            /* We are starting a new envelope */
            privptr->mtc_offset = 0;
            privptr->mtc_logical_link = link_num;
        }

        if (bytes_to_mov > (MAX_ENVELOPE_SIZE - privptr->mtc_offset)) {
            /* error */
            privptr->stats.rx_frame_errors++;
            goto NextFrame;
        }
        if (p_env->packing == DO_PACKED) {
            memcpy(privptr->p_mtc_envelope + privptr->mtc_offset,
                p_packd + sizeof(struct clawph), bytes_to_mov);
        } else {
            memcpy(privptr->p_mtc_envelope + privptr->mtc_offset,
                p_this_ccw->p_buffer, bytes_to_mov);
        }
        if (mtc_this_frm == 0) {
            len_of_data = privptr->mtc_offset + bytes_to_mov;
            skb = dev_alloc_skb(len_of_data);
            if (skb) {
                memcpy(skb_put(skb, len_of_data),
                    privptr->p_mtc_envelope,
                    len_of_data);
                skb->dev = dev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IP);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                privptr->stats.rx_packets++;
                privptr->stats.rx_bytes += len_of_data;
                netif_rx(skb);
            } else {
                privptr->stats.rx_dropped++;
                printk(KERN_WARNING "%s: %s() low on memory\n",
                    dev->name, __func__);
            }
            privptr->mtc_offset = 0;
            privptr->mtc_logical_link = -1;
        } else {
            privptr->mtc_offset += bytes_to_mov;
        }
        if (p_env->packing == DO_PACKED)
            goto unpack_next;
NextFrame:
        /*
         * Remove this CCW block from the active read queue, and add it
         * to the queue of free blocks to be reused.
         */
        i++;
        p_this_ccw->header.length = 0xffff;
        p_this_ccw->header.opcode = 0xff;
        /*
         * add this one to the free queue for later reuse
         */
        if (p_first_ccw == NULL) {
            p_first_ccw = p_this_ccw;
        } else {
            p_last_ccw->next = p_this_ccw;
        }
        p_last_ccw = p_this_ccw;
        /*
         * chain to next block on active read queue
         */
        p_this_ccw = privptr->p_read_active_first;
        CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
    }   /* end of while */

    /* check validity */
    CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
    add_claw_reads(dev, p_first_ccw, p_last_ccw);
    claw_strt_read(dev, LOCK_YES);
    return;
}   /* end of unpack_read */
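/*
 * unpack_read() reassembles inbound data as coded above: frames flagged
 * MORE_to_COME are accumulated into p_mtc_envelope until a final frame
 * arrives, at which point one skb is built and handed to netif_rx(); in
 * packed mode each frame may carry several clawph-prefixed packets that
 * are walked via pack_off.  Oversized messages only bump rx_frame_errors,
 * and the spent CCW block is always recycled onto the free read queue.
 */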
/*-------------------------------------------------------------------*
*   claw_strt_read                                                   *
*                                                                    *
*--------------------------------------------------------------------*/
static void
claw_strt_read(struct net_device *dev, int lock)
{
    int rc = 0;
    unsigned long parm;
    unsigned long saveflags = 0;
    struct claw_privbk *privptr = dev->ml_priv;
    struct ccwbk *p_ccwbk;
    struct chbk *p_ch;
    struct clawh *p_clawh;

    p_ch = &privptr->channel[READ];

    CLAW_DBF_TEXT(4, trace, "StRdNter");
    p_clawh = (struct clawh *)privptr->p_claw_signal_blk;
    p_clawh->flag = CLAW_IDLE;   /* 0x00 */

    if ((privptr->p_write_active_first != NULL &&
         privptr->p_write_active_first->header.flag != CLAW_PENDING) ||
        (privptr->p_read_active_first != NULL &&
         privptr->p_read_active_first->header.flag != CLAW_PENDING)) {
        p_clawh->flag = CLAW_BUSY;   /* 0xff */
    }
    if (lock == LOCK_YES)
        spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
    if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
        CLAW_DBF_TEXT(4, trace, "HotRead");
        p_ccwbk = privptr->p_read_active_first;
        parm = (unsigned long)p_ch;
        rc = ccw_device_start(p_ch->cdev, &p_ccwbk->read, parm,
                              0xff, 0);
        if (rc != 0)
            ccw_check_return_code(p_ch->cdev, rc);
    } else {
        CLAW_DBF_TEXT(2, trace, "ReadAct");
    }

    if (lock == LOCK_YES)
        spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
    CLAW_DBF_TEXT(4, trace, "StRdExit");
    return;
}   /* end of claw_strt_read */
/*-------------------------------------------------------------------*
*   claw_strt_out_IO                                                 *
*                                                                    *
*--------------------------------------------------------------------*/
static void
claw_strt_out_IO(struct net_device *dev)
{
    int rc = 0;
    unsigned long parm;
    struct claw_privbk *privptr;
    struct chbk *p_ch;
    struct ccwbk *p_first_ccw;

    privptr = (struct claw_privbk *)dev->ml_priv;
    p_ch = &privptr->channel[WRITE];

    CLAW_DBF_TEXT(4, trace, "strt_io");
    p_first_ccw = privptr->p_write_active_first;

    if (p_ch->claw_state == CLAW_STOP)
        return;
    if (p_first_ccw == NULL)
        return;
    if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
        parm = (unsigned long)p_ch;
        CLAW_DBF_TEXT(2, trace, "StWrtIO");
        rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
                              0xff, 0);
        if (rc != 0)
            ccw_check_return_code(p_ch->cdev, rc);
    }
    dev->trans_start = jiffies;
    return;
}   /* end of claw_strt_out_IO */
/*-------------------------------------------------------------------*
*   Free write buffers                                               *
*                                                                    *
*--------------------------------------------------------------------*/
static void
claw_free_wrt_buf(struct net_device *dev)
{
    struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
    struct ccwbk *p_first_ccw;
    struct ccwbk *p_last_ccw;
    struct ccwbk *p_this_ccw;
    struct ccwbk *p_next_ccw;

    CLAW_DBF_TEXT(4, trace, "freewrtb");
    /* scan the write queue to free any completed write packets */
    p_first_ccw = NULL;
    p_last_ccw = NULL;
    p_this_ccw = privptr->p_write_active_first;
    while ((p_this_ccw != NULL) &&
           (p_this_ccw->header.flag != CLAW_PENDING)) {
        p_next_ccw = p_this_ccw->next;
        if (((p_next_ccw != NULL) &&
             (p_next_ccw->header.flag != CLAW_PENDING)) ||
            ((p_this_ccw == privptr->p_write_active_last) &&
             (p_this_ccw->header.flag != CLAW_PENDING))) {
            /* The next CCW is OK or this is  */
            /* the last CCW...free it   @A1A  */
            privptr->p_write_active_first = p_this_ccw->next;
            p_this_ccw->header.flag = CLAW_PENDING;
            p_this_ccw->next = privptr->p_write_free_chain;
            privptr->p_write_free_chain = p_this_ccw;
            ++privptr->write_free_count;
            privptr->stats.tx_bytes += p_this_ccw->write.count;
            p_this_ccw = privptr->p_write_active_first;
            privptr->stats.tx_packets++;
        } else {
            break;
        }
    }
    if (privptr->write_free_count != 0)
        claw_clearbit_busy(TB_NOBUFFER, dev);
    /* whole chain removed? */
    if (privptr->p_write_active_first == NULL)
        privptr->p_write_active_last = NULL;
    CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
    return;
}   /* end of claw_free_wrt_buf */
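/*
 * Write completion housekeeping: finished write CCW blocks are moved back
 * to p_write_free_chain and tx statistics are updated; once at least one
 * free block exists the TB_NOBUFFER busy bit is cleared so the stack may
 * queue further transmits.
 */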
/*-------------------------------------------------------------------*
*   claw free netdevice                                              *
*                                                                    *
*--------------------------------------------------------------------*/
static void
claw_free_netdevice(struct net_device *dev, int free_dev)
{
    struct claw_privbk *privptr;

    CLAW_DBF_TEXT(2, setup, "free_dev");
    if (!dev)
        return;
    CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
    privptr = dev->ml_priv;
    if (dev->flags & IFF_RUNNING)
        claw_release(dev);
    if (privptr)
        privptr->channel[READ].ndev = NULL;   /* say it's free */
    dev->ml_priv = NULL;
    if (free_dev)
        free_netdev(dev);
    CLAW_DBF_TEXT(2, setup, "free_ok");
}

/**
 * Claw init netdevice
 * Initialize everything of the net device except the name and the
 * channel structs.
 */
static void
claw_init_netdevice(struct net_device *dev)
{
    CLAW_DBF_TEXT(2, setup, "init_dev");
    CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
    dev->mtu = CLAW_DEFAULT_MTU_SIZE;
    dev->hard_start_xmit = claw_tx;
    dev->open = claw_open;
    dev->stop = claw_release;
    dev->get_stats = claw_stats;
    dev->change_mtu = claw_change_mtu;
    dev->hard_header_len = 0;
    dev->addr_len = 0;
    dev->type = ARPHRD_SLIP;
    dev->tx_queue_len = 1300;
    dev->flags = IFF_POINTOPOINT | IFF_NOARP;
    CLAW_DBF_TEXT(2, setup, "initok");
}
/**
 * Init a new channel in the privptr->channel[i].
 *
 * @param cdev  The ccw_device to be added.
 *
 * @return 0 on success, !0 on error.
 */
static int
add_channel(struct ccw_device *cdev, int i, struct claw_privbk *privptr)
{
    struct chbk *p_ch;
    struct ccw_dev_id dev_id;

    CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
    privptr->channel[i].flag = i + 1;   /* Read is 1, Write is 2 */
    p_ch = &privptr->channel[i];
    p_ch->cdev = cdev;
    snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
    ccw_device_get_id(cdev, &dev_id);
    p_ch->devno = dev_id.devno;
    if ((p_ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL)) == NULL)
        return -ENOMEM;
    return 0;
}   /* end of add_channel */
/**
 * Setup an interface.
 *
 * @param cgdev  Device to be setup.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
claw_new_device(struct ccwgroup_device *cgdev)
{
    struct claw_privbk *privptr;
    struct claw_env *p_env;
    struct net_device *dev;
    int ret;
    struct ccw_dev_id dev_id;

    printk(KERN_INFO "claw: add for %s\n",
        dev_name(&cgdev->cdev[READ]->dev));
    CLAW_DBF_TEXT(2, setup, "new_dev");
    privptr = cgdev->dev.driver_data;
    cgdev->cdev[READ]->dev.driver_data = privptr;
    cgdev->cdev[WRITE]->dev.driver_data = privptr;
    if (!privptr)
        return -ENODEV;
    p_env = privptr->p_env;
    ccw_device_get_id(cgdev->cdev[READ], &dev_id);
    p_env->devno[READ] = dev_id.devno;
    ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
    p_env->devno[WRITE] = dev_id.devno;
    ret = add_channel(cgdev->cdev[0], 0, privptr);
    if (ret == 0)
        ret = add_channel(cgdev->cdev[1], 1, privptr);
    if (ret != 0) {
        printk(KERN_WARNING "claw: "
            "add channel failed with ret = %d\n", ret);
        goto out;
    }
    ret = ccw_device_set_online(cgdev->cdev[READ]);
    if (ret != 0) {
        printk(KERN_WARNING
            "claw: ccw_device_set_online %s READ failed "
            "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev),
            ret);
        goto out;
    }
    ret = ccw_device_set_online(cgdev->cdev[WRITE]);
    if (ret != 0) {
        printk(KERN_WARNING
            "claw: ccw_device_set_online %s WRITE failed "
            "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev),
            ret);
        goto out;
    }
    dev = alloc_netdev(0, "claw%d", claw_init_netdevice);
    if (!dev) {
        printk(KERN_WARNING "%s:alloc_netdev failed\n", __func__);
        goto out;
    }
    dev->ml_priv = privptr;
    cgdev->dev.driver_data = privptr;
    cgdev->cdev[READ]->dev.driver_data = privptr;
    cgdev->cdev[WRITE]->dev.driver_data = privptr;
    /* sysfs magic */
    SET_NETDEV_DEV(dev, &cgdev->dev);
    if (register_netdev(dev) != 0) {
        claw_free_netdevice(dev, 1);
        CLAW_DBF_TEXT(2, trace, "regfail");
        goto out;
    }
    dev->flags &= ~IFF_RUNNING;
    if (privptr->buffs_alloc == 0) {
        ret = init_ccw_bk(dev);
        if (ret != 0) {
            unregister_netdev(dev);
            claw_free_netdevice(dev, 1);
            CLAW_DBF_TEXT(2, trace, "ccwmem");
            goto out;
        }
    }
    privptr->channel[READ].ndev = dev;
    privptr->channel[WRITE].ndev = dev;
    privptr->p_env->ndev = dev;

    printk(KERN_INFO "%s:readsize=%d writesize=%d "
        "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
        dev->name, p_env->read_size,
        p_env->write_size, p_env->read_buffers,
        p_env->write_buffers, p_env->devno[READ],
        p_env->devno[WRITE]);
    printk(KERN_INFO "%s:host_name:%.8s, adapter_name "
        ":%.8s api_type: %.8s\n",
        dev->name, p_env->host_name,
        p_env->adapter_name, p_env->api_type);
    return 0;
out:
    ccw_device_set_offline(cgdev->cdev[1]);
    ccw_device_set_offline(cgdev->cdev[0]);
    return -ENODEV;
}   /* end of claw_new_device */
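/*
 * Bring-up order in claw_new_device(): record the read/write subchannel
 * numbers, add both channels, set the ccw devices online, allocate and
 * register the claw%d net_device, and build the CCW buffer pools via
 * init_ccw_bk() before the channels are published in privptr.  Any failure
 * takes the error path that sets both ccw devices offline again.
 */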
static void
claw_purge_skb_queue(struct sk_buff_head *q)
{
    struct sk_buff *skb;

    CLAW_DBF_TEXT(4, trace, "purgque");
    while ((skb = skb_dequeue(q))) {
        atomic_dec(&skb->users);
        dev_kfree_skb_any(skb);
    }
}
/**
 * Shutdown an interface.
 *
 * @param cgdev  Device to be shut down.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
claw_shutdown_device(struct ccwgroup_device *cgdev)
{
    struct claw_privbk *priv;
    struct net_device *ndev;
    int ret;

    CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
    priv = cgdev->dev.driver_data;
    if (!priv)
        return -ENODEV;
    ndev = priv->channel[READ].ndev;
    if (ndev) {
        /* Close the device */
        printk(KERN_INFO "%s: shutting down\n", ndev->name);
        if (ndev->flags & IFF_RUNNING)
            ret = claw_release(ndev);
        ndev->flags &= ~IFF_RUNNING;
        unregister_netdev(ndev);
        ndev->ml_priv = NULL;   /* cgdev data, not ndev's to free */
        claw_free_netdevice(ndev, 1);
        priv->channel[READ].ndev = NULL;
        priv->channel[WRITE].ndev = NULL;
        priv->p_env->ndev = NULL;
    }
    ccw_device_set_offline(cgdev->cdev[1]);
    ccw_device_set_offline(cgdev->cdev[0]);
    return 0;
}   /* end of claw_shutdown_device */
static void
claw_remove_device(struct ccwgroup_device *cgdev)
{
    struct claw_privbk *priv;

    CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
    priv = cgdev->dev.driver_data;
    printk(KERN_INFO "claw: %s() called, %s will be removed.\n",
        __func__, dev_name(&cgdev->cdev[0]->dev));
    if (cgdev->state == CCWGROUP_ONLINE)
        claw_shutdown_device(cgdev);
    claw_remove_files(&cgdev->dev);
    kfree(priv->p_mtc_envelope);
    priv->p_mtc_envelope = NULL;
    kfree(priv->p_env);
    priv->p_env = NULL;
    kfree(priv->channel[0].irb);
    priv->channel[0].irb = NULL;
    kfree(priv->channel[1].irb);
    priv->channel[1].irb = NULL;
    kfree(priv);
    cgdev->dev.driver_data = NULL;
    cgdev->cdev[READ]->dev.driver_data = NULL;
    cgdev->cdev[WRITE]->dev.driver_data = NULL;
    put_device(&cgdev->dev);
}   /* end of claw_remove_device */
static ssize_t
claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    return sprintf(buf, "%s\n", p_env->host_name);
}

static ssize_t
claw_hname_write(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    if (count > MAX_NAME_LEN + 1)
        return -EINVAL;
    memset(p_env->host_name, 0x20, MAX_NAME_LEN);
    strncpy(p_env->host_name, buf, count);
    p_env->host_name[count - 1] = 0x20;   /* clear extra 0x0a */
    p_env->host_name[MAX_NAME_LEN] = 0x00;
    CLAW_DBF_TEXT(2, setup, "HstnSet");
    CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);

    return count;
}

static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);

static ssize_t
claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    return sprintf(buf, "%s\n", p_env->adapter_name);
}

static ssize_t
claw_adname_write(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    if (count > MAX_NAME_LEN + 1)
        return -EINVAL;
    memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
    strncpy(p_env->adapter_name, buf, count);
    p_env->adapter_name[count - 1] = 0x20;   /* clear extra 0x0a */
    p_env->adapter_name[MAX_NAME_LEN] = 0x00;
    CLAW_DBF_TEXT(2, setup, "AdnSet");
    CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);

    return count;
}

static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);

static ssize_t
claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    return sprintf(buf, "%s\n", p_env->api_type);
}

static ssize_t
claw_apname_write(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    if (count > MAX_NAME_LEN + 1)
        return -EINVAL;
    memset(p_env->api_type, 0x20, MAX_NAME_LEN);
    strncpy(p_env->api_type, buf, count);
    p_env->api_type[count - 1] = 0x20;   /* strip the trailing 0x0a */
    p_env->api_type[MAX_NAME_LEN] = 0x00;
    if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) {
        p_env->read_size = DEF_PACK_BUFSIZE;
        p_env->write_size = DEF_PACK_BUFSIZE;
        p_env->packing = PACKING_ASK;
        CLAW_DBF_TEXT(2, setup, "PACKING");
    } else {
        p_env->packing = 0;
        p_env->read_size = CLAW_FRAME_SIZE;
        p_env->write_size = CLAW_FRAME_SIZE;
        CLAW_DBF_TEXT(2, setup, "ApiSet");
    }
    CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
    return count;
}

static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
static ssize_t
claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    return sprintf(buf, "%d\n", p_env->write_buffers);
}

static ssize_t
claw_wbuff_write(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;
    int nnn, max;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    sscanf(buf, "%i", &nnn);
    if (p_env->packing) {
        max = 64;
    } else {
        max = 512;
    }
    if ((nnn > max) || (nnn < 2))
        return -EINVAL;
    p_env->write_buffers = nnn;
    CLAW_DBF_TEXT(2, setup, "Wbufset");
    CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
    return count;
}

static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);

static ssize_t
claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    return sprintf(buf, "%d\n", p_env->read_buffers);
}

static ssize_t
claw_rbuff_write(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
    struct claw_privbk *priv;
    struct claw_env *p_env;
    int nnn, max;

    priv = dev->driver_data;
    if (!priv)
        return -ENODEV;
    p_env = priv->p_env;
    sscanf(buf, "%i", &nnn);
    if (p_env->packing) {
        max = 64;
    } else {
        max = 512;
    }
    if ((nnn > max) || (nnn < 2))
        return -EINVAL;
    p_env->read_buffers = nnn;
    CLAW_DBF_TEXT(2, setup, "Rbufset");
    CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
    return count;
}

static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
static struct attribute *claw_attr[] = {
    &dev_attr_read_buffer.attr,
    &dev_attr_write_buffer.attr,
    &dev_attr_adapter_name.attr,
    &dev_attr_api_type.attr,
    &dev_attr_host_name.attr,
    NULL,
};

static struct attribute_group claw_attr_group = {
    .attrs = claw_attr,
};

static int
claw_add_files(struct device *dev)
{
    CLAW_DBF_TEXT(2, setup, "add_file");
    return sysfs_create_group(&dev->kobj, &claw_attr_group);
}

static void
claw_remove_files(struct device *dev)
{
    CLAW_DBF_TEXT(2, setup, "rem_file");
    sysfs_remove_group(&dev->kobj, &claw_attr_group);
}
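/*
 * The attributes grouped above (host_name, adapter_name, api_type,
 * read_buffer, write_buffer) are exposed through sysfs on the ccwgroup
 * device; they are normally written before the interface is brought up,
 * since api_type also resets read_size/write_size and the packing mode.
 */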
/*--------------------------------------------------------------------*
*   claw_init and cleanup                                             *
*---------------------------------------------------------------------*/

static void __exit
claw_cleanup(void)
{
    unregister_cu3088_discipline(&claw_group_driver);
    claw_unregister_debug_facility();
    printk(KERN_INFO "claw: Driver unloaded\n");
}

/**
 * Initialize module.
 * This is called just after the module is loaded.
 *
 * @return 0 on success, !0 on error.
 */
static int __init
claw_init(void)
{
    int ret = 0;

    printk(KERN_INFO "claw: starting driver\n");

    ret = claw_register_debug_facility();
    if (ret) {
        printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
            __func__, ret);
        return ret;
    }
    CLAW_DBF_TEXT(2, setup, "init_mod");
    ret = register_cu3088_discipline(&claw_group_driver);
    if (ret) {
        CLAW_DBF_TEXT(2, setup, "init_bad");
        claw_unregister_debug_facility();
        printk(KERN_WARNING "claw: %s() cu3088 register failed %d\n",
            __func__, ret);
    }
    return ret;
}

module_init(claw_init);
module_exit(claw_cleanup);

MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
    "Copyright 2000,2008 IBM Corporation\n");
MODULE_LICENSE("GPL");