/*
 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.224 $)
 *
 * Linux on zSeries OSA Express and HiperSockets support
 *
 * Copyright 2000,2003 IBM Corporation
 *
 * Author(s): Original Code written by
 *		Utz Bacher (utz.bacher@de.ibm.com)
 *	    Rewritten by
 *		Frank Pavlic (pavlic@de.ibm.com) and
 *		Thomas Spatzier <tspat@de.ibm.com>
 *
 * $Revision: 1.224 $	$Date: 2005/05/04 20:19:18 $
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <linux/igmp.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/mii.h>
#include <linux/rcupdate.h>
#include <linux/ethtool.h>

#include <net/route.h>

#include <asm/ebcdic.h>
#include <asm/timex.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_fs.h"
#include "qeth_eddp.h"
#define VERSION_QETH_C "$Revision: 1.224 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
 * Debug Facility Stuff
 */
static debug_info_t *qeth_dbf_setup = NULL;
static debug_info_t *qeth_dbf_data = NULL;
static debug_info_t *qeth_dbf_misc = NULL;
static debug_info_t *qeth_dbf_control = NULL;
debug_info_t *qeth_dbf_trace = NULL;
static debug_info_t *qeth_dbf_sense = NULL;
static debug_info_t *qeth_dbf_qerr = NULL;

DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
/**
 * some more definitions and declarations
 */
static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;

/* list of our cards */
struct qeth_card_list_struct qeth_card_list;
/* list of processes that want to be notified */
spinlock_t qeth_notify_lock;
struct list_head qeth_notify_list;
static void qeth_send_control_data_cb(struct qeth_channel *,
				      struct qeth_cmd_buffer *);
/**
 * here we go with function implementation
 */
static void
qeth_init_qdio_info(struct qeth_card *card);

static int
qeth_init_qdio_queues(struct qeth_card *card);

static int
qeth_alloc_qdio_buffers(struct qeth_card *card);

static void
qeth_free_qdio_buffers(struct qeth_card *);

static void
qeth_clear_qdio_buffers(struct qeth_card *);

static void
qeth_clear_ip_list(struct qeth_card *, int, int);

static void
qeth_clear_ipacmd_list(struct qeth_card *);

static int
qeth_qdio_clear_card(struct qeth_card *, int);

static void
qeth_clear_working_pool_list(struct qeth_card *);

static void
qeth_clear_cmd_buffers(struct qeth_channel *);

static int
qeth_stop(struct net_device *);

static void
qeth_clear_ipato_list(struct qeth_card *);

static int
qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);

static void
qeth_irq_tasklet(unsigned long);

static int
qeth_set_online(struct ccwgroup_device *);

static int
__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);

static struct qeth_ipaddr *
qeth_get_addr_buffer(enum qeth_prot_versions);

static void
qeth_set_multicast_list(struct net_device *);
static void
qeth_notify_processes(void)
{
	/* notify all registered processes */
	struct qeth_notify_list_struct *n_entry;

	QETH_DBF_TEXT(trace, 3, "procnoti");
	spin_lock(&qeth_notify_lock);
	list_for_each_entry(n_entry, &qeth_notify_list, list) {
		send_sig(n_entry->signum, n_entry->task, 1);
	}
	spin_unlock(&qeth_notify_lock);
}
int
qeth_notifier_unregister(struct task_struct *p)
{
	struct qeth_notify_list_struct *n_entry, *tmp;

	QETH_DBF_TEXT(trace, 2, "notunreg");
	spin_lock(&qeth_notify_lock);
	list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
		if (n_entry->task == p) {
			list_del(&n_entry->list);
			kfree(n_entry);
			break;
		}
	}
	spin_unlock(&qeth_notify_lock);
	return 0;
}
int
qeth_notifier_register(struct task_struct *p, int signum)
{
	struct qeth_notify_list_struct *n_entry;

	/* check first if entry already exists */
	spin_lock(&qeth_notify_lock);
	list_for_each_entry(n_entry, &qeth_notify_list, list) {
		if (n_entry->task == p) {
			n_entry->signum = signum;
			spin_unlock(&qeth_notify_lock);
			return 0;
		}
	}
	spin_unlock(&qeth_notify_lock);

	n_entry = (struct qeth_notify_list_struct *)
		kmalloc(sizeof(struct qeth_notify_list_struct), GFP_KERNEL);
	if (!n_entry)
		return -ENOMEM;
	n_entry->task = p;
	n_entry->signum = signum;
	spin_lock(&qeth_notify_lock);
	list_add(&n_entry->list, &qeth_notify_list);
	spin_unlock(&qeth_notify_lock);
	return 0;
}
/**
 * free channel command buffers
 */
static void
qeth_clean_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(setup, 2, "freech");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		kfree(channel->iob[cnt].data);
}
static void
qeth_free_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 2, "freecrd");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
	qeth_clean_channel(&card->read);
	qeth_clean_channel(&card->write);
	if (card->dev)
		free_netdev(card->dev);
	qeth_clear_ip_list(card, 0, 0);
	qeth_clear_ipato_list(card);
	kfree(card->ip_tbd_list);
	qeth_free_qdio_buffers(card);
	kfree(card);
}
/**
 * alloc memory for command buffer per channel
 */
static int
qeth_setup_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(setup, 2, "setupch");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data = (char *)
			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
	atomic_set(&channel->irq_pending, 0);
	spin_lock_init(&channel->iob_lock);

	init_waitqueue_head(&channel->wait_q);
	channel->irq_tasklet.data = (unsigned long) channel;
	channel->irq_tasklet.func = qeth_irq_tasklet;
	return 0;
}
/**
 * alloc memory for card structure
 */
static struct qeth_card *
qeth_alloc_card(void)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(setup, 2, "alloccrd");
	card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
					    GFP_DMA|GFP_KERNEL);
	if (!card)
		return NULL;
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
	memset(card, 0, sizeof(struct qeth_card));
	if (qeth_setup_channel(&card->read)) {
		kfree(card);
		return NULL;
	}
	if (qeth_setup_channel(&card->write)) {
		qeth_clean_channel(&card->read);
		kfree(card);
		return NULL;
	}
	return card;
}
static int
__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
		QETH_DBF_TEXT(trace, 2, "ckirberr");
		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
		QETH_DBF_TEXT(trace, 2, "ckirberr");
		QETH_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
		break;
	default:
		PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
			   cdev->dev.bus_id);
		QETH_DBF_TEXT(trace, 2, "ckirberr");
		QETH_DBF_TEXT(trace, 2, "  rc???");
	}
	return PTR_ERR(irb);
}
static int
qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_DBF_TEXT(trace, 2, "CGENCHK");
		PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
			   cdev->dev.bus_id, dstat, cstat);
		HEXDUMP16(WARN, "irb: ", irb);
		HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_DBF_TEXT(trace, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_DBF_TEXT(trace, 2, "CMDREJi");
			return 0;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_DBF_TEXT(trace, 2, "AFFE");
			return 1;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_DBF_TEXT(trace, 2, "ZEROSEN");
			return 0;
		}
		QETH_DBF_TEXT(trace, 2, "DGENCHK");
		return 1;
	}
	return 0;
}
static int qeth_issue_next_read(struct qeth_card *);
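/*
 * Interrupt handler for all three subchannels of a card: identify which
 * channel the interrupt belongs to, note clear/halt completion, check for
 * unit/channel errors and, if needed, trigger recovery; completed command
 * buffers are handed to the channel's tasklet for callback processing.
 */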
static void
qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *buffer;
	struct qeth_channel *channel;
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 5, "irq");

	if (__qeth_check_irb_error(cdev, irb))
		return;
	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	card = CARD_FROM_CDEV(cdev);
	if (!card)
		return;

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_DBF_TEXT(trace, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_DBF_TEXT(trace, 5, "write");
	} else {
		channel = &card->data;
		QETH_DBF_TEXT(trace, 5, "data");
	}
	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/* let's wake up immediately on data channel */
	if ((channel == &card->data) && (intparm != 0))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_DBF_TEXT(trace, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_DBF_TEXT(trace, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			/* TODO: we should make this s390dbf */
			PRINT_WARN("sense data available on channel %s.\n",
				   CHANNEL_ID(channel));
			PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
			HEXDUMP16(WARN, "irb: ", irb);
			HEXDUMP16(WARN, "sense data: ", irb->ecw);
		}
		rc = qeth_get_problem(cdev, irb);
		if (rc) {
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm) {
		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
		buffer->state = BUF_STATE_PROCESSED;
	}
	if (channel == &card->data)
		return;

	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		qeth_issue_next_read(card);

	tasklet_schedule(&channel->irq_tasklet);
	return;
out:
	wake_up(&card->wait_q);
}
/**
 * tasklet function scheduled from irq handler
 */
static void
qeth_irq_tasklet(unsigned long data)
{
	struct qeth_card *card;
	struct qeth_channel *channel;
	struct qeth_cmd_buffer *iob;
	__u8 index;

	QETH_DBF_TEXT(trace, 5, "irqtlet");
	channel = (struct qeth_channel *) data;
	iob = channel->iob;
	index = channel->buf_no;
	card = CARD_FROM_CDEV(channel->ccwdev);
	while (iob[index].state == BUF_STATE_PROCESSED) {
		if (iob[index].callback != NULL) {
			iob[index].callback(channel, iob + index);
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	}
	channel->buf_no = index;
	wake_up(&card->wait_q);
}
static int qeth_stop_card(struct qeth_card *, int);

static int
__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
{
	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
	int rc = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(setup, 3, "setoffl");
	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));

	recover_flag = card->state;
	if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS) {
		PRINT_WARN("Stopping card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}
	if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
	    (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
	    (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
	}
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	qeth_notify_processes();
	return 0;
}

static int
qeth_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_set_offline(cgdev, 0);
}
static int
qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);

static void
qeth_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
	unsigned long flags;

	QETH_DBF_TEXT(setup, 3, "rmdev");
	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));

	if (!card)
		return;

	if (qeth_wait_for_threads(card, 0xffffffff))
		return;

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_set_offline(cgdev);
	}
	/* remove from our internal list */
	write_lock_irqsave(&qeth_card_list.rwlock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
	if (card->dev)
		unregister_netdev(card->dev);
	qeth_remove_device_attributes(&cgdev->dev);
	qeth_free_card(card);
	cgdev->dev.driver_data = NULL;
	put_device(&cgdev->dev);
}
static int
qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);

static int
qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);

/*
 * Add/remove address to/from card's ip list, i.e. try to add or remove
 * reference to/from an IP address that is already registered on the card.
 * Returns:
 *	0  address was on card and its reference count has been adjusted,
 *	   but is still > 0, so nothing has to be done
 *	   also returns 0 if the address was not on card and the todo was to
 *	   delete the address -> there is also nothing to be done
 *	1  address was not on card and the todo is to add it to the card's ip
 *	   list
 *	-1 address was on card and its reference count has been decremented
 *	   to <= 0 by the todo -> address must be removed from card
 */
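/*
 * Illustrative example (not part of the original code): an address that
 * is on the card with users == 1, combined with a delete todo carrying
 * users == -1, ends up with a count of 0, so the function returns -1 and
 * the caller has to deregister the address from the card.
 */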
static int
__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
		      struct qeth_ipaddr **__addr)
{
	struct qeth_ipaddr *addr;
	int found = 0;

	list_for_each_entry(addr, &card->ip_list, entry) {
		if ((addr->proto == QETH_PROT_IPV4) &&
		    (todo->proto == QETH_PROT_IPV4) &&
		    (addr->type == todo->type) &&
		    (addr->u.a4.addr == todo->u.a4.addr) &&
		    (addr->u.a4.mask == todo->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((addr->proto == QETH_PROT_IPV6) &&
		    (todo->proto == QETH_PROT_IPV6) &&
		    (addr->type == todo->type) &&
		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		addr->users += todo->users;
		if (addr->users <= 0) {
			*__addr = addr;
			return -1;
		} else {
			/* for VIPA and RXIP limit refcount to 1 */
			if (addr->type != QETH_IP_TYPE_NORMAL)
				addr->users = 1;
			return 0;
		}
	}
	if (todo->users > 0) {
		/* for VIPA and RXIP limit refcount to 1 */
		if (todo->type != QETH_IP_TYPE_NORMAL)
			todo->users = 1;
		return 1;
	} else
		return 0;
}
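/*
 * Check whether an equivalent address is already present in the given
 * list; with same_type set the entry must have the same address type as
 * addr, otherwise it must have a different type.
 */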
static int
__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
			      int same_type)
{
	struct qeth_ipaddr *tmp;

	list_for_each_entry(tmp, list, entry) {
		if ((tmp->proto == QETH_PROT_IPV4) &&
		    (addr->proto == QETH_PROT_IPV4) &&
		    ((same_type && (tmp->type == addr->type)) ||
		     (!same_type && (tmp->type != addr->type))) &&
		    (tmp->u.a4.addr == addr->u.a4.addr)) {
			return 1;
		}
		if ((tmp->proto == QETH_PROT_IPV6) &&
		    (addr->proto == QETH_PROT_IPV6) &&
		    ((same_type && (tmp->type == addr->type)) ||
		     (!same_type && (tmp->type != addr->type))) &&
		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			return 1;
		}
	}
	return 0;
}
/*
 * Add IP to be added to todo list. If there is already an "add todo"
 * in this list we just increment the reference count.
 * Returns 0 if we just incremented reference count.
 */
static int
__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
{
	struct qeth_ipaddr *tmp, *t;
	int found = 0;

	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
			return 0;
		if ((tmp->proto == QETH_PROT_IPV4) &&
		    (addr->proto == QETH_PROT_IPV4) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a4.addr == addr->u.a4.addr) &&
		    (tmp->u.a4.mask == addr->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((tmp->proto == QETH_PROT_IPV6) &&
		    (addr->proto == QETH_PROT_IPV6) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		if (addr->users != 0)
			tmp->users += addr->users;
		else
			tmp->users += add ? 1 : -1;
		if (tmp->users == 0) {
			list_del(&tmp->entry);
			kfree(tmp);
		}
		return 0;
	} else {
		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
			list_add(&addr->entry, card->ip_tbd_list);
		else {
			if (addr->users == 0)
				addr->users += add ? 1 : -1;
			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
			    qeth_is_addr_covered_by_ipato(card, addr)) {
				QETH_DBF_TEXT(trace, 2, "tkovaddr");
				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
			}
			list_add_tail(&addr->entry, card->ip_tbd_list);
		}
		return 1;
	}
}
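/*
 * qeth_delete_ip() and qeth_add_ip() below only queue a delete/add todo
 * entry under ip_lock; the actual (de)registration on the card happens
 * later, when qeth_set_ip_addr_list() works through the todo list.
 */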
/*
 * Remove IP address from list
 */
static int
qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(trace, 4, "delip");
	if (addr->proto == QETH_PROT_IPV4)
		QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
	else {
		QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
	}
	spin_lock_irqsave(&card->ip_lock, flags);
	rc = __qeth_insert_ip_todo(card, addr, 0);
	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}
static int
qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(trace, 4, "addip");
	if (addr->proto == QETH_PROT_IPV4)
		QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
	else {
		QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
	}
	spin_lock_irqsave(&card->ip_lock, flags);
	rc = __qeth_insert_ip_todo(card, addr, 1);
	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}
static void
__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
{
	struct qeth_ipaddr *addr, *tmp;
	int rc;

	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
		if (addr->is_multicast) {
			spin_unlock_irqrestore(&card->ip_lock, *flags);
			rc = qeth_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, *flags);
			if (!rc) {
				list_del(&addr->entry);
				kfree(addr);
			}
		}
	}
}
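/*
 * Work through the todo list: the current ip_tbd_list is detached and
 * replaced by a fresh one, then each todo entry is applied to the
 * on-card ip_list, dropping ip_lock around the actual hardware
 * (de)registration calls.
 */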
static void
qeth_set_ip_addr_list(struct qeth_card *card)
{
	struct list_head *tbd_list;
	struct qeth_ipaddr *todo, *addr;
	unsigned long flags;
	int rc;

	QETH_DBF_TEXT(trace, 2, "sdiplist");
	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));

	spin_lock_irqsave(&card->ip_lock, flags);
	tbd_list = card->ip_tbd_list;
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(trace, 0, "silnomem");
		card->ip_tbd_list = tbd_list;
		spin_unlock_irqrestore(&card->ip_lock, flags);
		return;
	} else
		INIT_LIST_HEAD(card->ip_tbd_list);

	while (!list_empty(tbd_list)) {
		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
		list_del(&todo->entry);
		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
			__qeth_delete_all_mc(card, &flags);
			kfree(todo);
			continue;
		}
		rc = __qeth_ref_ip_on_card(card, todo, &addr);
		if (rc == 0) {
			/* nothing to be done; only adjusted refcount */
			kfree(todo);
		} else if (rc == 1) {
			/* new entry to be added to on-card list */
			spin_unlock_irqrestore(&card->ip_lock, flags);
			rc = qeth_register_addr_entry(card, todo);
			spin_lock_irqsave(&card->ip_lock, flags);
			if (!rc)
				list_add_tail(&todo->entry, &card->ip_list);
			else
				kfree(todo);
		} else if (rc == -1) {
			/* on-card entry to be removed */
			list_del_init(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, flags);
			rc = qeth_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
			if (!rc)
				kfree(addr);
			else
				list_add_tail(&addr->entry, &card->ip_list);
			kfree(todo);
		}
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	kfree(tbd_list);
}
851 static void qeth_delete_mc_addresses(struct qeth_card
*);
852 static void qeth_add_multicast_ipv4(struct qeth_card
*);
853 #ifdef CONFIG_QETH_IPV6
854 static void qeth_add_multicast_ipv6(struct qeth_card
*);
858 qeth_set_thread_start_bit(struct qeth_card
*card
, unsigned long thread
)
862 spin_lock_irqsave(&card
->thread_mask_lock
, flags
);
863 if ( !(card
->thread_allowed_mask
& thread
) ||
864 (card
->thread_start_mask
& thread
) ) {
865 spin_unlock_irqrestore(&card
->thread_mask_lock
, flags
);
868 card
->thread_start_mask
|= thread
;
869 spin_unlock_irqrestore(&card
->thread_mask_lock
, flags
);
874 qeth_clear_thread_start_bit(struct qeth_card
*card
, unsigned long thread
)
878 spin_lock_irqsave(&card
->thread_mask_lock
, flags
);
879 card
->thread_start_mask
&= ~thread
;
880 spin_unlock_irqrestore(&card
->thread_mask_lock
, flags
);
881 wake_up(&card
->wait_q
);
885 qeth_clear_thread_running_bit(struct qeth_card
*card
, unsigned long thread
)
889 spin_lock_irqsave(&card
->thread_mask_lock
, flags
);
890 card
->thread_running_mask
&= ~thread
;
891 spin_unlock_irqrestore(&card
->thread_mask_lock
, flags
);
892 wake_up(&card
->wait_q
);
896 __qeth_do_run_thread(struct qeth_card
*card
, unsigned long thread
)
901 spin_lock_irqsave(&card
->thread_mask_lock
, flags
);
902 if (card
->thread_start_mask
& thread
){
903 if ((card
->thread_allowed_mask
& thread
) &&
904 !(card
->thread_running_mask
& thread
)){
906 card
->thread_start_mask
&= ~thread
;
907 card
->thread_running_mask
|= thread
;
911 spin_unlock_irqrestore(&card
->thread_mask_lock
, flags
);
916 qeth_do_run_thread(struct qeth_card
*card
, unsigned long thread
)
920 wait_event(card
->wait_q
,
921 (rc
= __qeth_do_run_thread(card
, thread
)) >= 0);
926 qeth_register_ip_addresses(void *ptr
)
928 struct qeth_card
*card
;
930 card
= (struct qeth_card
*) ptr
;
931 daemonize("qeth_reg_ip");
932 QETH_DBF_TEXT(trace
,4,"regipth1");
933 if (!qeth_do_run_thread(card
, QETH_SET_IP_THREAD
))
935 QETH_DBF_TEXT(trace
,4,"regipth2");
936 qeth_set_ip_addr_list(card
);
937 qeth_clear_thread_running_bit(card
, QETH_SET_IP_THREAD
);
942 qeth_recover(void *ptr
)
944 struct qeth_card
*card
;
947 card
= (struct qeth_card
*) ptr
;
948 daemonize("qeth_recover");
949 QETH_DBF_TEXT(trace
,2,"recover1");
950 QETH_DBF_HEX(trace
, 2, &card
, sizeof(void *));
951 if (!qeth_do_run_thread(card
, QETH_RECOVER_THREAD
))
953 QETH_DBF_TEXT(trace
,2,"recover2");
954 PRINT_WARN("Recovery of device %s started ...\n",
956 card
->use_hard_stop
= 1;
957 __qeth_set_offline(card
->gdev
,1);
958 rc
= __qeth_set_online(card
->gdev
,1);
960 PRINT_INFO("Device %s successfully recovered!\n",
963 PRINT_INFO("Device %s could not be recovered!\n",
965 /* don't run another scheduled recovery */
966 qeth_clear_thread_start_bit(card
, QETH_RECOVER_THREAD
);
967 qeth_clear_thread_running_bit(card
, QETH_RECOVER_THREAD
);
972 qeth_schedule_recovery(struct qeth_card
*card
)
974 QETH_DBF_TEXT(trace
,2,"startrec");
976 if (qeth_set_thread_start_bit(card
, QETH_RECOVER_THREAD
) == 0)
977 schedule_work(&card
->kernel_thread_starter
);
981 qeth_do_start_thread(struct qeth_card
*card
, unsigned long thread
)
986 spin_lock_irqsave(&card
->thread_mask_lock
, flags
);
987 QETH_DBF_TEXT_(trace
, 4, " %02x%02x%02x",
988 (u8
) card
->thread_start_mask
,
989 (u8
) card
->thread_allowed_mask
,
990 (u8
) card
->thread_running_mask
);
991 rc
= (card
->thread_start_mask
& thread
);
992 spin_unlock_irqrestore(&card
->thread_mask_lock
, flags
);
997 qeth_start_kernel_thread(struct qeth_card
*card
)
999 QETH_DBF_TEXT(trace
, 2, "strthrd");
1001 if (card
->read
.state
!= CH_STATE_UP
&&
1002 card
->write
.state
!= CH_STATE_UP
)
1005 if (qeth_do_start_thread(card
, QETH_SET_IP_THREAD
))
1006 kernel_thread(qeth_register_ip_addresses
, (void *)card
,SIGCHLD
);
1007 if (qeth_do_start_thread(card
, QETH_RECOVER_THREAD
))
1008 kernel_thread(qeth_recover
, (void *) card
, SIGCHLD
);
1013 qeth_set_intial_options(struct qeth_card
*card
)
1015 card
->options
.route4
.type
= NO_ROUTER
;
1016 #ifdef CONFIG_QETH_IPV6
1017 card
->options
.route6
.type
= NO_ROUTER
;
1018 #endif /* QETH_IPV6 */
1019 card
->options
.checksum_type
= QETH_CHECKSUM_DEFAULT
;
1020 card
->options
.broadcast_mode
= QETH_TR_BROADCAST_ALLRINGS
;
1021 card
->options
.macaddr_mode
= QETH_TR_MACADDR_NONCANONICAL
;
1022 card
->options
.fake_broadcast
= 0;
1023 card
->options
.add_hhlen
= DEFAULT_ADD_HHLEN
;
1024 card
->options
.fake_ll
= 0;
1025 card
->options
.layer2
= 0;
 * initialize channels, card and all state machines
1032 qeth_setup_card(struct qeth_card
*card
)
1035 QETH_DBF_TEXT(setup
, 2, "setupcrd");
1036 QETH_DBF_HEX(setup
, 2, &card
, sizeof(void *));
1038 card
->read
.state
= CH_STATE_DOWN
;
1039 card
->write
.state
= CH_STATE_DOWN
;
1040 card
->data
.state
= CH_STATE_DOWN
;
1041 card
->state
= CARD_STATE_DOWN
;
1042 card
->lan_online
= 0;
1043 card
->use_hard_stop
= 0;
1045 #ifdef CONFIG_QETH_VLAN
1046 spin_lock_init(&card
->vlanlock
);
1047 card
->vlangrp
= NULL
;
1049 spin_lock_init(&card
->ip_lock
);
1050 spin_lock_init(&card
->thread_mask_lock
);
1051 card
->thread_start_mask
= 0;
1052 card
->thread_allowed_mask
= 0;
1053 card
->thread_running_mask
= 0;
1054 INIT_WORK(&card
->kernel_thread_starter
,
1055 (void *)qeth_start_kernel_thread
,card
);
1056 INIT_LIST_HEAD(&card
->ip_list
);
1057 card
->ip_tbd_list
= kmalloc(sizeof(struct list_head
), GFP_KERNEL
);
1058 if (!card
->ip_tbd_list
) {
1059 QETH_DBF_TEXT(setup
, 0, "iptbdnom");
1062 INIT_LIST_HEAD(card
->ip_tbd_list
);
1063 INIT_LIST_HEAD(&card
->cmd_waiter_list
);
1064 init_waitqueue_head(&card
->wait_q
);
/* initial options */
1066 qeth_set_intial_options(card
);
1067 /* IP address takeover */
1068 INIT_LIST_HEAD(&card
->ipato
.entries
);
1069 card
->ipato
.enabled
= 0;
1070 card
->ipato
.invert4
= 0;
1071 card
->ipato
.invert6
= 0;
1072 /* init QDIO stuff */
1073 qeth_init_qdio_info(card
);
1078 is_1920_device (struct qeth_card
*card
)
1080 int single_queue
= 0;
1081 struct ccw_device
*ccwdev
;
1082 struct channelPath_dsc
{
1093 QETH_DBF_TEXT(setup
, 2, "chk_1920");
1095 ccwdev
= card
->data
.ccwdev
;
1096 chp_dsc
= (struct channelPath_dsc
*)ccw_device_get_chp_desc(ccwdev
, 0);
1097 if (chp_dsc
!= NULL
) {
1098 /* CHPP field bit 6 == 1 -> single queue */
1099 single_queue
= ((chp_dsc
->chpp
& 0x02) == 0x02);
1102 QETH_DBF_TEXT_(setup
, 2, "rc:%x", single_queue
);
1103 return single_queue
;
1107 qeth_determine_card_type(struct qeth_card
*card
)
1111 QETH_DBF_TEXT(setup
, 2, "detcdtyp");
1113 while (known_devices
[i
][4]) {
1114 if ((CARD_RDEV(card
)->id
.dev_type
== known_devices
[i
][2]) &&
1115 (CARD_RDEV(card
)->id
.dev_model
== known_devices
[i
][3])) {
1116 card
->info
.type
= known_devices
[i
][4];
1117 if (is_1920_device(card
)) {
PRINT_INFO("Priority Queueing not available "
	"due to hardware limitations!\n");
1120 card
->qdio
.no_out_queues
= 1;
1121 card
->qdio
.default_out_queue
= 0;
1123 card
->qdio
.no_out_queues
= known_devices
[i
][8];
1125 card
->info
.is_multicast_different
= known_devices
[i
][9];
1130 card
->info
.type
= QETH_CARD_TYPE_UNKNOWN
;
1131 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card
));
1136 qeth_probe_device(struct ccwgroup_device
*gdev
)
1138 struct qeth_card
*card
;
1140 unsigned long flags
;
1143 QETH_DBF_TEXT(setup
, 2, "probedev");
1146 if (!get_device(dev
))
1149 card
= qeth_alloc_card();
1152 QETH_DBF_TEXT_(setup
, 2, "1err%d", -ENOMEM
);
1155 card
->read
.ccwdev
= gdev
->cdev
[0];
1156 card
->write
.ccwdev
= gdev
->cdev
[1];
1157 card
->data
.ccwdev
= gdev
->cdev
[2];
1159 if ((rc
= qeth_setup_card(card
))){
1160 QETH_DBF_TEXT_(setup
, 2, "2err%d", rc
);
1162 qeth_free_card(card
);
1165 gdev
->dev
.driver_data
= card
;
1167 gdev
->cdev
[0]->handler
= qeth_irq
;
1168 gdev
->cdev
[1]->handler
= qeth_irq
;
1169 gdev
->cdev
[2]->handler
= qeth_irq
;
1171 rc
= qeth_create_device_attributes(dev
);
1174 qeth_free_card(card
);
1177 if ((rc
= qeth_determine_card_type(card
))){
1178 PRINT_WARN("%s: not a valid card type\n", __func__
);
1179 QETH_DBF_TEXT_(setup
, 2, "3err%d", rc
);
1181 qeth_free_card(card
);
1184 /* insert into our internal list */
1185 write_lock_irqsave(&qeth_card_list
.rwlock
, flags
);
1186 list_add_tail(&card
->list
, &qeth_card_list
.list
);
1187 write_unlock_irqrestore(&qeth_card_list
.rwlock
, flags
);
1193 qeth_get_unitaddr(struct qeth_card
*card
)
1199 QETH_DBF_TEXT(setup
, 2, "getunit");
1200 rc
= read_conf_data(CARD_DDEV(card
), (void **) &prcd
, &length
);
1202 PRINT_ERR("read_conf_data for device %s returned %i\n",
1203 CARD_DDEV_ID(card
), rc
);
1206 card
->info
.chpid
= prcd
[30];
1207 card
->info
.unit_addr2
= prcd
[31];
1208 card
->info
.cula
= prcd
[63];
1209 card
->info
.guestlan
= ((prcd
[0x10] == _ascebc
['V']) &&
1210 (prcd
[0x11] == _ascebc
['M']));
1215 qeth_init_tokens(struct qeth_card
*card
)
1217 card
->token
.issuer_rm_w
= 0x00010103UL
;
1218 card
->token
.cm_filter_w
= 0x00010108UL
;
1219 card
->token
.cm_connection_w
= 0x0001010aUL
;
1220 card
->token
.ulp_filter_w
= 0x0001010bUL
;
1221 card
->token
.ulp_connection_w
= 0x0001010dUL
;
1225 raw_devno_from_bus_id(char *id
)
1227 id
+= (strlen(id
) - 4);
1228 return (__u16
) simple_strtoul(id
, &id
, 16);
1234 qeth_setup_ccw(struct qeth_channel
*channel
,unsigned char *iob
, __u32 len
)
1236 struct qeth_card
*card
;
1238 QETH_DBF_TEXT(trace
, 4, "setupccw");
1239 card
= CARD_FROM_CDEV(channel
->ccwdev
);
1240 if (channel
== &card
->read
)
1241 memcpy(&channel
->ccw
, READ_CCW
, sizeof(struct ccw1
));
1243 memcpy(&channel
->ccw
, WRITE_CCW
, sizeof(struct ccw1
));
1244 channel
->ccw
.count
= len
;
1245 channel
->ccw
.cda
= (__u32
) __pa(iob
);
1249 * get free buffer for ccws (IDX activation, lancmds,ipassists...)
1251 static struct qeth_cmd_buffer
*
1252 __qeth_get_buffer(struct qeth_channel
*channel
)
1256 QETH_DBF_TEXT(trace
, 6, "getbuff");
1257 index
= channel
->io_buf_no
;
1259 if (channel
->iob
[index
].state
== BUF_STATE_FREE
) {
1260 channel
->iob
[index
].state
= BUF_STATE_LOCKED
;
1261 channel
->io_buf_no
= (channel
->io_buf_no
+ 1) %
1263 memset(channel
->iob
[index
].data
, 0, QETH_BUFSIZE
);
1264 return channel
->iob
+ index
;
1266 index
= (index
+ 1) % QETH_CMD_BUFFER_NO
;
1267 } while(index
!= channel
->io_buf_no
);
1273 * release command buffer
1276 qeth_release_buffer(struct qeth_channel
*channel
, struct qeth_cmd_buffer
*iob
)
1278 unsigned long flags
;
1280 QETH_DBF_TEXT(trace
, 6, "relbuff");
1281 spin_lock_irqsave(&channel
->iob_lock
, flags
);
1282 memset(iob
->data
, 0, QETH_BUFSIZE
);
1283 iob
->state
= BUF_STATE_FREE
;
1284 iob
->callback
= qeth_send_control_data_cb
;
1286 spin_unlock_irqrestore(&channel
->iob_lock
, flags
);
1289 static struct qeth_cmd_buffer
*
1290 qeth_get_buffer(struct qeth_channel
*channel
)
1292 struct qeth_cmd_buffer
*buffer
= NULL
;
1293 unsigned long flags
;
1295 spin_lock_irqsave(&channel
->iob_lock
, flags
);
1296 buffer
= __qeth_get_buffer(channel
);
1297 spin_unlock_irqrestore(&channel
->iob_lock
, flags
);
1301 static struct qeth_cmd_buffer
*
1302 qeth_wait_for_buffer(struct qeth_channel
*channel
)
1304 struct qeth_cmd_buffer
*buffer
;
1305 wait_event(channel
->wait_q
,
1306 ((buffer
= qeth_get_buffer(channel
)) != NULL
));
1311 qeth_clear_cmd_buffers(struct qeth_channel
*channel
)
1315 for (cnt
=0; cnt
< QETH_CMD_BUFFER_NO
; cnt
++)
1316 qeth_release_buffer(channel
,&channel
->iob
[cnt
]);
1317 channel
->buf_no
= 0;
1318 channel
->io_buf_no
= 0;
1322 * start IDX for read and write channel
1325 qeth_idx_activate_get_answer(struct qeth_channel
*channel
,
1326 void (*idx_reply_cb
)(struct qeth_channel
*,
1327 struct qeth_cmd_buffer
*))
1329 struct qeth_cmd_buffer
*iob
;
1330 unsigned long flags
;
1332 struct qeth_card
*card
;
1334 QETH_DBF_TEXT(setup
, 2, "idxanswr");
1335 card
= CARD_FROM_CDEV(channel
->ccwdev
);
1336 iob
= qeth_get_buffer(channel
);
1337 iob
->callback
= idx_reply_cb
;
1338 memcpy(&channel
->ccw
, READ_CCW
, sizeof(struct ccw1
));
1339 channel
->ccw
.count
= QETH_BUFSIZE
;
1340 channel
->ccw
.cda
= (__u32
) __pa(iob
->data
);
1342 wait_event(card
->wait_q
,
1343 atomic_compare_and_swap(0,1,&channel
->irq_pending
) == 0);
1344 QETH_DBF_TEXT(setup
, 6, "noirqpnd");
1345 spin_lock_irqsave(get_ccwdev_lock(channel
->ccwdev
), flags
);
1346 rc
= ccw_device_start(channel
->ccwdev
,
1347 &channel
->ccw
,(addr_t
) iob
, 0, 0);
1348 spin_unlock_irqrestore(get_ccwdev_lock(channel
->ccwdev
), flags
);
1351 PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc
);
1352 QETH_DBF_TEXT_(setup
, 2, "2err%d", rc
);
1353 atomic_set(&channel
->irq_pending
, 0);
1354 wake_up(&card
->wait_q
);
1357 rc
= wait_event_interruptible_timeout(card
->wait_q
,
1358 channel
->state
== CH_STATE_UP
, QETH_TIMEOUT
);
1359 if (rc
== -ERESTARTSYS
)
1361 if (channel
->state
!= CH_STATE_UP
){
1363 QETH_DBF_TEXT_(setup
, 2, "3err%d", rc
);
1364 qeth_clear_cmd_buffers(channel
);
1371 qeth_idx_activate_channel(struct qeth_channel
*channel
,
1372 void (*idx_reply_cb
)(struct qeth_channel
*,
1373 struct qeth_cmd_buffer
*))
1375 struct qeth_card
*card
;
1376 struct qeth_cmd_buffer
*iob
;
1377 unsigned long flags
;
1381 card
= CARD_FROM_CDEV(channel
->ccwdev
);
1383 QETH_DBF_TEXT(setup
, 2, "idxactch");
1385 iob
= qeth_get_buffer(channel
);
1386 iob
->callback
= idx_reply_cb
;
1387 memcpy(&channel
->ccw
, WRITE_CCW
, sizeof(struct ccw1
));
1388 channel
->ccw
.count
= IDX_ACTIVATE_SIZE
;
1389 channel
->ccw
.cda
= (__u32
) __pa(iob
->data
);
1390 if (channel
== &card
->write
) {
1391 memcpy(iob
->data
, IDX_ACTIVATE_WRITE
, IDX_ACTIVATE_SIZE
);
1392 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob
->data
),
1393 &card
->seqno
.trans_hdr
, QETH_SEQ_NO_LENGTH
);
1394 card
->seqno
.trans_hdr
++;
1396 memcpy(iob
->data
, IDX_ACTIVATE_READ
, IDX_ACTIVATE_SIZE
);
1397 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob
->data
),
1398 &card
->seqno
.trans_hdr
, QETH_SEQ_NO_LENGTH
);
1400 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob
->data
),
1401 &card
->token
.issuer_rm_w
,QETH_MPC_TOKEN_LENGTH
);
1402 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob
->data
),
1403 &card
->info
.func_level
,sizeof(__u16
));
1404 temp
= raw_devno_from_bus_id(CARD_DDEV_ID(card
));
1405 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob
->data
), &temp
, 2);
1406 temp
= (card
->info
.cula
<< 8) + card
->info
.unit_addr2
;
1407 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob
->data
), &temp
, 2);
1409 wait_event(card
->wait_q
,
1410 atomic_compare_and_swap(0,1,&channel
->irq_pending
) == 0);
1411 QETH_DBF_TEXT(setup
, 6, "noirqpnd");
1412 spin_lock_irqsave(get_ccwdev_lock(channel
->ccwdev
), flags
);
1413 rc
= ccw_device_start(channel
->ccwdev
,
1414 &channel
->ccw
,(addr_t
) iob
, 0, 0);
1415 spin_unlock_irqrestore(get_ccwdev_lock(channel
->ccwdev
), flags
);
1418 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc
);
1419 QETH_DBF_TEXT_(setup
, 2, "1err%d", rc
);
1420 atomic_set(&channel
->irq_pending
, 0);
1421 wake_up(&card
->wait_q
);
1424 rc
= wait_event_interruptible_timeout(card
->wait_q
,
1425 channel
->state
== CH_STATE_ACTIVATING
, QETH_TIMEOUT
);
1426 if (rc
== -ERESTARTSYS
)
1428 if (channel
->state
!= CH_STATE_ACTIVATING
) {
1429 PRINT_WARN("qeth: IDX activate timed out!\n");
1430 QETH_DBF_TEXT_(setup
, 2, "2err%d", -ETIME
);
1431 qeth_clear_cmd_buffers(channel
);
1434 return qeth_idx_activate_get_answer(channel
,idx_reply_cb
);
1438 qeth_peer_func_level(int level
)
1440 if ((level
& 0xff) == 8)
1441 return (level
& 0xff) + 0x400;
1442 if (((level
>> 8) & 3) == 1)
1443 return (level
& 0xff) + 0x200;
1448 qeth_idx_write_cb(struct qeth_channel
*channel
, struct qeth_cmd_buffer
*iob
)
1450 struct qeth_card
*card
;
1453 QETH_DBF_TEXT(setup
,2, "idxwrcb");
1455 if (channel
->state
== CH_STATE_DOWN
) {
1456 channel
->state
= CH_STATE_ACTIVATING
;
1459 card
= CARD_FROM_CDEV(channel
->ccwdev
);
1461 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob
->data
))) {
1462 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1463 "reply\n", CARD_WDEV_ID(card
));
1466 memcpy(&temp
, QETH_IDX_ACT_FUNC_LEVEL(iob
->data
), 2);
1467 if ((temp
& ~0x0100) != qeth_peer_func_level(card
->info
.func_level
)) {
1468 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1469 "function level mismatch "
1470 "(sent: 0x%x, received: 0x%x)\n",
1471 CARD_WDEV_ID(card
), card
->info
.func_level
, temp
);
1474 channel
->state
= CH_STATE_UP
;
1476 qeth_release_buffer(channel
, iob
);
1480 qeth_check_idx_response(unsigned char *buffer
)
1485 QETH_DBF_HEX(control
, 2, buffer
, QETH_DBF_CONTROL_LEN
);
1486 if ((buffer
[2] & 0xc0) == 0xc0) {
1487 PRINT_WARN("received an IDX TERMINATE "
1488 "with cause code 0x%02x%s\n",
1490 ((buffer
[4] == 0x22) ?
1491 " -- try another portname" : ""));
1492 QETH_DBF_TEXT(trace
, 2, "ckidxres");
1493 QETH_DBF_TEXT(trace
, 2, " idxterm");
1494 QETH_DBF_TEXT_(trace
, 2, " rc%d", -EIO
);
1501 qeth_idx_read_cb(struct qeth_channel
*channel
, struct qeth_cmd_buffer
*iob
)
1503 struct qeth_card
*card
;
1506 QETH_DBF_TEXT(setup
, 2, "idxrdcb");
1507 if (channel
->state
== CH_STATE_DOWN
) {
1508 channel
->state
= CH_STATE_ACTIVATING
;
1512 card
= CARD_FROM_CDEV(channel
->ccwdev
);
1513 if (qeth_check_idx_response(iob
->data
)) {
1516 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob
->data
))) {
1517 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1518 "reply\n", CARD_RDEV_ID(card
));
 * temporary fix for microcode bug
 * to revert it, replace OR by AND
1526 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob
->data
)) ||
1527 (card
->info
.type
== QETH_CARD_TYPE_OSAE
) )
1528 card
->info
.portname_required
= 1;
1530 memcpy(&temp
, QETH_IDX_ACT_FUNC_LEVEL(iob
->data
), 2);
1531 if (temp
!= qeth_peer_func_level(card
->info
.func_level
)) {
1532 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1533 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1534 CARD_RDEV_ID(card
), card
->info
.func_level
, temp
);
1537 memcpy(&card
->token
.issuer_rm_r
,
1538 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob
->data
),
1539 QETH_MPC_TOKEN_LENGTH
);
1540 memcpy(&card
->info
.mcl_level
[0],
1541 QETH_IDX_REPLY_LEVEL(iob
->data
), QETH_MCL_LENGTH
);
1542 channel
->state
= CH_STATE_UP
;
1544 qeth_release_buffer(channel
,iob
);
1548 qeth_issue_next_read(struct qeth_card
*card
)
1551 struct qeth_cmd_buffer
*iob
;
1553 QETH_DBF_TEXT(trace
,5,"issnxrd");
1554 if (card
->read
.state
!= CH_STATE_UP
)
1556 iob
= qeth_get_buffer(&card
->read
);
1558 PRINT_WARN("issue_next_read failed: no iob available!\n");
1561 qeth_setup_ccw(&card
->read
, iob
->data
, QETH_BUFSIZE
);
1562 wait_event(card
->wait_q
,
1563 atomic_compare_and_swap(0,1,&card
->read
.irq_pending
) == 0);
1564 QETH_DBF_TEXT(trace
, 6, "noirqpnd");
1565 rc
= ccw_device_start(card
->read
.ccwdev
, &card
->read
.ccw
,
1566 (addr_t
) iob
, 0, 0);
1568 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc
);
1569 atomic_set(&card
->read
.irq_pending
, 0);
1570 qeth_schedule_recovery(card
);
1571 wake_up(&card
->wait_q
);
1576 static struct qeth_reply
*
1577 qeth_alloc_reply(struct qeth_card
*card
)
1579 struct qeth_reply
*reply
;
1581 reply
= kmalloc(sizeof(struct qeth_reply
), GFP_ATOMIC
);
1583 memset(reply
, 0, sizeof(struct qeth_reply
));
1584 atomic_set(&reply
->refcnt
, 1);
1591 qeth_get_reply(struct qeth_reply
*reply
)
1593 WARN_ON(atomic_read(&reply
->refcnt
) <= 0);
1594 atomic_inc(&reply
->refcnt
);
1598 qeth_put_reply(struct qeth_reply
*reply
)
1600 WARN_ON(atomic_read(&reply
->refcnt
) <= 0);
1601 if (atomic_dec_and_test(&reply
->refcnt
))
1606 qeth_cmd_timeout(unsigned long data
)
1608 struct qeth_reply
*reply
, *list_reply
, *r
;
1609 unsigned long flags
;
1611 reply
= (struct qeth_reply
*) data
;
1612 spin_lock_irqsave(&reply
->card
->lock
, flags
);
1613 list_for_each_entry_safe(list_reply
, r
,
1614 &reply
->card
->cmd_waiter_list
, list
) {
1615 if (reply
== list_reply
){
1616 qeth_get_reply(reply
);
1617 list_del_init(&reply
->list
);
1618 spin_unlock_irqrestore(&reply
->card
->lock
, flags
);
1620 reply
->received
= 1;
1621 wake_up(&reply
->wait_q
);
1622 qeth_put_reply(reply
);
1626 spin_unlock_irqrestore(&reply
->card
->lock
, flags
);
1630 qeth_reset_ip_addresses(struct qeth_card
*card
)
1632 QETH_DBF_TEXT(trace
, 2, "rstipadd");
1634 qeth_clear_ip_list(card
, 0, 1);
1635 /* this function will also schedule the SET_IP_THREAD */
1636 qeth_set_multicast_list(card
->dev
);
1639 static struct qeth_ipa_cmd
*
1640 qeth_check_ipa_data(struct qeth_card
*card
, struct qeth_cmd_buffer
*iob
)
1642 struct qeth_ipa_cmd
*cmd
= NULL
;
1644 QETH_DBF_TEXT(trace
,5,"chkipad");
1645 if (IS_IPA(iob
->data
)){
1646 cmd
= (struct qeth_ipa_cmd
*) PDU_ENCAPSULATION(iob
->data
);
1647 if (IS_IPA_REPLY(cmd
))
1650 switch (cmd
->hdr
.command
) {
1651 case IPA_CMD_STOPLAN
:
1652 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1653 "there is a network problem or "
1654 "someone pulled the cable or "
1655 "disabled the port.\n",
1656 QETH_CARD_IFNAME(card
),
1658 card
->lan_online
= 0;
1659 netif_carrier_off(card
->dev
);
1661 case IPA_CMD_STARTLAN
:
1662 PRINT_INFO("Link reestablished on %s "
1663 "(CHPID 0x%X). Scheduling "
1664 "IP address reset.\n",
1665 QETH_CARD_IFNAME(card
),
1667 card
->lan_online
= 1;
1668 netif_carrier_on(card
->dev
);
1669 qeth_reset_ip_addresses(card
);
1671 case IPA_CMD_REGISTER_LOCAL_ADDR
:
1672 QETH_DBF_TEXT(trace
,3, "irla");
1674 case IPA_CMD_UNREGISTER_LOCAL_ADDR
:
1675 QETH_DBF_TEXT(trace
,3, "urla");
1678 PRINT_WARN("Received data is IPA "
1679 "but not a reply!\n");
1688 * wake all waiting ipa commands
1691 qeth_clear_ipacmd_list(struct qeth_card
*card
)
1693 struct qeth_reply
*reply
, *r
;
1694 unsigned long flags
;
1696 QETH_DBF_TEXT(trace
, 4, "clipalst");
1698 spin_lock_irqsave(&card
->lock
, flags
);
1699 list_for_each_entry_safe(reply
, r
, &card
->cmd_waiter_list
, list
) {
1700 qeth_get_reply(reply
);
1702 reply
->received
= 1;
1703 list_del_init(&reply
->list
);
1704 wake_up(&reply
->wait_q
);
1705 qeth_put_reply(reply
);
1707 spin_unlock_irqrestore(&card
->lock
, flags
);
1711 qeth_send_control_data_cb(struct qeth_channel
*channel
,
1712 struct qeth_cmd_buffer
*iob
)
1714 struct qeth_card
*card
;
1715 struct qeth_reply
*reply
, *r
;
1716 struct qeth_ipa_cmd
*cmd
;
1717 unsigned long flags
;
1720 QETH_DBF_TEXT(trace
,4,"sndctlcb");
1722 card
= CARD_FROM_CDEV(channel
->ccwdev
);
1723 if (qeth_check_idx_response(iob
->data
)) {
1724 qeth_clear_ipacmd_list(card
);
1725 qeth_schedule_recovery(card
);
1729 cmd
= qeth_check_ipa_data(card
, iob
);
1730 if ((cmd
== NULL
) && (card
->state
!= CARD_STATE_DOWN
))
1733 spin_lock_irqsave(&card
->lock
, flags
);
1734 list_for_each_entry_safe(reply
, r
, &card
->cmd_waiter_list
, list
) {
1735 if ((reply
->seqno
== QETH_IDX_COMMAND_SEQNO
) ||
1736 ((cmd
) && (reply
->seqno
== cmd
->hdr
.seqno
))) {
1737 qeth_get_reply(reply
);
1738 list_del_init(&reply
->list
);
1739 spin_unlock_irqrestore(&card
->lock
, flags
);
1741 if (reply
->callback
!= NULL
) {
1743 reply
->offset
= (__u16
)((char*)cmd
-
1745 keep_reply
= reply
->callback(card
,
1747 (unsigned long)cmd
);
1750 keep_reply
= reply
->callback(card
,
1752 (unsigned long)iob
);
1755 reply
->rc
= (u16
) cmd
->hdr
.return_code
;
1757 reply
->rc
= iob
->rc
;
1759 spin_lock_irqsave(&card
->lock
, flags
);
1760 list_add_tail(&reply
->list
,
1761 &card
->cmd_waiter_list
);
1762 spin_unlock_irqrestore(&card
->lock
, flags
);
1764 reply
->received
= 1;
1765 wake_up(&reply
->wait_q
);
1767 qeth_put_reply(reply
);
1771 spin_unlock_irqrestore(&card
->lock
, flags
);
1773 memcpy(&card
->seqno
.pdu_hdr_ack
,
1774 QETH_PDU_HEADER_SEQ_NO(iob
->data
),
1775 QETH_SEQ_NO_LENGTH
);
1776 qeth_release_buffer(channel
,iob
);
1780 qeth_send_control_data(struct qeth_card
*card
, int len
,
1781 struct qeth_cmd_buffer
*iob
,
1783 (struct qeth_card
*, struct qeth_reply
*, unsigned long),
1788 unsigned long flags
;
1789 struct qeth_reply
*reply
;
1790 struct timer_list timer
;
1792 QETH_DBF_TEXT(trace
, 2, "sendctl");
1794 qeth_setup_ccw(&card
->write
,iob
->data
,len
);
1796 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob
->data
),
1797 &card
->seqno
.trans_hdr
, QETH_SEQ_NO_LENGTH
);
1798 card
->seqno
.trans_hdr
++;
1800 memcpy(QETH_PDU_HEADER_SEQ_NO(iob
->data
),
1801 &card
->seqno
.pdu_hdr
, QETH_SEQ_NO_LENGTH
);
1802 card
->seqno
.pdu_hdr
++;
1803 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob
->data
),
1804 &card
->seqno
.pdu_hdr_ack
, QETH_SEQ_NO_LENGTH
);
1805 iob
->callback
= qeth_release_buffer
;
1807 reply
= qeth_alloc_reply(card
);
PRINT_WARN("Could not allocate qeth_reply!\n");
1812 reply
->callback
= reply_cb
;
1813 reply
->param
= reply_param
;
1814 if (card
->state
== CARD_STATE_DOWN
)
1815 reply
->seqno
= QETH_IDX_COMMAND_SEQNO
;
1817 reply
->seqno
= card
->seqno
.ipa
++;
1819 timer
.function
= qeth_cmd_timeout
;
1820 timer
.data
= (unsigned long) reply
;
1821 if (IS_IPA(iob
->data
))
1822 timer
.expires
= jiffies
+ QETH_IPA_TIMEOUT
;
1824 timer
.expires
= jiffies
+ QETH_TIMEOUT
;
1825 init_waitqueue_head(&reply
->wait_q
);
1826 spin_lock_irqsave(&card
->lock
, flags
);
1827 list_add_tail(&reply
->list
, &card
->cmd_waiter_list
);
1828 spin_unlock_irqrestore(&card
->lock
, flags
);
1829 QETH_DBF_HEX(control
, 2, iob
->data
, QETH_DBF_CONTROL_LEN
);
1830 wait_event(card
->wait_q
,
1831 atomic_compare_and_swap(0,1,&card
->write
.irq_pending
) == 0);
1832 QETH_DBF_TEXT(trace
, 6, "noirqpnd");
1833 spin_lock_irqsave(get_ccwdev_lock(card
->write
.ccwdev
), flags
);
1834 rc
= ccw_device_start(card
->write
.ccwdev
, &card
->write
.ccw
,
1835 (addr_t
) iob
, 0, 0);
1836 spin_unlock_irqrestore(get_ccwdev_lock(card
->write
.ccwdev
), flags
);
1838 PRINT_WARN("qeth_send_control_data: "
1839 "ccw_device_start rc = %i\n", rc
);
1840 QETH_DBF_TEXT_(trace
, 2, " err%d", rc
);
1841 spin_lock_irqsave(&card
->lock
, flags
);
1842 list_del_init(&reply
->list
);
1843 qeth_put_reply(reply
);
1844 spin_unlock_irqrestore(&card
->lock
, flags
);
1845 qeth_release_buffer(iob
->channel
, iob
);
1846 atomic_set(&card
->write
.irq_pending
, 0);
1847 wake_up(&card
->wait_q
);
1851 wait_event(reply
->wait_q
, reply
->received
);
1852 del_timer_sync(&timer
);
1854 qeth_put_reply(reply
);
1859 qeth_send_ipa_cmd(struct qeth_card
*card
, struct qeth_cmd_buffer
*iob
,
1861 (struct qeth_card
*,struct qeth_reply
*, unsigned long),
1867 QETH_DBF_TEXT(trace
,4,"sendipa");
1869 memcpy(iob
->data
, IPA_PDU_HEADER
, IPA_PDU_HEADER_SIZE
);
1871 if (card
->options
.layer2
)
1872 prot_type
= QETH_PROT_LAYER2
;
1874 prot_type
= QETH_PROT_TCPIP
;
1876 memcpy(QETH_IPA_CMD_PROT_TYPE(iob
->data
),&prot_type
,1);
1877 memcpy(QETH_IPA_CMD_DEST_ADDR(iob
->data
),
1878 &card
->token
.ulp_connection_r
, QETH_MPC_TOKEN_LENGTH
);
1880 rc
= qeth_send_control_data(card
, IPA_CMD_LENGTH
, iob
,
1881 reply_cb
, reply_param
);
1887 qeth_cm_enable_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
1890 struct qeth_cmd_buffer
*iob
;
1892 QETH_DBF_TEXT(setup
, 2, "cmenblcb");
1894 iob
= (struct qeth_cmd_buffer
*) data
;
1895 memcpy(&card
->token
.cm_filter_r
,
1896 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob
->data
),
1897 QETH_MPC_TOKEN_LENGTH
);
1898 QETH_DBF_TEXT_(setup
, 2, " rc%d", iob
->rc
);
1903 qeth_cm_enable(struct qeth_card
*card
)
1906 struct qeth_cmd_buffer
*iob
;
1908 QETH_DBF_TEXT(setup
,2,"cmenable");
1910 iob
= qeth_wait_for_buffer(&card
->write
);
1911 memcpy(iob
->data
, CM_ENABLE
, CM_ENABLE_SIZE
);
1912 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob
->data
),
1913 &card
->token
.issuer_rm_r
, QETH_MPC_TOKEN_LENGTH
);
1914 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob
->data
),
1915 &card
->token
.cm_filter_w
, QETH_MPC_TOKEN_LENGTH
);
1917 rc
= qeth_send_control_data(card
, CM_ENABLE_SIZE
, iob
,
1918 qeth_cm_enable_cb
, NULL
);
1923 qeth_cm_setup_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
1927 struct qeth_cmd_buffer
*iob
;
1929 QETH_DBF_TEXT(setup
, 2, "cmsetpcb");
1931 iob
= (struct qeth_cmd_buffer
*) data
;
1932 memcpy(&card
->token
.cm_connection_r
,
1933 QETH_CM_SETUP_RESP_DEST_ADDR(iob
->data
),
1934 QETH_MPC_TOKEN_LENGTH
);
1935 QETH_DBF_TEXT_(setup
, 2, " rc%d", iob
->rc
);
1940 qeth_cm_setup(struct qeth_card
*card
)
1943 struct qeth_cmd_buffer
*iob
;
1945 QETH_DBF_TEXT(setup
,2,"cmsetup");
1947 iob
= qeth_wait_for_buffer(&card
->write
);
1948 memcpy(iob
->data
, CM_SETUP
, CM_SETUP_SIZE
);
1949 memcpy(QETH_CM_SETUP_DEST_ADDR(iob
->data
),
1950 &card
->token
.issuer_rm_r
, QETH_MPC_TOKEN_LENGTH
);
1951 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob
->data
),
1952 &card
->token
.cm_connection_w
, QETH_MPC_TOKEN_LENGTH
);
1953 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob
->data
),
1954 &card
->token
.cm_filter_r
, QETH_MPC_TOKEN_LENGTH
);
1955 rc
= qeth_send_control_data(card
, CM_SETUP_SIZE
, iob
,
1956 qeth_cm_setup_cb
, NULL
);
1962 qeth_ulp_enable_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
1966 __u16 mtu
, framesize
;
1969 struct qeth_cmd_buffer
*iob
;
1971 QETH_DBF_TEXT(setup
, 2, "ulpenacb");
1973 iob
= (struct qeth_cmd_buffer
*) data
;
1974 memcpy(&card
->token
.ulp_filter_r
,
1975 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob
->data
),
1976 QETH_MPC_TOKEN_LENGTH
);
1977 if (qeth_get_mtu_out_of_mpc(card
->info
.type
)) {
1978 memcpy(&framesize
, QETH_ULP_ENABLE_RESP_MAX_MTU(iob
->data
), 2);
1979 mtu
= qeth_get_mtu_outof_framesize(framesize
);
1982 QETH_DBF_TEXT_(setup
, 2, " rc%d", iob
->rc
);
1985 card
->info
.max_mtu
= mtu
;
1986 card
->info
.initial_mtu
= mtu
;
1987 card
->qdio
.in_buf_size
= mtu
+ 2 * PAGE_SIZE
;
1989 card
->info
.initial_mtu
= qeth_get_initial_mtu_for_card(card
);
1990 card
->info
.max_mtu
= qeth_get_max_mtu_for_card(card
->info
.type
);
1991 card
->qdio
.in_buf_size
= QETH_IN_BUF_SIZE_DEFAULT
;
1994 memcpy(&len
, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob
->data
), 2);
1995 if (len
>= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE
) {
1997 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob
->data
), 1);
1998 card
->info
.link_type
= link_type
;
2000 card
->info
.link_type
= 0;
2001 QETH_DBF_TEXT_(setup
, 2, " rc%d", iob
->rc
);
2006 qeth_ulp_enable(struct qeth_card
*card
)
2010 struct qeth_cmd_buffer
*iob
;
2012 /*FIXME: trace view callbacks*/
2013 QETH_DBF_TEXT(setup
,2,"ulpenabl");
2015 iob
= qeth_wait_for_buffer(&card
->write
);
2016 memcpy(iob
->data
, ULP_ENABLE
, ULP_ENABLE_SIZE
);
2018 *(QETH_ULP_ENABLE_LINKNUM(iob
->data
)) =
2019 (__u8
) card
->info
.portno
;
2020 if (card
->options
.layer2
)
2021 prot_type
= QETH_PROT_LAYER2
;
2023 prot_type
= QETH_PROT_TCPIP
;
2025 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob
->data
),&prot_type
,1);
2026 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob
->data
),
2027 &card
->token
.cm_connection_r
, QETH_MPC_TOKEN_LENGTH
);
2028 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob
->data
),
2029 &card
->token
.ulp_filter_w
, QETH_MPC_TOKEN_LENGTH
);
2030 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob
->data
),
2031 card
->info
.portname
, 9);
2032 rc
= qeth_send_control_data(card
, ULP_ENABLE_SIZE
, iob
,
2033 qeth_ulp_enable_cb
, NULL
);
2039 __raw_devno_from_bus_id(char *id
)
2041 id
+= (strlen(id
) - 4);
2042 return (__u16
) simple_strtoul(id
, &id
, 16);
2046 qeth_ulp_setup_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
2049 struct qeth_cmd_buffer
*iob
;
2051 QETH_DBF_TEXT(setup
, 2, "ulpstpcb");
2053 iob
= (struct qeth_cmd_buffer
*) data
;
2054 memcpy(&card
->token
.ulp_connection_r
,
2055 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob
->data
),
2056 QETH_MPC_TOKEN_LENGTH
);
2057 QETH_DBF_TEXT_(setup
, 2, " rc%d", iob
->rc
);
2062 qeth_ulp_setup(struct qeth_card
*card
)
2066 struct qeth_cmd_buffer
*iob
;
2068 QETH_DBF_TEXT(setup
,2,"ulpsetup");
2070 iob
= qeth_wait_for_buffer(&card
->write
);
2071 memcpy(iob
->data
, ULP_SETUP
, ULP_SETUP_SIZE
);
2073 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob
->data
),
2074 &card
->token
.cm_connection_r
, QETH_MPC_TOKEN_LENGTH
);
2075 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob
->data
),
2076 &card
->token
.ulp_connection_w
, QETH_MPC_TOKEN_LENGTH
);
2077 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob
->data
),
2078 &card
->token
.ulp_filter_r
, QETH_MPC_TOKEN_LENGTH
);
2080 temp
= __raw_devno_from_bus_id(CARD_DDEV_ID(card
));
2081 memcpy(QETH_ULP_SETUP_CUA(iob
->data
), &temp
, 2);
2082 temp
= (card
->info
.cula
<< 8) + card
->info
.unit_addr2
;
2083 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob
->data
), &temp
, 2);
2084 rc
= qeth_send_control_data(card
, ULP_SETUP_SIZE
, iob
,
2085 qeth_ulp_setup_cb
, NULL
);
2090 qeth_check_for_inbound_error(struct qeth_qdio_buffer
*buf
,
2091 unsigned int qdio_error
,
2092 unsigned int siga_error
)
2096 if (qdio_error
|| siga_error
) {
2097 QETH_DBF_TEXT(trace
, 2, "qdinerr");
2098 QETH_DBF_TEXT(qerr
, 2, "qdinerr");
2099 QETH_DBF_TEXT_(qerr
, 2, " F15=%02X",
2100 buf
->buffer
->element
[15].flags
& 0xff);
2101 QETH_DBF_TEXT_(qerr
, 2, " F14=%02X",
2102 buf
->buffer
->element
[14].flags
& 0xff);
2103 QETH_DBF_TEXT_(qerr
, 2, " qerr=%X", qdio_error
);
2104 QETH_DBF_TEXT_(qerr
, 2, " serr=%X", siga_error
);
2110 static inline struct sk_buff
*
2111 qeth_get_skb(unsigned int length
)
2113 struct sk_buff
* skb
;
2114 #ifdef CONFIG_QETH_VLAN
2115 if ((skb
= dev_alloc_skb(length
+ VLAN_HLEN
)))
2116 skb_reserve(skb
, VLAN_HLEN
);
2118 skb
= dev_alloc_skb(length
);
2123 static inline struct sk_buff
*
2124 qeth_get_next_skb(struct qeth_card
*card
, struct qdio_buffer
*buffer
,
2125 struct qdio_buffer_element
**__element
, int *__offset
,
2126 struct qeth_hdr
**hdr
)
2128 struct qdio_buffer_element
*element
= *__element
;
2129 int offset
= *__offset
;
2130 struct sk_buff
*skb
= NULL
;
2135 QETH_DBF_TEXT(trace
,6,"nextskb");
2136 /* qeth_hdr must not cross element boundaries */
2137 if (element
->length
< offset
+ sizeof(struct qeth_hdr
)){
2138 if (qeth_is_last_sbale(element
))
2142 if (element
->length
< sizeof(struct qeth_hdr
))
2145 *hdr
= element
->addr
+ offset
;
2147 offset
+= sizeof(struct qeth_hdr
);
2148 if (card
->options
.layer2
)
2149 skb_len
= (*hdr
)->hdr
.l2
.pkt_length
;
2151 skb_len
= (*hdr
)->hdr
.l3
.length
;
2155 if (card
->options
.fake_ll
){
2156 if(card
->dev
->type
== ARPHRD_IEEE802_TR
){
2157 if (!(skb
= qeth_get_skb(skb_len
+QETH_FAKE_LL_LEN_TR
)))
2159 skb_reserve(skb
,QETH_FAKE_LL_LEN_TR
);
2161 if (!(skb
= qeth_get_skb(skb_len
+QETH_FAKE_LL_LEN_ETH
)))
2163 skb_reserve(skb
,QETH_FAKE_LL_LEN_ETH
);
2165 } else if (!(skb
= qeth_get_skb(skb_len
)))
2167 data_ptr
= element
->addr
+ offset
;
2169 data_len
= min(skb_len
, (int)(element
->length
- offset
));
2171 memcpy(skb_put(skb
, data_len
), data_ptr
, data_len
);
2172 skb_len
-= data_len
;
2174 if (qeth_is_last_sbale(element
)){
2175 QETH_DBF_TEXT(trace
,4,"unexeob");
2176 QETH_DBF_TEXT_(trace
,4,"%s",CARD_BUS_ID(card
));
2177 QETH_DBF_TEXT(qerr
,2,"unexeob");
2178 QETH_DBF_TEXT_(qerr
,2,"%s",CARD_BUS_ID(card
));
2179 QETH_DBF_HEX(misc
,4,buffer
,sizeof(*buffer
));
2180 dev_kfree_skb_any(skb
);
2181 card
->stats
.rx_errors
++;
2186 data_ptr
= element
->addr
;
2191 *__element
= element
;
2195 if (net_ratelimit()){
2196 PRINT_WARN("No memory for packet received on %s.\n",
2197 QETH_CARD_IFNAME(card
));
2198 QETH_DBF_TEXT(trace
,2,"noskbmem");
2199 QETH_DBF_TEXT_(trace
,2,"%s",CARD_BUS_ID(card
));
2201 card
->stats
.rx_dropped
++;
2205 static inline __be16
2206 qeth_type_trans(struct sk_buff
*skb
, struct net_device
*dev
)
2208 struct qeth_card
*card
;
2211 QETH_DBF_TEXT(trace
,6,"typtrans");
2213 card
= (struct qeth_card
*)dev
->priv
;
2215 if ((card
->info
.link_type
== QETH_LINK_TYPE_HSTR
) ||
2216 (card
->info
.link_type
== QETH_LINK_TYPE_LANE_TR
))
2217 return tr_type_trans(skb
,dev
);
2218 #endif /* CONFIG_TR */
2219 skb
->mac
.raw
= skb
->data
;
2220 skb_pull(skb
, ETH_HLEN
);
2223 if (*eth
->h_dest
& 1) {
2224 if (memcmp(eth
->h_dest
, dev
->broadcast
, ETH_ALEN
) == 0)
2225 skb
->pkt_type
= PACKET_BROADCAST
;
2227 skb
->pkt_type
= PACKET_MULTICAST
;
2228 } else if (memcmp(eth
->h_dest
, dev
->dev_addr
, ETH_ALEN
))
2229 skb
->pkt_type
= PACKET_OTHERHOST
;
2231 if (ntohs(eth
->h_proto
) >= 1536)
2232 return eth
->h_proto
;
2233 if (*(unsigned short *) (skb
->data
) == 0xFFFF)
2234 return htons(ETH_P_802_3
);
2235 return htons(ETH_P_802_2
);
2239 qeth_rebuild_skb_fake_ll_tr(struct qeth_card
*card
, struct sk_buff
*skb
,
2240 struct qeth_hdr
*hdr
)
2242 struct trh_hdr
*fake_hdr
;
2243 struct trllc
*fake_llc
;
2244 struct iphdr
*ip_hdr
;
2246 QETH_DBF_TEXT(trace
,5,"skbfktr");
2247 skb
->mac
.raw
= skb
->data
- QETH_FAKE_LL_LEN_TR
;
/* this is a fake token ring header */
2249 fake_hdr
= (struct trh_hdr
*) skb
->mac
.raw
;
2251 /* the destination MAC address */
2252 switch (skb
->pkt_type
){
2253 case PACKET_MULTICAST
:
2254 switch (skb
->protocol
){
2255 #ifdef CONFIG_QETH_IPV6
2256 case __constant_htons(ETH_P_IPV6
):
2257 ndisc_mc_map((struct in6_addr
*)
2258 skb
->data
+ QETH_FAKE_LL_V6_ADDR_POS
,
2259 fake_hdr
->daddr
, card
->dev
, 0);
2261 #endif /* CONFIG_QETH_IPV6 */
2262 case __constant_htons(ETH_P_IP
):
2263 ip_hdr
= (struct iphdr
*)skb
->data
;
2264 ip_tr_mc_map(ip_hdr
->daddr
, fake_hdr
->daddr
);
2267 memcpy(fake_hdr
->daddr
, card
->dev
->dev_addr
, TR_ALEN
);
2270 case PACKET_BROADCAST
:
2271 memset(fake_hdr
->daddr
, 0xff, TR_ALEN
);
2274 memcpy(fake_hdr
->daddr
, card
->dev
->dev_addr
, TR_ALEN
);
2276 /* the source MAC address */
2277 if (hdr
->hdr
.l3
.ext_flags
& QETH_HDR_EXT_SRC_MAC_ADDR
)
2278 memcpy(fake_hdr
->saddr
, &hdr
->hdr
.l3
.dest_addr
[2], TR_ALEN
);
2280 memset(fake_hdr
->saddr
, 0, TR_ALEN
);
2282 fake_llc
= (struct trllc
*)&(fake_hdr
->rcf
);
2283 fake_llc
->dsap
= EXTENDED_SAP
;
2284 fake_llc
->ssap
= EXTENDED_SAP
;
2285 fake_llc
->llc
= UI_CMD
;
2286 fake_llc
->protid
[0] = 0;
2287 fake_llc
->protid
[1] = 0;
2288 fake_llc
->protid
[2] = 0;
2289 fake_llc
->ethertype
= ETH_P_IP
;
qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr)
{
	struct ethhdr *fake_hdr;
	struct iphdr *ip_hdr;

	QETH_DBF_TEXT(trace, 5, "skbfketh");
	skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH;
	/* this is a fake ethernet header */
	fake_hdr = (struct ethhdr *) skb->mac.raw;

	/* the destination MAC address */
	switch (skb->pkt_type) {
	case PACKET_MULTICAST:
		switch (skb->protocol) {
#ifdef CONFIG_QETH_IPV6
		case __constant_htons(ETH_P_IPV6):
			ndisc_mc_map((struct in6_addr *)
				     skb->data + QETH_FAKE_LL_V6_ADDR_POS,
				     fake_hdr->h_dest, card->dev, 0);
			break;
#endif /* CONFIG_QETH_IPV6 */
		case __constant_htons(ETH_P_IP):
			ip_hdr = (struct iphdr *)skb->data;
			ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
			break;
		default:
			memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
		}
		break;
	case PACKET_BROADCAST:
		memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
		break;
	default:
		memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
	}
	/* the source MAC address */
	if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
		memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
	else
		memset(fake_hdr->h_source, 0, ETH_ALEN);
	fake_hdr->h_proto = skb->protocol;
}
qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *hdr)
{
	if (card->dev->type == ARPHRD_IEEE802_TR)
		qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
	else
		qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
}
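
/*
 * Re-insert the VLAN tag carried in the qeth layer-3 header into the skb
 * data, so the stack sees a regular 802.1Q frame.
 */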
qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
		      struct qeth_hdr *hdr)
{
#ifdef CONFIG_QETH_VLAN
	if (hdr->hdr.l3.ext_flags &
	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
		vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
		*vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
			hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
		*(vlan_tag + 1) = skb->protocol;
		skb->protocol = __constant_htons(ETH_P_8021Q);
	}
#endif /* CONFIG_QETH_VLAN */
}

qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
			struct qeth_hdr *hdr)
{
	unsigned short vlan_id = 0;
#ifdef CONFIG_QETH_VLAN
	struct vlan_hdr *vhdr;
#endif

	skb->pkt_type = PACKET_HOST;
	skb->protocol = qeth_type_trans(skb, skb->dev);
	if (card->options.checksum_type == NO_CHECKSUMMING)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
#ifdef CONFIG_QETH_VLAN
	if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
		vhdr = (struct vlan_hdr *) skb->data;
		skb->protocol =
			__constant_htons(vhdr->h_vlan_encapsulated_proto);
		vlan_id = hdr->hdr.l2.vlan_id;
		skb_pull(skb, VLAN_HLEN);
	}
#endif
	return vlan_id;
}
qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
		 struct qeth_hdr *hdr)
{
#ifdef CONFIG_QETH_IPV6
	if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
		skb->pkt_type = PACKET_HOST;
		skb->protocol = qeth_type_trans(skb, card->dev);
		return;
	}
#endif /* CONFIG_QETH_IPV6 */
	skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
			      ETH_P_IP);
	switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
	case QETH_CAST_UNICAST:
		skb->pkt_type = PACKET_HOST;
		break;
	case QETH_CAST_MULTICAST:
		skb->pkt_type = PACKET_MULTICAST;
		card->stats.multicast++;
		break;
	case QETH_CAST_BROADCAST:
		skb->pkt_type = PACKET_BROADCAST;
		card->stats.multicast++;
		break;
	case QETH_CAST_ANYCAST:
	case QETH_CAST_NOCAST:
	default:
		skb->pkt_type = PACKET_HOST;
	}
	qeth_rebuild_skb_vlan(card, skb, hdr);
	if (card->options.fake_ll)
		qeth_rebuild_skb_fake_ll(card, skb, hdr);
	else
		skb->mac.raw = skb->data;
	skb->ip_summed = card->options.checksum_type;
	if (card->options.checksum_type == HW_CHECKSUMMING) {
		if ((hdr->hdr.l3.ext_flags &
		     (QETH_HDR_EXT_CSUM_HDR_REQ |
		      QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
		    (QETH_HDR_EXT_CSUM_HDR_REQ |
		     QETH_HDR_EXT_CSUM_TRANSP_REQ))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = SW_CHECKSUMMING;
	}
}
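
/*
 * Walk all packets contained in one inbound QDIO buffer, rebuild their
 * headers and hand them to the network stack.
 */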
qeth_process_inbound_buffer(struct qeth_card *card,
			    struct qeth_qdio_buffer *buf, int index)
{
	struct qdio_buffer_element *element;
	struct sk_buff *skb;
	struct qeth_hdr *hdr;

	/* get first element of current buffer */
	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.bufs_rec++;
#endif
	while ((skb = qeth_get_next_skb(card, buf->buffer, &element,
					&offset, &hdr))) {
		skb->dev = card->dev;
		if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
			vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
		else
			qeth_rebuild_skb(card, skb, hdr);
		/* is device UP ? */
		if (!(card->dev->flags & IFF_UP)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#ifdef CONFIG_QETH_VLAN
		if (vlan_tag)
			vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
		else
#endif
		rxrc = netif_rx(skb);
		card->dev->last_rx = jiffies;
		card->stats.rx_packets++;
		card->stats.rx_bytes += skb->len;
	}
}

static inline struct qeth_buffer_pool_entry *
qeth_get_buffer_pool_entry(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_DBF_TEXT(trace, 6, "gtbfplen");
	if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
		entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
				   struct qeth_buffer_pool_entry, list);
		list_del_init(&entry->list);
		return entry;
	}
	return NULL;
}

qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry;

	pool_entry = qeth_get_buffer_pool_entry(card);
	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	BUG_ON(!pool_entry);

	buf->pool_entry = pool_entry;
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr = pool_entry->elements[i];
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].flags = 0;
	}
	buf->state = QETH_QDIO_BUF_EMPTY;
}

qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
			 struct qeth_qdio_out_buffer *buf)
{
	struct sk_buff *skb;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].flags & 0x40)
		atomic_dec(&queue->set_pci_flags_count);

	while ((skb = skb_dequeue(&buf->skb_list))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	qeth_eddp_buf_release_contexts(buf);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
		buf->buffer->element[i].length = 0;
		buf->buffer->element[i].addr = NULL;
		buf->buffer->element[i].flags = 0;
	}
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
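
/*
 * Give processed input buffers back to the hardware, but only once enough
 * of them have accumulated, to save SIGA instructions.
 */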
qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;

	QETH_DBF_TEXT(trace, 6, "queinbuf");
	count = (index < queue->next_buf_to_init) ?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i)
			qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
#ifdef CONFIG_QETH_PERF_STATS
		card->perf_stats.inbound_do_qdio_cnt++;
		card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
#endif
		rc = do_QDIO(CARD_DDEV(card),
			     QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			     0, queue->next_buf_to_init, count, NULL);
#ifdef CONFIG_QETH_PERF_STATS
		card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
			card->perf_stats.inbound_do_qdio_start_time;
#endif
		if (rc) {
			PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
				   "return %i (device %s).\n",
				   rc, CARD_DDEV_ID(card));
			QETH_DBF_TEXT(trace, 2, "qinberr");
			QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}

qeth_put_buffer_pool_entry(struct qeth_card *card,
			   struct qeth_buffer_pool_entry *entry)
{
	QETH_DBF_TEXT(trace, 6, "ptbfplen");
	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
}

qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
			unsigned int qdio_err, unsigned int siga_err,
			unsigned int queue, int first_element, int count,
			unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;

	QETH_DBF_TEXT(trace, 6, "qdinput");
	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.inbound_cnt++;
	card->perf_stats.inbound_start_time = qeth_get_micros();
#endif
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			QETH_DBF_TEXT(trace, 1, "qdinchk");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 1, "%04X%04X", first_element, count);
			QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status);
			qeth_schedule_recovery(card);
			return;
		}
	}
	for (i = first_element; i < (first_element + count); ++i) {
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
		      qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
			qeth_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.inbound_time += qeth_get_micros() -
		card->perf_stats.inbound_start_time;
#endif
}
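
/*
 * Map the QDIO/SIGA completion codes of an outbound buffer to a qeth
 * send-error class (none, retry, link failure, or kick-it/recover).
 */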
qeth_handle_send_error(struct qeth_card *card,
		       struct qeth_qdio_out_buffer *buffer,
		       int qdio_err, int siga_err)
{
	int sbalf15 = buffer->buffer->element[15].flags & 0xff;
	int cc = siga_err & 3;

	QETH_DBF_TEXT(trace, 6, "hdsnderr");
	switch (cc) {
	case 0:
		if (qdio_err) {
			QETH_DBF_TEXT(trace, 1, "lnkfail");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 1, "%04x %02x",
				       (u16)qdio_err, (u8)sbalf15);
			return QETH_SEND_ERROR_LINK_FAILURE;
		}
		return QETH_SEND_ERROR_NONE;
	case 2:
		if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
			QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			return QETH_SEND_ERROR_KICK_IT;
		}
		if ((sbalf15 >= 15) && (sbalf15 <= 31))
			return QETH_SEND_ERROR_RETRY;
		return QETH_SEND_ERROR_LINK_FAILURE;
		/* look at qdio_error and sbalf 15 */
	case 1:
		QETH_DBF_TEXT(trace, 1, "SIGAcc1");
		QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
		return QETH_SEND_ERROR_LINK_FAILURE;
	case 3:
		QETH_DBF_TEXT(trace, 1, "SIGAcc3");
		QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
		return QETH_SEND_ERROR_KICK_IT;
	}
	return QETH_SEND_ERROR_LINK_FAILURE;
}
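
/*
 * Hand a range of primed outbound buffers to the hardware via do_QDIO,
 * setting the PCI request bit on a buffer where an interrupt is needed.
 */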
qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
		   int index, int count)
{
	struct qeth_qdio_out_buffer *buf;

	QETH_DBF_TEXT(trace, 6, "flushbuf");

	for (i = index; i < index + count; ++i) {
		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
			SBAL_FLAGS_LAST_ENTRY;

		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
			     (QETH_HIGH_WATERMARK_PACK -
			      QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		}
	}

	queue->card->dev->trans_start = jiffies;
#ifdef CONFIG_QETH_PERF_STATS
	queue->card->perf_stats.outbound_do_qdio_cnt++;
	queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
#endif
	if (under_int)
		rc = do_QDIO(CARD_DDEV(queue->card),
			     QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
			     queue->queue_no, index, count, NULL);
	else
		rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
			     queue->queue_no, index, count, NULL);
#ifdef CONFIG_QETH_PERF_STATS
	queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
		queue->card->perf_stats.outbound_do_qdio_start_time;
#endif
	if (rc) {
		QETH_DBF_TEXT(trace, 2, "flushbuf");
		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
		QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
		queue->card->stats.tx_errors += count;
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	atomic_add(count, &queue->used_buffers);
#ifdef CONFIG_QETH_PERF_STATS
	queue->card->perf_stats.bufs_sent += count;
#endif
}
/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_DBF_TEXT(trace, 6, "np->pack");
#ifdef CONFIG_QETH_PERF_STATS
			queue->card->perf_stats.sc_dp_p++;
#endif
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_count = 0;

	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_DBF_TEXT(trace, 6, "pack->np");
#ifdef CONFIG_QETH_PERF_STATS
			queue->card->perf_stats.sc_p_dp++;
#endif
			queue->do_pack = 0;
			/* flush packing buffers */
			buffer = &queue->bufs[queue->next_buf_to_fill];
			if ((atomic_read(&buffer->state) ==
			     QETH_QDIO_BUF_EMPTY) &&
			    (buffer->next_element_to_fill > 0)) {
				atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
			}
		}
	}
	return flush_count;
}

/*
 * Called to flush a packing buffer if no more pci flags are on the queue.
 * Checks if there is a packing buffer and prepares it to be flushed.
 * In that case returns 1, otherwise zero.
 */
qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = &queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
		return 1;
	}
	return 0;
}
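
/*
 * Called from the output handler: decide whether to leave packing mode and
 * whether a packed buffer must be flushed to get another PCI interrupt out.
 */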
qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
		    QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
#ifdef CONFIG_QETH_PERF_STATS
			if (q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
#endif
			if (flush_cnt)
				qeth_flush_buffers(queue, 1, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}

qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
			 unsigned int qdio_error, unsigned int siga_error,
			 unsigned int __queue, int first_element, int count,
			 unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;

	QETH_DBF_TEXT(trace, 6, "qdouhdl");
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			QETH_DBF_TEXT(trace, 2, "achkcond");
			QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 2, "%08x", status);
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
	}
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.outbound_handler_cnt++;
	card->perf_stats.outbound_handler_start_time = qeth_get_micros();
#endif
	for (i = first_element; i < (first_element + count); ++i) {
		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/* we only handle the KICK_IT error by doing a recovery */
		if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
		    == QETH_SEND_ERROR_KICK_IT) {
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
		qeth_clear_output_buffer(queue, buffer);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.outbound_handler_time += qeth_get_micros() -
		card->perf_stats.outbound_handler_start_time;
#endif
}
qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
{
	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}

qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
}

qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_DBF_TEXT(trace, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_DBF_TEXT(trace, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}

qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_DBF_TEXT(trace, 5, "freepool");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
	}
}

qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;

	QETH_DBF_TEXT(trace, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}

qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_DBF_TEXT(trace, 2, "realcbp");

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
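
/*
 * Allocate the inbound queue, the inbound buffer pool and all outbound
 * queues (including their per-buffer skb lists) for a card.
 */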
qeth_alloc_qdio_buffers(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 2, "allcqdbf");

	if (card->qdio.state == QETH_QDIO_ALLOCATED)
		return 0;

	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
	if (!card->qdio.in_q)
		return -ENOMEM;
	QETH_DBF_TEXT(setup, 2, "inq");
	QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
	memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
	/* give inbound qeth_qdio_buffers their qdio_buffers */
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		card->qdio.in_q->bufs[i].buffer =
			&card->qdio.in_q->qdio_bufs[i];
	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card)) {
		kfree(card->qdio.in_q);
		return -ENOMEM;
	}
	card->qdio.out_qs =
		kmalloc(card->qdio.no_out_queues *
			sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
	if (!card->qdio.out_qs) {
		qeth_free_buffer_pool(card);
		return -ENOMEM;
	}
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
					       GFP_KERNEL);
		if (!card->qdio.out_qs[i]) {
			while (i > 0)
				kfree(card->qdio.out_qs[--i]);
			kfree(card->qdio.out_qs);
			return -ENOMEM;
		}
		QETH_DBF_TEXT_(setup, 2, "outq %i", i);
		QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
		memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
		card->qdio.out_qs[i]->queue_no = i;
		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			card->qdio.out_qs[i]->bufs[j].buffer =
				&card->qdio.out_qs[i]->qdio_bufs[j];
			skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
					    skb_list);
			INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
		}
	}
	card->qdio.state = QETH_QDIO_ALLOCATED;
	return 0;
}
qeth_free_qdio_buffers(struct qeth_card *card)
{
	QETH_DBF_TEXT(trace, 2, "freeqdbf");
	if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
		return;
	kfree(card->qdio.in_q);
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					&card->qdio.out_qs[i]->bufs[j]);
		kfree(card->qdio.out_qs[i]);
	}
	kfree(card->qdio.out_qs);
	card->qdio.state = QETH_QDIO_UNINITIALIZED;
}

qeth_clear_qdio_buffers(struct qeth_card *card)
{
	QETH_DBF_TEXT(trace, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i)
		if (card->qdio.out_qs[i]) {
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
				qeth_clear_output_buffer(card->qdio.out_qs[i],
						&card->qdio.out_qs[i]->bufs[j]);
		}
}

qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 4, "intqdinf");
	card->qdio.state = QETH_QDIO_UNINITIALIZED;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
}
qeth_init_qdio_queues(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 2, "initqdqs");

	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1, NULL);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		return rc;
	}
	rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		return rc;
	}
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					&card->qdio.out_qs[i]->bufs[j]);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
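
/*
 * Fill a struct qdio_initialize from the card's settings (thresholds,
 * handlers, SBAL address arrays) and establish the QDIO queues.
 */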
qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_initialize init_data;
	char *qib_param_field;
	struct qdio_buffer **in_sbal_ptrs;
	struct qdio_buffer **out_sbal_ptrs;

	QETH_DBF_TEXT(setup, 2, "qdioest");

	qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
				  GFP_KERNEL);
	if (!qib_param_field)
		return -ENOMEM;

	memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));

	qeth_create_qib_param_field(card, qib_param_field);
	qeth_create_qib_param_field_blkt(card, qib_param_field);

	in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
			       GFP_KERNEL);
	if (!in_sbal_ptrs) {
		kfree(qib_param_field);
		return -ENOMEM;
	}
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		in_sbal_ptrs[i] = (struct qdio_buffer *)
			virt_to_phys(card->qdio.in_q->bufs[i].buffer);

	out_sbal_ptrs =
		kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
			sizeof(void *), GFP_KERNEL);
	if (!out_sbal_ptrs) {
		kfree(in_sbal_ptrs);
		kfree(qib_param_field);
		return -ENOMEM;
	}
	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
			out_sbal_ptrs[k] = (struct qdio_buffer *)
				virt_to_phys(card->qdio.out_qs[i]->
					     bufs[j].buffer);
		}

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.cdev = CARD_DDEV(card);
	init_data.q_format = qeth_get_qdio_q_format(card);
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = qib_param_field;
	init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
	init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
	init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
	init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = (qdio_handler_t *) qeth_qdio_input_handler;
	init_data.output_handler = (qdio_handler_t *) qeth_qdio_output_handler;
	init_data.int_parm = (unsigned long) card;
	init_data.flags = QDIO_INBOUND_0COPY_SBALS |
			  QDIO_OUTBOUND_0COPY_SBALS |
			  QDIO_USE_OUTBOUND_PCIS;
	init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;

	if (!(rc = qdio_initialize(&init_data)))
		card->qdio.state = QETH_QDIO_ESTABLISHED;

	kfree(out_sbal_ptrs);
	kfree(in_sbal_ptrs);
	kfree(qib_param_field);
	return rc;
}

qeth_qdio_activate(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card), 0);
}
qeth_clear_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 3, "clearch");
	card = CARD_FROM_CDEV(channel->ccwdev);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

qeth_halt_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 3, "haltch");
	card = CARD_FROM_CDEV(channel->ccwdev);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_DBF_TEXT(trace, 3, "haltchs");
	rc1 = qeth_halt_channel(&card->read);
	rc2 = qeth_halt_channel(&card->write);
	rc3 = qeth_halt_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_DBF_TEXT(trace, 3, "clearchs");
	rc1 = qeth_clear_channel(&card->read);
	rc2 = qeth_clear_channel(&card->write);
	rc3 = qeth_clear_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	QETH_DBF_TEXT(trace, 3, "clhacrd");
	QETH_DBF_HEX(trace, 3, &card, sizeof(void *));

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}
qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	QETH_DBF_TEXT(trace, 3, "qdioclr");
	if (card->qdio.state == QETH_QDIO_ESTABLISHED) {
		if ((rc = qdio_cleanup(CARD_DDEV(card),
				(card->info.type == QETH_CARD_TYPE_IQD) ?
				QDIO_FLAG_CLEANUP_USING_HALT :
				QDIO_FLAG_CLEANUP_USING_CLEAR)))
			QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
		card->qdio.state = QETH_QDIO_ALLOCATED;
	}
	if ((rc = qeth_clear_halt_card(card, use_halt)))
		QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}

qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "dmact");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, DM_ACT, DM_ACT_SIZE);

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
	return rc;
}

qeth_mpc_initialize(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 2, "mpcinit");

	if ((rc = qeth_issue_next_read(card))) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
	}
	if ((rc = qeth_cm_enable(card))) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
	}
	if ((rc = qeth_cm_setup(card))) {
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
	}
	if ((rc = qeth_ulp_enable(card))) {
		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
	}
	if ((rc = qeth_ulp_setup(card))) {
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
	}
	if ((rc = qeth_alloc_qdio_buffers(card))) {
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
	}
	if ((rc = qeth_qdio_establish(card))) {
		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
		qeth_free_qdio_buffers(card);
	}
	if ((rc = qeth_qdio_activate(card))) {
		QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
	}
	if ((rc = qeth_dm_act(card))) {
		QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
	}

	qeth_qdio_clear_card(card, card->info.type == QETH_CARD_TYPE_OSAE);
}
static struct net_device *
qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
{
	struct net_device *dev = NULL;

	switch (type) {
	case QETH_CARD_TYPE_OSAE:
		switch (linktype) {
		case QETH_LINK_TYPE_LANE_TR:
		case QETH_LINK_TYPE_HSTR:
#ifdef CONFIG_TR
			dev = alloc_trdev(0);
#endif /* CONFIG_TR */
			break;
		default:
			dev = alloc_etherdev(0);
		}
		break;
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	default:
		dev = alloc_etherdev(0);
	}
	return dev;
}

/* hard_header fake function; used in case fake_ll is set */
qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
		 unsigned short type, void *daddr, void *saddr,
		 unsigned len)
{
	if (dev->type == ARPHRD_IEEE802_TR) {
		struct trh_hdr *hdr;
		hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
		memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
		memcpy(hdr->daddr, "FAKELL", TR_ALEN);
		return QETH_FAKE_LL_LEN_TR;
	} else {
		hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
		memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
		memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
		if (type != ETH_P_802_3)
			hdr->h_proto = htons(type);
		else
			hdr->h_proto = htons(len);
		return QETH_FAKE_LL_LEN_ETH;
	}
}

qeth_send_packet(struct qeth_card *, struct sk_buff *);

qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 6, "hrdstxmi");
	card = (struct qeth_card *)dev->priv;
	if (skb == NULL) {
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		/* return OK; otherwise ksoftirqd goes to 100% */
		return NETDEV_TX_OK;
	}
	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		card->stats.tx_carrier_errors++;
		dev_kfree_skb_any(skb);
		/* return OK; otherwise ksoftirqd goes to 100% */
		return NETDEV_TX_OK;
	}
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.outbound_cnt++;
	card->perf_stats.outbound_start_time = qeth_get_micros();
#endif
	netif_stop_queue(dev);
	if ((rc = qeth_send_packet(card, skb))) {
		if (rc == -EBUSY) {
			return NETDEV_TX_BUSY;
		} else {
			card->stats.tx_errors++;
			card->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			/* set to OK; otherwise ksoftirqd goes to 100% */
			rc = NETDEV_TX_OK;
		}
	}
	netif_wake_queue(dev);
#ifdef CONFIG_QETH_PERF_STATS
	card->perf_stats.outbound_time += qeth_get_micros() -
		card->perf_stats.outbound_start_time;
#endif
	return rc;
}
qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
{
#ifdef CONFIG_QETH_VLAN
	struct vlan_group *vg;

	if (!(vg = card->vlangrp))
		return rc;

	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		if (vg->vlan_devices[i] == dev) {
			rc = QETH_VLAN_CARD;
			break;
		}
	}
#endif
	return rc;
}

qeth_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;

	read_lock_irqsave(&qeth_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
		rc = qeth_verify_vlan_dev(dev, card);
		if (rc)
			break;
	}
	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);

	return rc;
}

static struct qeth_card *
qeth_get_card_from_dev(struct net_device *dev)
{
	struct qeth_card *card = NULL;

	rc = qeth_verify_dev(dev);
	if (rc == QETH_REAL_CARD)
		card = (struct qeth_card *)dev->priv;
	else if (rc == QETH_VLAN_CARD)
		card = (struct qeth_card *)
			VLAN_DEV_INFO(dev)->real_dev->priv;

	QETH_DBF_TEXT_(trace, 4, "%d", rc);
	return card;
}

qeth_tx_timeout(struct net_device *dev)
{
	struct qeth_card *card;

	card = (struct qeth_card *) dev->priv;
	card->stats.tx_errors++;
	qeth_schedule_recovery(card);
}

qeth_open(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 4, "qethopen");

	card = (struct qeth_card *) dev->priv;

	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->options.layer2) &&
	    (!card->info.layer2_mac_registered)) {
		QETH_DBF_TEXT(trace, 4, "nomacadr");
		return -EPERM;
	}
	card->dev->flags |= IFF_UP;
	netif_start_queue(dev);
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;

	if (!card->lan_online) {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
	return 0;
}

qeth_stop(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 4, "qethstop");

	card = (struct qeth_card *) dev->priv;

	netif_stop_queue(dev);
	card->dev->flags &= ~IFF_UP;
	if (card->state == CARD_STATE_UP)
		card->state = CARD_STATE_SOFTSETUP;
	return 0;
}
qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
	int cast_type = RTN_UNSPEC;

	if (skb->dst && skb->dst->neighbour) {
		cast_type = skb->dst->neighbour->type;
		if ((cast_type == RTN_BROADCAST) ||
		    (cast_type == RTN_MULTICAST) ||
		    (cast_type == RTN_ANYCAST))
			return cast_type;
		return RTN_UNSPEC;
	}
	/* try something else */
	if (skb->protocol == ETH_P_IPV6)
		return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
	else if (skb->protocol == ETH_P_IP)
		return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
	if (!memcmp(skb->data, skb->dev->broadcast, 6))
		return RTN_BROADCAST;
	else {
		hdr_mac = *((u16 *)skb->data);
		/* tr multicast? */
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_HSTR:
		case QETH_LINK_TYPE_LANE_TR:
			if ((hdr_mac == QETH_TR_MAC_NC) ||
			    (hdr_mac == QETH_TR_MAC_C))
				return RTN_MULTICAST;
			break;
		/* eth or so multicast? */
		default:
			if ((hdr_mac == QETH_ETH_MAC_V4) ||
			    (hdr_mac == QETH_ETH_MAC_V6))
				return RTN_MULTICAST;
		}
	}
	return cast_type;
}

qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_TOS) {
				if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
					return 0;
			}
			if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
				return 3 - (skb->nh.iph->tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
qeth_get_ip_version(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case ETH_P_IPV6:
		return 6;
	case ETH_P_IP:
		return 4;
	default:
		return 0;
	}
}

qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
		 struct qeth_hdr **hdr, int ipv)
{
#ifdef CONFIG_QETH_VLAN
	u16 *tag;
#endif

	QETH_DBF_TEXT(trace, 6, "prepskb");

	rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
	if (rc)
		return rc;
#ifdef CONFIG_QETH_VLAN
	if (card->vlangrp && vlan_tx_tag_present(*skb) &&
	    ((ipv == 6) || card->options.layer2)) {
		/*
		 * Move the mac addresses (6 bytes src, 6 bytes dest)
		 * to the beginning of the new header. We are using three
		 * memcpys instead of one memmove to save cycles.
		 */
		skb_push(*skb, VLAN_HLEN);
		memcpy((*skb)->data, (*skb)->data + 4, 4);
		memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
		memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
		tag = (u16 *)((*skb)->data + 12);
		/*
		 * first two bytes  = ETH_P_8021Q (0x8100)
		 * second two bytes = VLANID
		 */
		*tag = __constant_htons(ETH_P_8021Q);
		*(tag + 1) = htons(vlan_tx_tag_get(*skb));
	}
#endif
	*hdr = (struct qeth_hdr *)
		qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
	return 0;
}

qeth_get_qeth_hdr_flags4(int cast_type)
{
	if (cast_type == RTN_MULTICAST)
		return QETH_CAST_MULTICAST;
	if (cast_type == RTN_BROADCAST)
		return QETH_CAST_BROADCAST;
	return QETH_CAST_UNICAST;
}

qeth_get_qeth_hdr_flags6(int cast_type)
{
	u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
	if (cast_type == RTN_MULTICAST)
		return ct | QETH_CAST_MULTICAST;
	if (cast_type == RTN_ANYCAST)
		return ct | QETH_CAST_ANYCAST;
	if (cast_type == RTN_BROADCAST)
		return ct | QETH_CAST_BROADCAST;
	return ct | QETH_CAST_UNICAST;
}

qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
			    struct sk_buff *skb)
{
	if (!memcmp(skb->data + QETH_HEADER_SIZE,
		    skb->dev->broadcast, 6)) { /* broadcast? */
		*(__u32 *)hdr->hdr.l2.flags |=
			QETH_LAYER2_FLAG_BROADCAST << 8;
		return;
	}
	hdr_mac = *((__u16 *)skb->data);
	/* tr multicast? */
	switch (card->info.link_type) {
	case QETH_LINK_TYPE_HSTR:
	case QETH_LINK_TYPE_LANE_TR:
		if ((hdr_mac == QETH_TR_MAC_NC) ||
		    (hdr_mac == QETH_TR_MAC_C))
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_MULTICAST << 8;
		else
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_UNICAST << 8;
		break;
	/* eth or so multicast? */
	default:
		if ((hdr_mac == QETH_ETH_MAC_V4) ||
		    (hdr_mac == QETH_ETH_MAC_V6))
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_MULTICAST << 8;
		else
			*(__u32 *)hdr->hdr.l2.flags |=
				QETH_LAYER2_FLAG_UNICAST << 8;
	}
}

qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
			struct sk_buff *skb, int cast_type)
{
	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;

	/* set byte 0 to "0x02" and byte 3 to casting flags */
	if (cast_type == RTN_MULTICAST)
		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
	else if (cast_type == RTN_BROADCAST)
		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
	else
		qeth_layer2_get_packet_type(card, hdr, skb);

	hdr->hdr.l2.pkt_length = skb->len - QETH_HEADER_SIZE;
#ifdef CONFIG_QETH_VLAN
	/* VSWITCH relies on the VLAN
	 * information to be present in
	 * the QDIO header */
	if ((card->vlangrp != NULL) &&
	    vlan_tx_tag_present(skb)) {
		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
		hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
	}
#endif
}
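
/*
 * Build the qeth transport header for an outgoing packet: layer-2 cards get
 * an L2 header, otherwise an L3 header with cast flags and next-hop address.
 */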
qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
		 struct sk_buff *skb, int ipv, int cast_type)
{
	QETH_DBF_TEXT(trace, 6, "fillhdr");

	memset(hdr, 0, sizeof(struct qeth_hdr));
	if (card->options.layer2) {
		qeth_layer2_fill_header(card, hdr, skb, cast_type);
		return;
	}
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;
#ifdef CONFIG_QETH_VLAN
	/*
	 * before we're going to overwrite this location with next hop ip.
	 * v6 uses passthrough, v4 sets the tag in the QDIO header.
	 */
	if (card->vlangrp && vlan_tx_tag_present(skb)) {
		hdr->hdr.l3.ext_flags = (ipv == 4) ?
			QETH_HDR_EXT_VLAN_FRAME :
			QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
	}
#endif /* CONFIG_QETH_VLAN */
	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
	if (ipv == 4) { /* IPv4 */
		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
		memset(hdr->hdr.l3.dest_addr, 0, 12);
		if ((skb->dst) && (skb->dst->neighbour)) {
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
				*((u32 *) skb->dst->neighbour->primary_key);
		} else {
			/* fill in destination address used in ip header */
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
		}
	} else if (ipv == 6) { /* IPv6 or passthru */
		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
		if ((skb->dst) && (skb->dst->neighbour)) {
			memcpy(hdr->hdr.l3.dest_addr,
			       skb->dst->neighbour->primary_key, 16);
		} else {
			/* fill in destination address used in ip header */
			memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
		}
	} else { /* passthrough */
		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
		    !memcmp(skb->data + sizeof(struct qeth_hdr) +
			    sizeof(__u16), skb->dev->broadcast, 6)) {
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
					    QETH_HDR_PASSTHRU;
		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
				   skb->dev->broadcast, 6)) { /* broadcast? */
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
					    QETH_HDR_PASSTHRU;
		} else {
			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
		}
	}
}
__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
		   int is_tso, int *next_element_to_fill)
{
	int length = skb->len;

	element = *next_element_to_fill;
	first_lap = (is_tso == 0 ? 1 : 0);

	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;

		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		if (!length) {
			if (first_lap)
				buffer->element[element].flags = 0;
			else
				buffer->element[element].flags =
					SBAL_FLAGS_LAST_FRAG;
		} else {
			if (first_lap)
				buffer->element[element].flags =
					SBAL_FLAGS_FIRST_FRAG;
			else
				buffer->element[element].flags =
					SBAL_FLAGS_MIDDLE_FRAG;
		}
		data += length_here;
		element++;
		first_lap = 0;
	}
	*next_element_to_fill = element;
}
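
/*
 * Map one skb (including a possible TSO header element) into the buffer's
 * SBAL elements and decide whether the buffer is ready to be flushed.
 */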
qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		 struct qeth_qdio_out_buffer *buf,
		 struct sk_buff *skb)
{
	struct qdio_buffer *buffer;
	struct qeth_hdr_tso *hdr;
	int flush_cnt = 0, hdr_len, large_send = 0;

	QETH_DBF_TEXT(trace, 6, "qdfillbf");

	buffer = buf->buffer;
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	hdr = (struct qeth_hdr_tso *) skb->data;
	/* check first on TSO .... */
	if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
		/* fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}
	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer, large_send,
				   (int *)&buf->next_element_to_fill);
	else
		__qeth_fill_buffer_frag(skb, buffer, large_send,
					(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		if (buf->next_element_to_fill >=
		    QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}

qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			 struct sk_buff *skb, struct qeth_hdr *hdr,
			 int elements_needed,
			 struct qeth_eddp_context *ctx)
{
	struct qeth_qdio_out_buffer *buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 6, "dosndpfa");

	/* spin until we get the queue ... */
	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
				       QETH_OUT_Q_LOCKED, &queue->state));
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		card->stats.tx_dropped++;
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}
	if (ctx == NULL)
		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
					  QDIO_MAX_BUFFERS_PER_Q;
	else {
		buffers_needed = qeth_eddp_check_buffers_for_context(queue, ctx);
		if (buffers_needed < 0) {
			card->stats.tx_dropped++;
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
			return -EBUSY;
		}
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + buffers_needed) %
			QDIO_MAX_BUFFERS_PER_Q;
	}
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	if (ctx == NULL) {
		qeth_fill_buffer(queue, buffer, skb);
		qeth_flush_buffers(queue, 0, index, 1);
	} else {
		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
		WARN_ON(buffers_needed != flush_cnt);
		qeth_flush_buffers(queue, 0, index, flush_cnt);
	}
	return 0;
}
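
/*
 * Send on an OSA queue with packing support: several skbs may be packed
 * into one buffer before it is primed and flushed to the hardware.
 */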
qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		    struct sk_buff *skb, struct qeth_hdr *hdr,
		    int elements_needed, struct qeth_eddp_context *ctx)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_count = 0;

	QETH_DBF_TEXT(trace, 6, "dosndpkt");

	/* spin until we get the queue ... */
	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
				       QETH_OUT_Q_LOCKED, &queue->state));
	start_index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		card->stats.tx_dropped++;
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}
	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		if (ctx == NULL) {
			/* does packet fit in current buffer? */
			if ((QETH_MAX_BUFFER_ELEMENTS(card) -
			     buffer->next_element_to_fill) < elements_needed) {
				/* ... no -> set state PRIMED */
				atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
				buffer = &queue->bufs[queue->next_buf_to_fill];
				/* we did a step forward, so check buffer state
				 * again */
				if (atomic_read(&buffer->state) !=
				    QETH_QDIO_BUF_EMPTY) {
					card->stats.tx_dropped++;
					qeth_flush_buffers(queue, 0, start_index, flush_count);
					atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
					return -EBUSY;
				}
			}
		} else {
			/* check if we have enough elements (including following
			 * free buffers) to handle eddp context */
			if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0) {
				printk("eddp tx_dropped 1\n");
				card->stats.tx_dropped++;
			}
		}
	}
	if (ctx == NULL)
		tmp = qeth_fill_buffer(queue, buffer, skb);
	else {
		tmp = qeth_eddp_fill_buffer(queue, ctx, queue->next_buf_to_fill);
		if (tmp < 0) {
			printk("eddp tx_dropped 2\n");
			card->stats.tx_dropped++;
		}
	}
	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
				  QDIO_MAX_BUFFERS_PER_Q;
	flush_count += tmp;
	if (flush_count)
		qeth_flush_buffers(queue, 0, start_index, flush_count);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
			flush_count += qeth_flush_buffers_on_no_pci(queue);
		if (flush_count)
			qeth_flush_buffers(queue, 0, start_index, flush_count);
	}
	/* at this point the queue is UNLOCKED again */
#ifdef CONFIG_QETH_PERF_STATS
	queue->card->perf_stats.bufs_sent_pack += flush_count;
#endif /* CONFIG_QETH_PERF_STATS */
}

qeth_get_elements_no(struct qeth_card *card, void *hdr,
		     struct sk_buff *skb, int elems)
{
	int elements_needed = 0;

	if (skb_shinfo(skb)->nr_frags > 0) {
		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
	}
	if (elements_needed == 0)
		elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
					+ skb->len) >> PAGE_SHIFT);
	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
		PRINT_ERR("qeth_do_send_packet: invalid size of "
			  "IP packet (Number=%d / Length=%d). Discarded.\n",
			  (elements_needed + elems), skb->len);
		return 0;
	}
	return elements_needed;
}
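
/*
 * Central transmit path: classify the packet, pick the outbound queue,
 * prepare TSO/EDDP if requested and hand the skb to the queue-specific
 * send routine.
 */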
static int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
{
	int ipv = 0;
	int cast_type;
	struct qeth_qdio_out_q *queue;
	struct qeth_hdr *hdr = NULL;
	int elements_needed = 0;
	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
	struct qeth_eddp_context *ctx = NULL;
	int rc;

	QETH_DBF_TEXT(trace, 6, "sendpkt");

	if (!card->options.layer2) {
		ipv = qeth_get_ip_version(skb);
		if ((card->dev->hard_header == qeth_fake_header) && ipv) {
			if ((skb = qeth_pskb_unshare(skb, GFP_ATOMIC)) == NULL) {
				card->stats.tx_dropped++;
				dev_kfree_skb_irq(skb);
				return 0;
			}
			if (card->dev->type == ARPHRD_IEEE802_TR) {
				skb_pull(skb, QETH_FAKE_LL_LEN_TR);
			} else
				skb_pull(skb, QETH_FAKE_LL_LEN_ETH);
		}
	}
	cast_type = qeth_get_cast_type(card, skb);
	if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)) {
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];

	if (skb_shinfo(skb)->tso_size)
		large_send = card->options.large_send;

	/* are we able to do TSO ? If so, prepare and send it from here */
	if ((large_send == QETH_LARGE_SEND_TSO) &&
	    (cast_type == RTN_UNSPEC)) {
		rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
		if (rc) {
			card->stats.tx_dropped++;
			card->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		elements_needed++;
	} else {
		if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
			QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
			return rc;
		}
		qeth_fill_header(card, hdr, skb, ipv, cast_type);
	}

	if (large_send == QETH_LARGE_SEND_EDDP) {
		ctx = qeth_eddp_create_context(card, skb, hdr);
		if (ctx == NULL) {
			PRINT_WARN("could not create eddp context\n");
			return -EINVAL;
		}
	} else {
		int elems = qeth_get_elements_no(card, (void *) hdr, skb,
						 elements_needed);
		if (!elems)
			return -EINVAL;
		elements_needed += elems;
	}

	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, skb, hdr,
					 elements_needed, ctx);
	else
		rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
					      elements_needed, ctx);
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += skb->len;
#ifdef CONFIG_QETH_PERF_STATS
		if (skb_shinfo(skb)->tso_size &&
		    !(large_send == QETH_LARGE_SEND_NO)) {
			card->perf_stats.large_send_bytes += skb->len;
			card->perf_stats.large_send_cnt++;
		}
		if (skb_shinfo(skb)->nr_frags > 0) {
			card->perf_stats.sg_skbs_sent++;
			/* nr_frags + skb->data */
			card->perf_stats.sg_frags_sent +=
				skb_shinfo(skb)->nr_frags + 1;
		}
#endif /* CONFIG_QETH_PERF_STATS */
	}
	if (ctx != NULL) {
		/* drop creator's reference */
		qeth_eddp_put_context(ctx);
		/* free skb; it's not referenced by a buffer */
		if (rc == 0)
			dev_kfree_skb_any(skb);
	}
	return rc;
}
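/*
 * Minimal MII register emulation; there is no directly accessible PHY on
 * a qeth device, so plausible register values are synthesized from the
 * card and interface information.
 */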
static int
qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = (struct qeth_card *) dev->priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}
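/* MII register writes are accepted but ignored (see the TODO in qeth_do_ioctl) */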
static void
qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
	case MII_BMSR: /* Basic mode status register */
	case MII_PHYSID1: /* PHYS ID 1 */
	case MII_PHYSID2: /* PHYS ID 2 */
	case MII_ADVERTISE: /* Advertisement control reg */
	case MII_LPA: /* Link partner ability reg */
	case MII_EXPANSION: /* Expansion register */
	case MII_DCOUNTER: /* disconnect counter */
	case MII_FCSCOUNTER: /* false carrier counter */
	case MII_NWAYTEST: /* N-way auto-neg test register */
	case MII_RERRCOUNTER: /* rx error counter */
	case MII_SREVISION: /* silicon revision */
	case MII_RESV1: /* reserved 1 */
	case MII_LBRERROR: /* loopback, rx, bypass error */
	case MII_PHYADDR: /* physical address */
	case MII_RESV2: /* reserved 2 */
	case MII_TPISTATUS: /* TPI status for 10mbps */
	case MII_NCONFIG: /* network interface config */
	default:
		break;
	}
}
static inline const char *
qeth_arp_get_error_cause(int *rc)
{
	switch (*rc) {
	case QETH_IPA_ARP_RC_FAILED:
		return "operation failed";
	case QETH_IPA_ARP_RC_NOTSUPP:
		return "operation not supported";
	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
		return "argument out of range";
	case QETH_IPA_ARP_RC_Q_NOTSUPP:
		return "query operation not supported";
	case QETH_IPA_ARP_RC_Q_NO_DATA:
		return "no query data available";
	default:
		return "unknown error";
	}
}
static int
qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
			     __u16, long);

static int
qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
	int rc;
	int tmp;

	QETH_DBF_TEXT(trace, 3, "arpstnoe");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
					  no_entries);
	if (rc) {
		tmp = rc;
		PRINT_WARN("Could not set number of ARP entries on %s: "
			   "%s (0x%x/%d)\n",
			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
			   tmp, tmp);
	}
	return rc;
}
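/*
 * Copy ARP query entries into the user buffer, dropping the leading
 * 32 bytes of media specific information from each entry.
 */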
static void
qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
			       struct qeth_arp_query_data *qdata,
			       int entry_size, int uentry_size)
{
	char *entry_ptr;
	char *uentry_ptr;
	int i;

	entry_ptr = (char *)&qdata->data;
	uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
	for (i = 0; i < qdata->no_entries; ++i) {
		/* strip off 32 bytes "media specific information" */
		memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
		entry_ptr += entry_size;
		uentry_ptr += uentry_size;
	}
}
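/*
 * Callback for IPA_CMD_ASS_ARP_QUERY_INFO: appends each batch of ARP
 * entries to the user buffer and returns 1 while further replies are
 * still expected.
 */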
static int
qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
		  unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int entry_size;
	int uentry_size;
	int i;

	QETH_DBF_TEXT(trace, 4, "arpquecb");

	qinfo = (struct qeth_arp_query_info *) reply->param;
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace, 4, "qaer1%i", cmd->hdr.return_code);
		goto out_error;
	}
	if (cmd->data.setassparms.hdr.return_code) {
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_DBF_TEXT_(trace, 4, "qaer2%i", cmd->hdr.return_code);
		goto out_error;
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	switch (qdata->reply_bits) {
	case 5:
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
		break;
	case 7:
		/* fall through to default */
	default:
		/* tr is the same as eth -> entry7 */
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
		break;
	}
	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) <
	    qdata->no_entries * uentry_size) {
		QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
		cmd->hdr.return_code = -ENOMEM;
		PRINT_WARN("query ARP user space buffer is too small for "
			   "the returned number of ARP entries. "
			   "Aborting query!\n");
		goto out_error;
	}
	QETH_DBF_TEXT_(trace, 4, "anore%i",
		       cmd->data.setassparms.hdr.number_of_replies);
	QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
	QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);

	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
		/* strip off "media specific information" */
		qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
					       uentry_size);
	} else
		/* copy entries to user buffer */
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&qdata->data, qdata->no_entries * uentry_size);

	qinfo->no_entries += qdata->no_entries;
	qinfo->udata_offset += (qdata->no_entries * uentry_size);
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
	    cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
	return 0;
out_error:
	i = 0;
	memcpy(qinfo->udata, &i, 4);
	return 0;
}
static int
qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		      int len, int (*reply_cb)(struct qeth_card *,
					       struct qeth_reply *,
					       unsigned long),
		      void *reply_param)
{
	QETH_DBF_TEXT(trace, 4, "sendarp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
static int
qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		       int len, int (*reply_cb)(struct qeth_card *,
						struct qeth_reply *,
						unsigned long),
		       void *reply_param)
{
	u16 s1, s2;

	QETH_DBF_TEXT(trace, 4, "sendsnmp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	/* adjust PDU length fields in IPA_PDU_HEADER */
	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
	s2 = (u32) len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
static struct qeth_cmd_buffer *
qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
			 __u16, __u16, enum qeth_prot_versions);

static int
qeth_arp_query(struct qeth_card *card, char *udata)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_arp_query_info qinfo = {0, };
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace, 3, "arpquery");

	if (!qeth_is_supported(card, /*IPA_QUERY_ARP_ADDR_INFO*/
			       IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	/* get size of userspace buffer and mask_bits -> 6 bytes */
	if (copy_from_user(&qinfo, udata, 6))
		return -EFAULT;
	if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
		return -ENOMEM;
	memset(qinfo.udata, 0, qinfo.udata_len);
	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
				       IPA_CMD_ASS_ARP_QUERY_INFO,
				       sizeof(int), QETH_PROT_IPV4);

	rc = qeth_send_ipa_arp_cmd(card, iob,
				   QETH_SETASS_BASE_LEN + QETH_ARP_CMD_LEN,
				   qeth_arp_query_cb, (void *)&qinfo);
	if (rc) {
		tmp = rc;
		PRINT_WARN("Error while querying ARP cache on %s: %s "
			   "(0x%x/%d)\n",
			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
			   tmp, tmp);
		copy_to_user(udata, qinfo.udata, 4);
	} else {
		copy_to_user(udata, qinfo.udata, qinfo.udata_len);
	}
	kfree(qinfo.udata);
	return rc;
}
/**
 * SNMP command callback
 */
static int
qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
		     unsigned long sdata)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_info *qinfo;
	struct qeth_snmp_cmd *snmp;
	unsigned char *data;
	__u16 data_len;

	QETH_DBF_TEXT(trace, 3, "snpcmdcb");

	cmd = (struct qeth_ipa_cmd *) sdata;
	data = (unsigned char *)((char *)cmd - reply->offset);
	qinfo = (struct qeth_arp_query_info *) reply->param;
	snmp = &cmd->data.setadapterparms.data.snmp;

	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace, 4, "scer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
		QETH_DBF_TEXT_(trace, 4, "scer2%i", cmd->hdr.return_code);
		return 0;
	}
	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
	if (cmd->data.setadapterparms.hdr.seq_no == 1)
		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
	else
		data_len -= (__u16)((char *)&snmp->request - (char *)cmd);

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
		cmd->hdr.return_code = -ENOMEM;
		return 0;
	}
	QETH_DBF_TEXT_(trace, 4, "snore%i",
		       cmd->data.setadapterparms.hdr.used_total);
	QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)snmp,
		       data_len + offsetof(struct qeth_snmp_cmd, data));
		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
	} else {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&snmp->request, data_len);
	}
	qinfo->udata_offset += data_len;
	/* check if all replies received ... */
	QETH_DBF_TEXT_(trace, 4, "srtot%i",
		       cmd->data.setadapterparms.hdr.used_total);
	QETH_DBF_TEXT_(trace, 4, "srseq%i",
		       cmd->data.setadapterparms.hdr.seq_no);
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
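/* allocate an IPA command buffer pre-initialized with a SETADAPTERPARMS header */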
static struct qeth_cmd_buffer *
qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
		       enum qeth_prot_versions);

static struct qeth_cmd_buffer *
qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
				     QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
	cmd->data.setadapterparms.hdr.command_code = command;
	cmd->data.setadapterparms.hdr.used_total = 1;
	cmd->data.setadapterparms.hdr.seq_no = 1;

	return iob;
}
/**
 * function to send SNMP commands to OSA-E card
 */
static int
qeth_snmp_command(struct qeth_card *card, char *udata)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_snmp_ureq *ureq;
	int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "snmpcmd");

	if (card->info.guestlan)
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    (!card->options.layer2)) {
		PRINT_WARN("SNMP Query MIBS not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	/* skip 4 bytes (data_len struct member) to get req_len */
	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
		return -EFAULT;
	ureq = kmalloc(req_len + sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
	if (!ureq) {
		QETH_DBF_TEXT(trace, 2, "snmpnome");
		return -ENOMEM;
	}
	if (copy_from_user(ureq, udata,
			   req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
		kfree(ureq);
		return -EFAULT;
	}
	qinfo.udata_len = ureq->hdr.data_len;
	if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))) {
		kfree(ureq);
		return -ENOMEM;
	}
	memset(qinfo.udata, 0, qinfo.udata_len);
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
				    qeth_snmp_command_cb, (void *)&qinfo);
	if (rc)
		PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
			   QETH_CARD_IFNAME(card), rc);
	else
		copy_to_user(udata, qinfo.udata, qinfo.udata_len);

	kfree(ureq);
	kfree(qinfo.udata);
	return rc;
}
static int
qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
			    unsigned long);

static int
qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
		      __u16, long,
		      int (*reply_cb)
		      (struct qeth_card *, struct qeth_reply *, unsigned long),
		      void *reply_param);

static int
qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
{
	struct qeth_cmd_buffer *iob;
	char buf[16];
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace, 3, "arpadent");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
				       IPA_CMD_ASS_ARP_ADD_ENTRY,
				       sizeof(struct qeth_arp_cache_entry),
				       QETH_PROT_IPV4);
	rc = qeth_send_setassparms(card, iob,
				   sizeof(struct qeth_arp_cache_entry),
				   (unsigned long) entry,
				   qeth_default_setassparms_cb, NULL);
	if (rc) {
		tmp = rc;
		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
		PRINT_WARN("Could not add ARP entry for address %s on %s: "
			   "%s (0x%x/%d)\n",
			   buf, QETH_CARD_IFNAME(card),
			   qeth_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
static int
qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
{
	struct qeth_cmd_buffer *iob;
	char buf[16] = {0, };
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace, 3, "arprment");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	memcpy(buf, entry, 12);
	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
				       12,
				       QETH_PROT_IPV4);
	rc = qeth_send_setassparms(card, iob,
				   12, (unsigned long)buf,
				   qeth_default_setassparms_cb, NULL);
	if (rc) {
		tmp = rc;
		memset(buf, 0, 16);
		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
		PRINT_WARN("Could not delete ARP entry for address %s on %s: "
			   "%s (0x%x/%d)\n",
			   buf, QETH_CARD_IFNAME(card),
			   qeth_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
static int
qeth_arp_flush_cache(struct qeth_card *card)
{
	int rc;
	int tmp;

	QETH_DBF_TEXT(trace, 3, "arpflush");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
	if (rc) {
		tmp = rc;
		PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
			   tmp, tmp);
	}
	return rc;
}
static int
qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = (struct qeth_card *)dev->priv;
	struct qeth_arp_cache_entry arp_entry;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	switch (cmd) {
	case SIOC_QETH_ARP_SET_NO_ENTRIES:
		if (!capable(CAP_NET_ADMIN) ||
		    (card->options.layer2)) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
		break;
	case SIOC_QETH_ARP_QUERY_INFO:
		if (!capable(CAP_NET_ADMIN) ||
		    (card->options.layer2)) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_ARP_ADD_ENTRY:
		if (!capable(CAP_NET_ADMIN) ||
		    (card->options.layer2)) {
			rc = -EPERM;
			break;
		}
		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
				   sizeof(struct qeth_arp_cache_entry)))
			rc = -EFAULT;
		else
			rc = qeth_arp_add_entry(card, &arp_entry);
		break;
	case SIOC_QETH_ARP_REMOVE_ENTRY:
		if (!capable(CAP_NET_ADMIN) ||
		    (card->options.layer2)) {
			rc = -EPERM;
			break;
		}
		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
				   sizeof(struct qeth_arp_cache_entry)))
			rc = -EFAULT;
		else
			rc = qeth_arp_remove_entry(card, &arp_entry);
		break;
	case SIOC_QETH_ARP_FLUSH_CACHE:
		if (!capable(CAP_NET_ADMIN) ||
		    (card->options.layer2)) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_flush_cache(card);
		break;
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
		    !card->info.guestlan)
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev, mii_data->phy_id,
							   mii_data->reg_num);
		break;
	case SIOCSMIIREG:
		rc = -EOPNOTSUPP;
		break;
		/* TODO: remove return if qeth_mdio_write does something */
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
					mii_data->val_in);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
	return rc;
}
static struct net_device_stats *
qeth_get_stats(struct net_device *dev)
{
	struct qeth_card *card;

	card = (struct qeth_card *) (dev->priv);

	QETH_DBF_TEXT(trace, 5, "getstat");

	return &card->stats;
}
static int
qeth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct qeth_card *card;
	char dbf_text[15];

	card = (struct qeth_card *) (dev->priv);

	QETH_DBF_TEXT(trace, 4, "chgmtu");
	sprintf(dbf_text, "%8x", new_mtu);
	QETH_DBF_TEXT(trace, 4, dbf_text);

	if (new_mtu < 64)
		return -EINVAL;
	if (new_mtu > 65535)
		return -EINVAL;
	if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
	    (!qeth_mtu_is_valid(card, new_mtu)))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
5143 qeth_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*grp
)
5145 struct qeth_card
*card
;
5146 unsigned long flags
;
5148 QETH_DBF_TEXT(trace
,4,"vlanreg");
5150 card
= (struct qeth_card
*) dev
->priv
;
5151 spin_lock_irqsave(&card
->vlanlock
, flags
);
5152 card
->vlangrp
= grp
;
5153 spin_unlock_irqrestore(&card
->vlanlock
, flags
);
5157 qeth_free_vlan_buffer(struct qeth_card
*card
, struct qeth_qdio_out_buffer
*buf
,
5161 struct sk_buff
*skb
;
5162 struct sk_buff_head tmp_list
;
5164 skb_queue_head_init(&tmp_list
);
5165 for(i
= 0; i
< QETH_MAX_BUFFER_ELEMENTS(card
); ++i
){
5166 while ((skb
= skb_dequeue(&buf
->skb_list
))){
5167 if (vlan_tx_tag_present(skb
) &&
5168 (vlan_tx_tag_get(skb
) == vid
)) {
5169 atomic_dec(&skb
->users
);
5172 skb_queue_tail(&tmp_list
, skb
);
5175 while ((skb
= skb_dequeue(&tmp_list
)))
5176 skb_queue_tail(&buf
->skb_list
, skb
);
5180 qeth_free_vlan_skbs(struct qeth_card
*card
, unsigned short vid
)
5184 QETH_DBF_TEXT(trace
, 4, "frvlskbs");
5185 for (i
= 0; i
< card
->qdio
.no_out_queues
; ++i
){
5186 for (j
= 0; j
< QDIO_MAX_BUFFERS_PER_Q
; ++j
)
5187 qeth_free_vlan_buffer(card
, &card
->qdio
.
5188 out_qs
[i
]->bufs
[j
], vid
);
5193 qeth_free_vlan_addresses4(struct qeth_card
*card
, unsigned short vid
)
5195 struct in_device
*in_dev
;
5196 struct in_ifaddr
*ifa
;
5197 struct qeth_ipaddr
*addr
;
5199 QETH_DBF_TEXT(trace
, 4, "frvaddr4");
5203 in_dev
= __in_dev_get(card
->vlangrp
->vlan_devices
[vid
]);
5206 for (ifa
= in_dev
->ifa_list
; ifa
; ifa
= ifa
->ifa_next
) {
5207 addr
= qeth_get_addr_buffer(QETH_PROT_IPV4
);
5209 addr
->u
.a4
.addr
= ifa
->ifa_address
;
5210 addr
->u
.a4
.mask
= ifa
->ifa_mask
;
5211 addr
->type
= QETH_IP_TYPE_NORMAL
;
5212 if (!qeth_delete_ip(card
, addr
))
5221 qeth_free_vlan_addresses6(struct qeth_card
*card
, unsigned short vid
)
5223 #ifdef CONFIG_QETH_IPV6
5224 struct inet6_dev
*in6_dev
;
5225 struct inet6_ifaddr
*ifa
;
5226 struct qeth_ipaddr
*addr
;
5228 QETH_DBF_TEXT(trace
, 4, "frvaddr6");
5231 in6_dev
= in6_dev_get(card
->vlangrp
->vlan_devices
[vid
]);
5234 for (ifa
= in6_dev
->addr_list
; ifa
; ifa
= ifa
->lst_next
){
5235 addr
= qeth_get_addr_buffer(QETH_PROT_IPV6
);
5237 memcpy(&addr
->u
.a6
.addr
, &ifa
->addr
,
5238 sizeof(struct in6_addr
));
5239 addr
->u
.a6
.pfxlen
= ifa
->prefix_len
;
5240 addr
->type
= QETH_IP_TYPE_NORMAL
;
5241 if (!qeth_delete_ip(card
, addr
))
5245 in6_dev_put(in6_dev
);
5246 #endif /* CONFIG_QETH_IPV6 */
5250 qeth_layer2_send_setdelvlan(struct qeth_card
*card
, __u16 i
,
5251 enum qeth_ipa_cmds ipacmd
)
5254 struct qeth_ipa_cmd
*cmd
;
5255 struct qeth_cmd_buffer
*iob
;
5257 QETH_DBF_TEXT_(trace
, 4, "L2sdv%x",ipacmd
);
5258 iob
= qeth_get_ipacmd_buffer(card
, ipacmd
, QETH_PROT_IPV4
);
5259 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
5260 cmd
->data
.setdelvlan
.vlan_id
= i
;
5262 rc
= qeth_send_ipa_cmd(card
, iob
, NULL
, NULL
);
5264 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5265 "Continuing\n",i
, QETH_CARD_IFNAME(card
), rc
);
5266 QETH_DBF_TEXT_(trace
, 2, "L2VL%4x", ipacmd
);
5267 QETH_DBF_TEXT_(trace
, 2, "L2%s", CARD_BUS_ID(card
));
5268 QETH_DBF_TEXT_(trace
, 2, "err%d", rc
);
5273 qeth_layer2_process_vlans(struct qeth_card
*card
, int clear
)
5277 QETH_DBF_TEXT(trace
, 3, "L2prcvln");
5281 for (i
= 0; i
< VLAN_GROUP_ARRAY_LEN
; i
++) {
5282 if (card
->vlangrp
->vlan_devices
[i
] == NULL
)
5285 qeth_layer2_send_setdelvlan(card
, i
, IPA_CMD_DELVLAN
);
5287 qeth_layer2_send_setdelvlan(card
, i
, IPA_CMD_SETVLAN
);
5291 /*add_vid is layer 2 used only ....*/
5293 qeth_vlan_rx_add_vid(struct net_device
*dev
, unsigned short vid
)
5295 struct qeth_card
*card
;
5297 QETH_DBF_TEXT_(trace
, 4, "aid:%d", vid
);
5299 card
= (struct qeth_card
*) dev
->priv
;
5300 if (!card
->options
.layer2
)
5302 qeth_layer2_send_setdelvlan(card
, vid
, IPA_CMD_SETVLAN
);
5305 /*... kill_vid used for both modes*/
5307 qeth_vlan_rx_kill_vid(struct net_device
*dev
, unsigned short vid
)
5309 struct qeth_card
*card
;
5310 unsigned long flags
;
5312 QETH_DBF_TEXT_(trace
, 4, "kid:%d", vid
);
5314 card
= (struct qeth_card
*) dev
->priv
;
5315 /* free all skbs for the vlan device */
5316 qeth_free_vlan_skbs(card
, vid
);
5317 spin_lock_irqsave(&card
->vlanlock
, flags
);
5318 /* unregister IP addresses of vlan device */
5319 qeth_free_vlan_addresses4(card
, vid
);
5320 qeth_free_vlan_addresses6(card
, vid
);
5322 card
->vlangrp
->vlan_devices
[vid
] = NULL
;
5323 spin_unlock_irqrestore(&card
->vlanlock
, flags
);
5324 if (card
->options
.layer2
)
5325 qeth_layer2_send_setdelvlan(card
, vid
, IPA_CMD_DELVLAN
);
5326 qeth_set_multicast_list(card
->dev
);
5331 * set multicast address on card
5334 qeth_set_multicast_list(struct net_device
*dev
)
5336 struct qeth_card
*card
= (struct qeth_card
*) dev
->priv
;
5338 QETH_DBF_TEXT(trace
,3,"setmulti");
5339 qeth_delete_mc_addresses(card
);
5340 qeth_add_multicast_ipv4(card
);
5341 #ifdef CONFIG_QETH_IPV6
5342 qeth_add_multicast_ipv6(card
);
5344 if (qeth_set_thread_start_bit(card
, QETH_SET_IP_THREAD
) == 0)
5345 schedule_work(&card
->kernel_thread_starter
);
5349 qeth_neigh_setup(struct net_device
*dev
, struct neigh_parms
*np
)
5355 qeth_get_mac_for_ipm(__u32 ipm
, char *mac
, struct net_device
*dev
)
5357 if (dev
->type
== ARPHRD_IEEE802_TR
)
5358 ip_tr_mc_map(ipm
, mac
);
5360 ip_eth_mc_map(ipm
, mac
);
5363 static struct qeth_ipaddr
*
5364 qeth_get_addr_buffer(enum qeth_prot_versions prot
)
5366 struct qeth_ipaddr
*addr
;
5368 addr
= kmalloc(sizeof(struct qeth_ipaddr
), GFP_ATOMIC
);
5370 PRINT_WARN("Not enough memory to add address\n");
5373 memset(addr
,0,sizeof(struct qeth_ipaddr
));
5374 addr
->type
= QETH_IP_TYPE_NORMAL
;
5380 qeth_delete_mc_addresses(struct qeth_card
*card
)
5382 struct qeth_ipaddr
*iptodo
;
5383 unsigned long flags
;
5385 QETH_DBF_TEXT(trace
,4,"delmc");
5386 iptodo
= qeth_get_addr_buffer(QETH_PROT_IPV4
);
5388 QETH_DBF_TEXT(trace
, 2, "dmcnomem");
5391 iptodo
->type
= QETH_IP_TYPE_DEL_ALL_MC
;
5392 spin_lock_irqsave(&card
->ip_lock
, flags
);
5393 if (!__qeth_insert_ip_todo(card
, iptodo
, 0))
5395 spin_unlock_irqrestore(&card
->ip_lock
, flags
);
5399 qeth_add_mc(struct qeth_card
*card
, struct in_device
*in4_dev
)
5401 struct qeth_ipaddr
*ipm
;
5402 struct ip_mc_list
*im4
;
5403 char buf
[MAX_ADDR_LEN
];
5405 QETH_DBF_TEXT(trace
,4,"addmc");
5406 for (im4
= in4_dev
->mc_list
; im4
; im4
= im4
->next
) {
5407 qeth_get_mac_for_ipm(im4
->multiaddr
, buf
, in4_dev
->dev
);
5408 ipm
= qeth_get_addr_buffer(QETH_PROT_IPV4
);
5411 ipm
->u
.a4
.addr
= im4
->multiaddr
;
5412 memcpy(ipm
->mac
,buf
,OSA_ADDR_LEN
);
5413 ipm
->is_multicast
= 1;
5414 if (!qeth_add_ip(card
,ipm
))
5420 qeth_add_vlan_mc(struct qeth_card
*card
)
5422 #ifdef CONFIG_QETH_VLAN
5423 struct in_device
*in_dev
;
5424 struct vlan_group
*vg
;
5427 QETH_DBF_TEXT(trace
,4,"addmcvl");
5428 if ( ((card
->options
.layer2
== 0) &&
5429 (!qeth_is_supported(card
,IPA_FULL_VLAN
))) ||
5430 (card
->vlangrp
== NULL
) )
5434 for (i
= 0; i
< VLAN_GROUP_ARRAY_LEN
; i
++) {
5435 if (vg
->vlan_devices
[i
] == NULL
||
5436 !(vg
->vlan_devices
[i
]->flags
& IFF_UP
))
5438 in_dev
= in_dev_get(vg
->vlan_devices
[i
]);
5441 read_lock(&in_dev
->mc_list_lock
);
5442 qeth_add_mc(card
,in_dev
);
5443 read_unlock(&in_dev
->mc_list_lock
);
5450 qeth_add_multicast_ipv4(struct qeth_card
*card
)
5452 struct in_device
*in4_dev
;
5454 QETH_DBF_TEXT(trace
,4,"chkmcv4");
5455 in4_dev
= in_dev_get(card
->dev
);
5456 if (in4_dev
== NULL
)
5458 read_lock(&in4_dev
->mc_list_lock
);
5459 qeth_add_mc(card
, in4_dev
);
5460 qeth_add_vlan_mc(card
);
5461 read_unlock(&in4_dev
->mc_list_lock
);
5462 in_dev_put(in4_dev
);
5465 #ifdef CONFIG_QETH_IPV6
5467 qeth_add_mc6(struct qeth_card
*card
, struct inet6_dev
*in6_dev
)
5469 struct qeth_ipaddr
*ipm
;
5470 struct ifmcaddr6
*im6
;
5471 char buf
[MAX_ADDR_LEN
];
5473 QETH_DBF_TEXT(trace
,4,"addmc6");
5474 for (im6
= in6_dev
->mc_list
; im6
!= NULL
; im6
= im6
->next
) {
5475 ndisc_mc_map(&im6
->mca_addr
, buf
, in6_dev
->dev
, 0);
5476 ipm
= qeth_get_addr_buffer(QETH_PROT_IPV6
);
5479 ipm
->is_multicast
= 1;
5480 memcpy(ipm
->mac
,buf
,OSA_ADDR_LEN
);
5481 memcpy(&ipm
->u
.a6
.addr
,&im6
->mca_addr
.s6_addr
,
5482 sizeof(struct in6_addr
));
5483 if (!qeth_add_ip(card
,ipm
))
5489 qeth_add_vlan_mc6(struct qeth_card
*card
)
5491 #ifdef CONFIG_QETH_VLAN
5492 struct inet6_dev
*in_dev
;
5493 struct vlan_group
*vg
;
5496 QETH_DBF_TEXT(trace
,4,"admc6vl");
5497 if ( ((card
->options
.layer2
== 0) &&
5498 (!qeth_is_supported(card
,IPA_FULL_VLAN
))) ||
5499 (card
->vlangrp
== NULL
))
5503 for (i
= 0; i
< VLAN_GROUP_ARRAY_LEN
; i
++) {
5504 if (vg
->vlan_devices
[i
] == NULL
||
5505 !(vg
->vlan_devices
[i
]->flags
& IFF_UP
))
5507 in_dev
= in6_dev_get(vg
->vlan_devices
[i
]);
5510 read_lock(&in_dev
->lock
);
5511 qeth_add_mc6(card
,in_dev
);
5512 read_unlock(&in_dev
->lock
);
5513 in6_dev_put(in_dev
);
5515 #endif /* CONFIG_QETH_VLAN */
5519 qeth_add_multicast_ipv6(struct qeth_card
*card
)
5521 struct inet6_dev
*in6_dev
;
5523 QETH_DBF_TEXT(trace
,4,"chkmcv6");
5524 if ((card
->options
.layer2
== 0) &&
5525 (!qeth_is_supported(card
, IPA_IPV6
)) )
5528 in6_dev
= in6_dev_get(card
->dev
);
5529 if (in6_dev
== NULL
)
5531 read_lock(&in6_dev
->lock
);
5532 qeth_add_mc6(card
, in6_dev
);
5533 qeth_add_vlan_mc6(card
);
5534 read_unlock(&in6_dev
->lock
);
5535 in6_dev_put(in6_dev
);
5537 #endif /* CONFIG_QETH_IPV6 */
5540 qeth_layer2_send_setdelmac(struct qeth_card
*card
, __u8
*mac
,
5541 enum qeth_ipa_cmds ipacmd
,
5542 int (*reply_cb
) (struct qeth_card
*,
5546 struct qeth_ipa_cmd
*cmd
;
5547 struct qeth_cmd_buffer
*iob
;
5549 QETH_DBF_TEXT(trace
, 2, "L2sdmac");
5550 iob
= qeth_get_ipacmd_buffer(card
, ipacmd
, QETH_PROT_IPV4
);
5551 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
5552 cmd
->data
.setdelmac
.mac_length
= OSA_ADDR_LEN
;
5553 memcpy(&cmd
->data
.setdelmac
.mac
, mac
, OSA_ADDR_LEN
);
5554 return qeth_send_ipa_cmd(card
, iob
, reply_cb
, NULL
);
5558 qeth_layer2_send_setgroupmac_cb(struct qeth_card
*card
,
5559 struct qeth_reply
*reply
,
5562 struct qeth_ipa_cmd
*cmd
;
5565 QETH_DBF_TEXT(trace
, 2, "L2Sgmacb");
5566 cmd
= (struct qeth_ipa_cmd
*) data
;
5567 mac
= &cmd
->data
.setdelmac
.mac
[0];
5568 /* MAC already registered, needed in couple/uncouple case */
5569 if (cmd
->hdr
.return_code
== 0x2005) {
5570 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
5571 "already existing on %s \n",
5572 mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5],
5573 QETH_CARD_IFNAME(card
));
5574 cmd
->hdr
.return_code
= 0;
5576 if (cmd
->hdr
.return_code
)
5577 PRINT_ERR("Could not set group MAC " \
5578 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5579 mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5],
5580 QETH_CARD_IFNAME(card
),cmd
->hdr
.return_code
);
5585 qeth_layer2_send_setgroupmac(struct qeth_card
*card
, __u8
*mac
)
5587 QETH_DBF_TEXT(trace
, 2, "L2Sgmac");
5588 return qeth_layer2_send_setdelmac(card
, mac
, IPA_CMD_SETGMAC
,
5589 qeth_layer2_send_setgroupmac_cb
);
5593 qeth_layer2_send_delgroupmac_cb(struct qeth_card
*card
,
5594 struct qeth_reply
*reply
,
5597 struct qeth_ipa_cmd
*cmd
;
5600 QETH_DBF_TEXT(trace
, 2, "L2Dgmacb");
5601 cmd
= (struct qeth_ipa_cmd
*) data
;
5602 mac
= &cmd
->data
.setdelmac
.mac
[0];
5603 if (cmd
->hdr
.return_code
)
5604 PRINT_ERR("Could not delete group MAC " \
5605 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5606 mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5],
5607 QETH_CARD_IFNAME(card
), cmd
->hdr
.return_code
);
5612 qeth_layer2_send_delgroupmac(struct qeth_card
*card
, __u8
*mac
)
5614 QETH_DBF_TEXT(trace
, 2, "L2Dgmac");
5615 return qeth_layer2_send_setdelmac(card
, mac
, IPA_CMD_DELGMAC
,
5616 qeth_layer2_send_delgroupmac_cb
);
5620 qeth_layer2_send_setmac_cb(struct qeth_card
*card
,
5621 struct qeth_reply
*reply
,
5624 struct qeth_ipa_cmd
*cmd
;
5626 QETH_DBF_TEXT(trace
, 2, "L2Smaccb");
5627 cmd
= (struct qeth_ipa_cmd
*) data
;
5628 if (cmd
->hdr
.return_code
) {
5629 QETH_DBF_TEXT_(trace
, 2, "L2er%x", cmd
->hdr
.return_code
);
5630 PRINT_WARN("Error in registering MAC address on " \
5631 "device %s: x%x\n", CARD_BUS_ID(card
),
5632 cmd
->hdr
.return_code
);
5633 card
->info
.layer2_mac_registered
= 0;
5634 cmd
->hdr
.return_code
= -EIO
;
5636 card
->info
.layer2_mac_registered
= 1;
5637 memcpy(card
->dev
->dev_addr
,cmd
->data
.setdelmac
.mac
,
5639 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
5640 "successfully registered on device %s\n",
5641 card
->dev
->dev_addr
[0], card
->dev
->dev_addr
[1],
5642 card
->dev
->dev_addr
[2], card
->dev
->dev_addr
[3],
5643 card
->dev
->dev_addr
[4], card
->dev
->dev_addr
[5],
5650 qeth_layer2_send_setmac(struct qeth_card
*card
, __u8
*mac
)
5652 QETH_DBF_TEXT(trace
, 2, "L2Setmac");
5653 return qeth_layer2_send_setdelmac(card
, mac
, IPA_CMD_SETVMAC
,
5654 qeth_layer2_send_setmac_cb
);
5658 qeth_layer2_send_delmac_cb(struct qeth_card
*card
,
5659 struct qeth_reply
*reply
,
5662 struct qeth_ipa_cmd
*cmd
;
5664 QETH_DBF_TEXT(trace
, 2, "L2Dmaccb");
5665 cmd
= (struct qeth_ipa_cmd
*) data
;
5666 if (cmd
->hdr
.return_code
) {
5667 PRINT_WARN("Error in deregistering MAC address on " \
5668 "device %s: x%x\n", CARD_BUS_ID(card
),
5669 cmd
->hdr
.return_code
);
5670 QETH_DBF_TEXT_(trace
, 2, "err%d", cmd
->hdr
.return_code
);
5671 cmd
->hdr
.return_code
= -EIO
;
5674 card
->info
.layer2_mac_registered
= 0;
5679 qeth_layer2_send_delmac(struct qeth_card
*card
, __u8
*mac
)
5681 QETH_DBF_TEXT(trace
, 2, "L2Delmac");
5682 if (!card
->info
.layer2_mac_registered
)
5684 return qeth_layer2_send_setdelmac(card
, mac
, IPA_CMD_DELVMAC
,
5685 qeth_layer2_send_delmac_cb
);
5689 qeth_layer2_set_mac_address(struct net_device
*dev
, void *p
)
5691 struct sockaddr
*addr
= p
;
5692 struct qeth_card
*card
;
5695 QETH_DBF_TEXT(trace
, 3, "setmac");
5697 if (qeth_verify_dev(dev
) != QETH_REAL_CARD
) {
5698 QETH_DBF_TEXT(trace
, 3, "setmcINV");
5701 card
= (struct qeth_card
*) dev
->priv
;
5703 if (!card
->options
.layer2
) {
5704 PRINT_WARN("Setting MAC address on %s is not supported"
5705 "in Layer 3 mode.\n", dev
->name
);
5706 QETH_DBF_TEXT(trace
, 3, "setmcLY3");
5709 QETH_DBF_TEXT_(trace
, 3, "%s", CARD_BUS_ID(card
));
5710 QETH_DBF_HEX(trace
, 3, addr
->sa_data
, OSA_ADDR_LEN
);
5711 rc
= qeth_layer2_send_delmac(card
, &card
->dev
->dev_addr
[0]);
5713 rc
= qeth_layer2_send_setmac(card
, addr
->sa_data
);
5718 qeth_fill_ipacmd_header(struct qeth_card
*card
, struct qeth_ipa_cmd
*cmd
,
5719 __u8 command
, enum qeth_prot_versions prot
)
5721 memset(cmd
, 0, sizeof (struct qeth_ipa_cmd
));
5722 cmd
->hdr
.command
= command
;
5723 cmd
->hdr
.initiator
= IPA_CMD_INITIATOR_HOST
;
5724 cmd
->hdr
.seqno
= card
->seqno
.ipa
;
5725 cmd
->hdr
.adapter_type
= qeth_get_ipa_adp_type(card
->info
.link_type
);
5726 cmd
->hdr
.rel_adapter_no
= (__u8
) card
->info
.portno
;
5727 if (card
->options
.layer2
)
5728 cmd
->hdr
.prim_version_no
= 2;
5730 cmd
->hdr
.prim_version_no
= 1;
5731 cmd
->hdr
.param_count
= 1;
5732 cmd
->hdr
.prot_version
= prot
;
5733 cmd
->hdr
.ipa_supported
= 0;
5734 cmd
->hdr
.ipa_enabled
= 0;
5737 static struct qeth_cmd_buffer
*
5738 qeth_get_ipacmd_buffer(struct qeth_card
*card
, enum qeth_ipa_cmds ipacmd
,
5739 enum qeth_prot_versions prot
)
5741 struct qeth_cmd_buffer
*iob
;
5742 struct qeth_ipa_cmd
*cmd
;
5744 iob
= qeth_wait_for_buffer(&card
->write
);
5745 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
5746 qeth_fill_ipacmd_header(card
, cmd
, ipacmd
, prot
);
5752 qeth_send_setdelmc(struct qeth_card
*card
, struct qeth_ipaddr
*addr
, int ipacmd
)
5755 struct qeth_cmd_buffer
*iob
;
5756 struct qeth_ipa_cmd
*cmd
;
5758 QETH_DBF_TEXT(trace
,4,"setdelmc");
5760 iob
= qeth_get_ipacmd_buffer(card
, ipacmd
, addr
->proto
);
5761 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
5762 memcpy(&cmd
->data
.setdelipm
.mac
,addr
->mac
, OSA_ADDR_LEN
);
5763 if (addr
->proto
== QETH_PROT_IPV6
)
5764 memcpy(cmd
->data
.setdelipm
.ip6
, &addr
->u
.a6
.addr
,
5765 sizeof(struct in6_addr
));
5767 memcpy(&cmd
->data
.setdelipm
.ip4
, &addr
->u
.a4
.addr
,4);
5769 rc
= qeth_send_ipa_cmd(card
, iob
, NULL
, NULL
);
5774 qeth_fill_netmask(u8
*netmask
, unsigned int len
)
5777 for (i
=0;i
<16;i
++) {
5782 netmask
[i
] = (u8
)(0xFF00>>j
);
5789 qeth_send_setdelip(struct qeth_card
*card
, struct qeth_ipaddr
*addr
,
5790 int ipacmd
, unsigned int flags
)
5793 struct qeth_cmd_buffer
*iob
;
5794 struct qeth_ipa_cmd
*cmd
;
5797 QETH_DBF_TEXT(trace
,4,"setdelip");
5798 QETH_DBF_TEXT_(trace
,4,"flags%02X", flags
);
5800 iob
= qeth_get_ipacmd_buffer(card
, ipacmd
, addr
->proto
);
5801 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
5802 if (addr
->proto
== QETH_PROT_IPV6
) {
5803 memcpy(cmd
->data
.setdelip6
.ip_addr
, &addr
->u
.a6
.addr
,
5804 sizeof(struct in6_addr
));
5805 qeth_fill_netmask(netmask
,addr
->u
.a6
.pfxlen
);
5806 memcpy(cmd
->data
.setdelip6
.mask
, netmask
,
5807 sizeof(struct in6_addr
));
5808 cmd
->data
.setdelip6
.flags
= flags
;
5810 memcpy(cmd
->data
.setdelip4
.ip_addr
, &addr
->u
.a4
.addr
, 4);
5811 memcpy(cmd
->data
.setdelip4
.mask
, &addr
->u
.a4
.mask
, 4);
5812 cmd
->data
.setdelip4
.flags
= flags
;
5815 rc
= qeth_send_ipa_cmd(card
, iob
, NULL
, NULL
);
5821 qeth_layer2_register_addr_entry(struct qeth_card
*card
,
5822 struct qeth_ipaddr
*addr
)
5824 if (!addr
->is_multicast
)
5826 QETH_DBF_TEXT(trace
, 2, "setgmac");
5827 QETH_DBF_HEX(trace
,3,&addr
->mac
[0],OSA_ADDR_LEN
);
5828 return qeth_layer2_send_setgroupmac(card
, &addr
->mac
[0]);
5832 qeth_layer2_deregister_addr_entry(struct qeth_card
*card
,
5833 struct qeth_ipaddr
*addr
)
5835 if (!addr
->is_multicast
)
5837 QETH_DBF_TEXT(trace
, 2, "delgmac");
5838 QETH_DBF_HEX(trace
,3,&addr
->mac
[0],OSA_ADDR_LEN
);
5839 return qeth_layer2_send_delgroupmac(card
, &addr
->mac
[0]);
5843 qeth_layer3_register_addr_entry(struct qeth_card
*card
,
5844 struct qeth_ipaddr
*addr
)
5850 if (addr
->proto
== QETH_PROT_IPV4
) {
5851 QETH_DBF_TEXT(trace
, 2,"setaddr4");
5852 QETH_DBF_HEX(trace
, 3, &addr
->u
.a4
.addr
, sizeof(int));
5853 } else if (addr
->proto
== QETH_PROT_IPV6
) {
5854 QETH_DBF_TEXT(trace
, 2, "setaddr6");
5855 QETH_DBF_HEX(trace
,3,&addr
->u
.a6
.addr
,8);
5856 QETH_DBF_HEX(trace
,3,((char *)&addr
->u
.a6
.addr
)+8,8);
5858 QETH_DBF_TEXT(trace
, 2, "setaddr?");
5859 QETH_DBF_HEX(trace
, 3, addr
, sizeof(struct qeth_ipaddr
));
5862 if (addr
->is_multicast
)
5863 rc
= qeth_send_setdelmc(card
, addr
, IPA_CMD_SETIPM
);
5865 rc
= qeth_send_setdelip(card
, addr
, IPA_CMD_SETIP
,
5868 QETH_DBF_TEXT(trace
, 2, "failed");
5869 } while ((--cnt
> 0) && rc
);
5871 QETH_DBF_TEXT(trace
, 2, "FAILED");
5872 qeth_ipaddr_to_string(addr
->proto
, (u8
*)&addr
->u
, buf
);
5873 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
5880 qeth_layer3_deregister_addr_entry(struct qeth_card
*card
,
5881 struct qeth_ipaddr
*addr
)
5886 if (addr
->proto
== QETH_PROT_IPV4
) {
5887 QETH_DBF_TEXT(trace
, 2,"deladdr4");
5888 QETH_DBF_HEX(trace
, 3, &addr
->u
.a4
.addr
, sizeof(int));
5889 } else if (addr
->proto
== QETH_PROT_IPV6
) {
5890 QETH_DBF_TEXT(trace
, 2, "deladdr6");
5891 QETH_DBF_HEX(trace
,3,&addr
->u
.a6
.addr
,8);
5892 QETH_DBF_HEX(trace
,3,((char *)&addr
->u
.a6
.addr
)+8,8);
5894 QETH_DBF_TEXT(trace
, 2, "deladdr?");
5895 QETH_DBF_HEX(trace
, 3, addr
, sizeof(struct qeth_ipaddr
));
5897 if (addr
->is_multicast
)
5898 rc
= qeth_send_setdelmc(card
, addr
, IPA_CMD_DELIPM
);
5900 rc
= qeth_send_setdelip(card
, addr
, IPA_CMD_DELIP
,
5903 QETH_DBF_TEXT(trace
, 2, "failed");
5904 /* TODO: re-activate this warning as soon as we have a
5906 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5907 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
5915 qeth_register_addr_entry(struct qeth_card
*card
, struct qeth_ipaddr
*addr
)
5917 if (card
->options
.layer2
)
5918 return qeth_layer2_register_addr_entry(card
, addr
);
5920 return qeth_layer3_register_addr_entry(card
, addr
);
5924 qeth_deregister_addr_entry(struct qeth_card
*card
, struct qeth_ipaddr
*addr
)
5926 if (card
->options
.layer2
)
5927 return qeth_layer2_deregister_addr_entry(card
, addr
);
5929 return qeth_layer3_deregister_addr_entry(card
, addr
);
5933 qeth_ethtool_get_tx_csum(struct net_device
*dev
)
5935 /* We may need to say that we support tx csum offload if
5936 * we do EDDP or TSO. There are discussions going on to
5937 * enforce rules in the stack and in ethtool that make
5938 * SG and TSO depend on HW_CSUM. At the moment there are
5940 * If we say yes here, we have to checksum outbound packets
5946 qeth_ethtool_set_tx_csum(struct net_device
*dev
, u32 data
)
5952 qeth_ethtool_get_rx_csum(struct net_device
*dev
)
5954 struct qeth_card
*card
= (struct qeth_card
*)dev
->priv
;
5956 return (card
->options
.checksum_type
== HW_CHECKSUMMING
);
5960 qeth_ethtool_set_rx_csum(struct net_device
*dev
, u32 data
)
5962 struct qeth_card
*card
= (struct qeth_card
*)dev
->priv
;
5964 if ((card
->state
!= CARD_STATE_DOWN
) &&
5965 (card
->state
!= CARD_STATE_RECOVER
))
5968 card
->options
.checksum_type
= HW_CHECKSUMMING
;
5970 card
->options
.checksum_type
= SW_CHECKSUMMING
;
5975 qeth_ethtool_get_sg(struct net_device
*dev
)
5977 struct qeth_card
*card
= (struct qeth_card
*)dev
->priv
;
5979 return ((card
->options
.large_send
!= QETH_LARGE_SEND_NO
) &&
5980 (dev
->features
& NETIF_F_SG
));
5984 qeth_ethtool_set_sg(struct net_device
*dev
, u32 data
)
5986 struct qeth_card
*card
= (struct qeth_card
*)dev
->priv
;
5989 if (card
->options
.large_send
!= QETH_LARGE_SEND_NO
)
5990 dev
->features
|= NETIF_F_SG
;
5992 dev
->features
&= ~NETIF_F_SG
;
5996 dev
->features
&= ~NETIF_F_SG
;
6001 qeth_ethtool_get_tso(struct net_device
*dev
)
6003 struct qeth_card
*card
= (struct qeth_card
*)dev
->priv
;
6005 return ((card
->options
.large_send
!= QETH_LARGE_SEND_NO
) &&
6006 (dev
->features
& NETIF_F_TSO
));
6010 qeth_ethtool_set_tso(struct net_device
*dev
, u32 data
)
6012 struct qeth_card
*card
= (struct qeth_card
*)dev
->priv
;
6015 if (card
->options
.large_send
!= QETH_LARGE_SEND_NO
)
6016 dev
->features
|= NETIF_F_TSO
;
6018 dev
->features
&= ~NETIF_F_TSO
;
6022 dev
->features
&= ~NETIF_F_TSO
;
static struct ethtool_ops qeth_ethtool_ops = {
	.get_tx_csum = qeth_ethtool_get_tx_csum,
	.set_tx_csum = qeth_ethtool_set_tx_csum,
	.get_rx_csum = qeth_ethtool_get_rx_csum,
	.set_rx_csum = qeth_ethtool_set_rx_csum,
	.get_sg      = qeth_ethtool_get_sg,
	.set_sg      = qeth_ethtool_set_sg,
	.get_tso     = qeth_ethtool_get_tso,
	.set_tso     = qeth_ethtool_set_tso,
};
6038 qeth_netdev_init(struct net_device
*dev
)
6040 struct qeth_card
*card
;
6042 card
= (struct qeth_card
*) dev
->priv
;
6044 QETH_DBF_TEXT(trace
,3,"initdev");
6046 dev
->tx_timeout
= &qeth_tx_timeout
;
6047 dev
->watchdog_timeo
= QETH_TX_TIMEOUT
;
6048 dev
->open
= qeth_open
;
6049 dev
->stop
= qeth_stop
;
6050 dev
->hard_start_xmit
= qeth_hard_start_xmit
;
6051 dev
->do_ioctl
= qeth_do_ioctl
;
6052 dev
->get_stats
= qeth_get_stats
;
6053 dev
->change_mtu
= qeth_change_mtu
;
6054 dev
->neigh_setup
= qeth_neigh_setup
;
6055 dev
->set_multicast_list
= qeth_set_multicast_list
;
6056 #ifdef CONFIG_QETH_VLAN
6057 dev
->vlan_rx_register
= qeth_vlan_rx_register
;
6058 dev
->vlan_rx_kill_vid
= qeth_vlan_rx_kill_vid
;
6059 dev
->vlan_rx_add_vid
= qeth_vlan_rx_add_vid
;
6061 dev
->hard_header
= card
->orig_hard_header
;
6062 if (qeth_get_netdev_flags(card
) & IFF_NOARP
) {
6063 dev
->rebuild_header
= NULL
;
6064 dev
->hard_header
= NULL
;
6065 if (card
->options
.fake_ll
)
6066 dev
->hard_header
= qeth_fake_header
;
6067 dev
->header_cache_update
= NULL
;
6068 dev
->hard_header_cache
= NULL
;
6070 #ifdef CONFIG_QETH_IPV6
6071 /*IPv6 address autoconfiguration stuff*/
6072 if (!(card
->info
.unique_id
& UNIQUE_ID_NOT_BY_CARD
))
6073 card
->dev
->dev_id
= card
->info
.unique_id
& 0xffff;
6075 dev
->hard_header_parse
= NULL
;
6076 dev
->set_mac_address
= qeth_layer2_set_mac_address
;
6077 dev
->flags
|= qeth_get_netdev_flags(card
);
6078 if ((card
->options
.fake_broadcast
) ||
6079 (card
->info
.broadcast_capable
))
6080 dev
->flags
|= IFF_BROADCAST
;
6081 dev
->hard_header_len
=
6082 qeth_get_hlen(card
->info
.link_type
) + card
->options
.add_hhlen
;
6083 dev
->addr_len
= OSA_ADDR_LEN
;
6084 dev
->mtu
= card
->info
.initial_mtu
;
6086 SET_ETHTOOL_OPS(dev
, &qeth_ethtool_ops
);
6088 SET_MODULE_OWNER(dev
);
static void
qeth_init_func_level(struct qeth_card *card)
{
	if (card->ipato.enabled) {
		if (card->info.type == QETH_CARD_TYPE_IQD)
			card->info.func_level =
				QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
		else
			card->info.func_level =
				QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
	} else {
		if (card->info.type == QETH_CARD_TYPE_IQD)
			card->info.func_level =
				QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
		else
			card->info.func_level =
				QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
	}
}
6113 * hardsetup card, initialize MPC and QDIO stuff
6116 qeth_hardsetup_card(struct qeth_card
*card
)
6121 QETH_DBF_TEXT(setup
, 2, "hrdsetup");
6125 PRINT_WARN("Retrying to do IDX activates.\n");
6126 ccw_device_set_offline(CARD_DDEV(card
));
6127 ccw_device_set_offline(CARD_WDEV(card
));
6128 ccw_device_set_offline(CARD_RDEV(card
));
6129 ccw_device_set_online(CARD_RDEV(card
));
6130 ccw_device_set_online(CARD_WDEV(card
));
6131 ccw_device_set_online(CARD_DDEV(card
));
6133 rc
= qeth_qdio_clear_card(card
,card
->info
.type
==QETH_CARD_TYPE_OSAE
);
6134 if (rc
== -ERESTARTSYS
) {
6135 QETH_DBF_TEXT(setup
, 2, "break1");
6138 QETH_DBF_TEXT_(setup
, 2, "1err%d", rc
);
6144 if ((rc
= qeth_get_unitaddr(card
))){
6145 QETH_DBF_TEXT_(setup
, 2, "2err%d", rc
);
6148 qeth_init_tokens(card
);
6149 qeth_init_func_level(card
);
6150 rc
= qeth_idx_activate_channel(&card
->read
, qeth_idx_read_cb
);
6151 if (rc
== -ERESTARTSYS
) {
6152 QETH_DBF_TEXT(setup
, 2, "break2");
6155 QETH_DBF_TEXT_(setup
, 2, "3err%d", rc
);
6161 rc
= qeth_idx_activate_channel(&card
->write
, qeth_idx_write_cb
);
6162 if (rc
== -ERESTARTSYS
) {
6163 QETH_DBF_TEXT(setup
, 2, "break3");
6166 QETH_DBF_TEXT_(setup
, 2, "4err%d", rc
);
6172 if ((rc
= qeth_mpc_initialize(card
))){
6173 QETH_DBF_TEXT_(setup
, 2, "5err%d", rc
);
6176 /*network device will be recovered*/
6178 card
->dev
->hard_header
= card
->orig_hard_header
;
6181 /* at first set_online allocate netdev */
6182 card
->dev
= qeth_get_netdevice(card
->info
.type
,
6183 card
->info
.link_type
);
6185 qeth_qdio_clear_card(card
, card
->info
.type
==
6186 QETH_CARD_TYPE_OSAE
);
6188 QETH_DBF_TEXT_(setup
, 2, "6err%d", rc
);
6191 card
->dev
->priv
= card
;
6192 card
->orig_hard_header
= card
->dev
->hard_header
;
6193 card
->dev
->type
= qeth_get_arphdr_type(card
->info
.type
,
6194 card
->info
.link_type
);
6195 card
->dev
->init
= qeth_netdev_init
;
6198 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc
);
6203 qeth_default_setassparms_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
6206 struct qeth_ipa_cmd
*cmd
;
6208 QETH_DBF_TEXT(trace
,4,"defadpcb");
6210 cmd
= (struct qeth_ipa_cmd
*) data
;
6211 if (cmd
->hdr
.return_code
== 0){
6212 cmd
->hdr
.return_code
= cmd
->data
.setassparms
.hdr
.return_code
;
6213 if (cmd
->hdr
.prot_version
== QETH_PROT_IPV4
)
6214 card
->options
.ipa4
.enabled_funcs
= cmd
->hdr
.ipa_enabled
;
6215 #ifdef CONFIG_QETH_IPV6
6216 if (cmd
->hdr
.prot_version
== QETH_PROT_IPV6
)
6217 card
->options
.ipa6
.enabled_funcs
= cmd
->hdr
.ipa_enabled
;
6220 if (cmd
->data
.setassparms
.hdr
.assist_no
== IPA_INBOUND_CHECKSUM
&&
6221 cmd
->data
.setassparms
.hdr
.command_code
== IPA_CMD_ASS_START
) {
6222 card
->info
.csum_mask
= cmd
->data
.setassparms
.data
.flags_32bit
;
6223 QETH_DBF_TEXT_(trace
, 3, "csum:%d", card
->info
.csum_mask
);
6229 qeth_default_setadapterparms_cb(struct qeth_card
*card
,
6230 struct qeth_reply
*reply
,
6233 struct qeth_ipa_cmd
*cmd
;
6235 QETH_DBF_TEXT(trace
,4,"defadpcb");
6237 cmd
= (struct qeth_ipa_cmd
*) data
;
6238 if (cmd
->hdr
.return_code
== 0)
6239 cmd
->hdr
.return_code
= cmd
->data
.setadapterparms
.hdr
.return_code
;
6244 qeth_query_setadapterparms_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
6247 struct qeth_ipa_cmd
*cmd
;
6249 QETH_DBF_TEXT(trace
,3,"quyadpcb");
6251 cmd
= (struct qeth_ipa_cmd
*) data
;
6252 if (cmd
->data
.setadapterparms
.data
.query_cmds_supp
.lan_type
& 0x7f)
6253 card
->info
.link_type
=
6254 cmd
->data
.setadapterparms
.data
.query_cmds_supp
.lan_type
;
6255 card
->options
.adp
.supported_funcs
=
6256 cmd
->data
.setadapterparms
.data
.query_cmds_supp
.supported_cmds
;
6257 return qeth_default_setadapterparms_cb(card
, reply
, (unsigned long)cmd
);
6261 qeth_query_setadapterparms(struct qeth_card
*card
)
6264 struct qeth_cmd_buffer
*iob
;
6266 QETH_DBF_TEXT(trace
,3,"queryadp");
6267 iob
= qeth_get_adapter_cmd(card
, IPA_SETADP_QUERY_COMMANDS_SUPPORTED
,
6268 sizeof(struct qeth_ipacmd_setadpparms
));
6269 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_query_setadapterparms_cb
, NULL
);
6274 qeth_setadpparms_change_macaddr_cb(struct qeth_card
*card
,
6275 struct qeth_reply
*reply
,
6278 struct qeth_ipa_cmd
*cmd
;
6280 QETH_DBF_TEXT(trace
,4,"chgmaccb");
6282 cmd
= (struct qeth_ipa_cmd
*) data
;
6283 memcpy(card
->dev
->dev_addr
,
6284 &cmd
->data
.setadapterparms
.data
.change_addr
.addr
,OSA_ADDR_LEN
);
6285 qeth_default_setadapterparms_cb(card
, reply
, (unsigned long) cmd
);
6290 qeth_setadpparms_change_macaddr(struct qeth_card
*card
)
6293 struct qeth_cmd_buffer
*iob
;
6294 struct qeth_ipa_cmd
*cmd
;
6296 QETH_DBF_TEXT(trace
,4,"chgmac");
6298 iob
= qeth_get_adapter_cmd(card
,IPA_SETADP_ALTER_MAC_ADDRESS
,
6299 sizeof(struct qeth_ipacmd_setadpparms
));
6300 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
6301 cmd
->data
.setadapterparms
.data
.change_addr
.cmd
= CHANGE_ADDR_READ_MAC
;
6302 cmd
->data
.setadapterparms
.data
.change_addr
.addr_size
= OSA_ADDR_LEN
;
6303 memcpy(&cmd
->data
.setadapterparms
.data
.change_addr
.addr
,
6304 card
->dev
->dev_addr
, OSA_ADDR_LEN
);
6305 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_setadpparms_change_macaddr_cb
,
6311 qeth_send_setadp_mode(struct qeth_card
*card
, __u32 command
, __u32 mode
)
6314 struct qeth_cmd_buffer
*iob
;
6315 struct qeth_ipa_cmd
*cmd
;
6317 QETH_DBF_TEXT(trace
,4,"adpmode");
6319 iob
= qeth_get_adapter_cmd(card
, command
,
6320 sizeof(struct qeth_ipacmd_setadpparms
));
6321 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
6322 cmd
->data
.setadapterparms
.data
.mode
= mode
;
6323 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_default_setadapterparms_cb
,
6329 qeth_setadapter_hstr(struct qeth_card
*card
)
6333 QETH_DBF_TEXT(trace
,4,"adphstr");
6335 if (qeth_adp_supported(card
,IPA_SETADP_SET_BROADCAST_MODE
)) {
6336 rc
= qeth_send_setadp_mode(card
, IPA_SETADP_SET_BROADCAST_MODE
,
6337 card
->options
.broadcast_mode
);
6339 PRINT_WARN("couldn't set broadcast mode on "
6341 CARD_BUS_ID(card
), rc
);
6342 rc
= qeth_send_setadp_mode(card
, IPA_SETADP_ALTER_MAC_ADDRESS
,
6343 card
->options
.macaddr_mode
);
6345 PRINT_WARN("couldn't set macaddr mode on "
6346 "device %s: x%x\n", CARD_BUS_ID(card
), rc
);
6349 if (card
->options
.broadcast_mode
== QETH_TR_BROADCAST_LOCAL
)
6350 PRINT_WARN("set adapter parameters not available "
6351 "to set broadcast mode, using ALLRINGS "
6352 "on device %s:\n", CARD_BUS_ID(card
));
6353 if (card
->options
.macaddr_mode
== QETH_TR_MACADDR_CANONICAL
)
6354 PRINT_WARN("set adapter parameters not available "
6355 "to set macaddr mode, using NONCANONICAL "
6356 "on device %s:\n", CARD_BUS_ID(card
));
6361 qeth_setadapter_parms(struct qeth_card
*card
)
6365 QETH_DBF_TEXT(setup
, 2, "setadprm");
6367 if (!qeth_is_supported(card
, IPA_SETADAPTERPARMS
)){
6368 PRINT_WARN("set adapter parameters not supported "
6371 QETH_DBF_TEXT(setup
, 2, " notsupp");
6374 rc
= qeth_query_setadapterparms(card
);
6376 PRINT_WARN("couldn't set adapter parameters on device %s: "
6377 "x%x\n", CARD_BUS_ID(card
), rc
);
6380 if (qeth_adp_supported(card
,IPA_SETADP_ALTER_MAC_ADDRESS
)) {
6381 rc
= qeth_setadpparms_change_macaddr(card
);
6383 PRINT_WARN("couldn't get MAC address on "
6385 CARD_BUS_ID(card
), rc
);
6388 if ((card
->info
.link_type
== QETH_LINK_TYPE_HSTR
) ||
6389 (card
->info
.link_type
== QETH_LINK_TYPE_LANE_TR
))
6390 rc
= qeth_setadapter_hstr(card
);
6396 qeth_layer2_initialize(struct qeth_card
*card
)
6401 QETH_DBF_TEXT(setup
, 2, "doL2init");
6402 QETH_DBF_TEXT_(setup
, 2, "doL2%s", CARD_BUS_ID(card
));
6404 rc
= qeth_setadpparms_change_macaddr(card
);
6406 PRINT_WARN("couldn't get MAC address on "
6408 CARD_BUS_ID(card
), rc
);
6409 QETH_DBF_TEXT_(setup
, 2,"1err%d",rc
);
6412 QETH_DBF_HEX(setup
,2, card
->dev
->dev_addr
, OSA_ADDR_LEN
);
6414 rc
= qeth_layer2_send_setmac(card
, &card
->dev
->dev_addr
[0]);
6416 QETH_DBF_TEXT_(setup
, 2,"2err%d",rc
);
6422 qeth_send_startstoplan(struct qeth_card
*card
, enum qeth_ipa_cmds ipacmd
,
6423 enum qeth_prot_versions prot
)
6426 struct qeth_cmd_buffer
*iob
;
6428 iob
= qeth_get_ipacmd_buffer(card
,ipacmd
,prot
);
6429 rc
= qeth_send_ipa_cmd(card
, iob
, NULL
, NULL
);
6435 qeth_send_startlan(struct qeth_card
*card
, enum qeth_prot_versions prot
)
6439 QETH_DBF_TEXT_(setup
, 2, "strtlan%i", prot
);
6441 rc
= qeth_send_startstoplan(card
, IPA_CMD_STARTLAN
, prot
);
6446 qeth_send_stoplan(struct qeth_card
*card
)
6451 * TODO: according to the IPA format document page 14,
6452 * TCP/IP (we!) never issue a STOPLAN
6455 QETH_DBF_TEXT(trace
, 2, "stoplan");
6457 rc
= qeth_send_startstoplan(card
, IPA_CMD_STOPLAN
, QETH_PROT_IPV4
);
6462 qeth_query_ipassists_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
6465 struct qeth_ipa_cmd
*cmd
;
6467 QETH_DBF_TEXT(setup
, 2, "qipasscb");
6469 cmd
= (struct qeth_ipa_cmd
*) data
;
6470 if (cmd
->hdr
.prot_version
== QETH_PROT_IPV4
) {
6471 card
->options
.ipa4
.supported_funcs
= cmd
->hdr
.ipa_supported
;
6472 card
->options
.ipa4
.enabled_funcs
= cmd
->hdr
.ipa_enabled
;
6474 #ifdef CONFIG_QETH_IPV6
6475 card
->options
.ipa6
.supported_funcs
= cmd
->hdr
.ipa_supported
;
6476 card
->options
.ipa6
.enabled_funcs
= cmd
->hdr
.ipa_enabled
;
6479 QETH_DBF_TEXT(setup
, 2, "suppenbl");
6480 QETH_DBF_TEXT_(setup
, 2, "%x",cmd
->hdr
.ipa_supported
);
6481 QETH_DBF_TEXT_(setup
, 2, "%x",cmd
->hdr
.ipa_enabled
);
6486 qeth_query_ipassists(struct qeth_card
*card
, enum qeth_prot_versions prot
)
6489 struct qeth_cmd_buffer
*iob
;
6491 QETH_DBF_TEXT_(setup
, 2, "qipassi%i", prot
);
6492 if (card
->options
.layer2
) {
6493 QETH_DBF_TEXT(setup
, 2, "noprmly2");
6497 iob
= qeth_get_ipacmd_buffer(card
,IPA_CMD_QIPASSIST
,prot
);
6498 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_query_ipassists_cb
, NULL
);
6502 static struct qeth_cmd_buffer
*
6503 qeth_get_setassparms_cmd(struct qeth_card
*card
, enum qeth_ipa_funcs ipa_func
,
6504 __u16 cmd_code
, __u16 len
,
6505 enum qeth_prot_versions prot
)
6507 struct qeth_cmd_buffer
*iob
;
6508 struct qeth_ipa_cmd
*cmd
;
6510 QETH_DBF_TEXT(trace
,4,"getasscm");
6511 iob
= qeth_get_ipacmd_buffer(card
,IPA_CMD_SETASSPARMS
,prot
);
6513 cmd
= (struct qeth_ipa_cmd
*)(iob
->data
+IPA_PDU_HEADER_SIZE
);
6514 cmd
->data
.setassparms
.hdr
.assist_no
= ipa_func
;
6515 cmd
->data
.setassparms
.hdr
.length
= 8 + len
;
6516 cmd
->data
.setassparms
.hdr
.command_code
= cmd_code
;
6517 cmd
->data
.setassparms
.hdr
.return_code
= 0;
6518 cmd
->data
.setassparms
.hdr
.seq_no
= 0;
static int
qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		      __u16 len, long data,
		      int (*reply_cb)
		      (struct qeth_card *, struct qeth_reply *, unsigned long),
		      void *reply_param)
{
	int rc;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace, 4, "sendassp");

	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	if (len <= sizeof(__u32))
		cmd->data.setassparms.data.flags_32bit = (__u32) data;
	else if (len > sizeof(__u32))
		memcpy(&cmd->data.setassparms.data, (void *) data, len);

	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
	return rc;
}

#ifdef CONFIG_QETH_IPV6
static int
qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
				  enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace, 4, "simassp6");
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
				       0, QETH_PROT_IPV6);
	rc = qeth_send_setassparms(card, iob, 0, 0,
				   qeth_default_setassparms_cb, NULL);
	return rc;
}
#endif

static int
qeth_send_simple_setassparms(struct qeth_card *card,
			     enum qeth_ipa_funcs ipa_func,
			     __u16 cmd_code, long data)
{
	int rc;
	int length = 0;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace, 4, "simassp4");
	if (data)
		length = sizeof(__u32);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
				       length, QETH_PROT_IPV4);
	rc = qeth_send_setassparms(card, iob, length, data,
				   qeth_default_setassparms_cb, NULL);
	return rc;
}
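/*
 * Usage sketch (not part of the original flow): the "simple" helpers above
 * wrap the common SETASSPARMS sequence -- build the command buffer, fill
 * the 32-bit data word, send it with the default callback. The
 * qeth_start_ipa_*() routines below invoke them like:
 *
 *	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
 *					  IPA_CMD_ASS_START, 0);
 */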
static int
qeth_start_ipa_arp_processing(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "ipaarp");

	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return 0;
	}
	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_START, 0);
	if (rc)
		PRINT_WARN("Could not start ARP processing "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	return rc;
}

static int
qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "ipaipfrg");

	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
		PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
					  IPA_CMD_ASS_START, 0);
	if (rc)
		PRINT_WARN("Could not start Hardware IP fragmentation "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	else
		PRINT_INFO("Hardware IP fragmentation enabled \n");
	return rc;
}

static int
qeth_start_ipa_source_mac(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "stsrcmac");

	if (!card->options.fake_ll)
		return -EOPNOTSUPP;

	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
		PRINT_INFO("Inbound source address not "
			   "supported on %s\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
					  IPA_CMD_ASS_START, 0);
	if (rc)
		PRINT_WARN("Could not start inbound source "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	return rc;
}
static int
qeth_start_ipa_vlan(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "strtvlan");

#ifdef CONFIG_QETH_VLAN
	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
		PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start vlan "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("VLAN enabled \n");
		card->dev->features |=
			NETIF_F_HW_VLAN_FILTER |
			NETIF_F_HW_VLAN_TX |
			NETIF_F_HW_VLAN_RX;
	}
#endif /* QETH_VLAN */
	return rc;
}

static int
qeth_start_ipa_multicast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "stmcast");

	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
		PRINT_WARN("Multicast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start multicast "
			   "assist on %s: rc=%i\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("Multicast enabled\n");
		card->dev->flags |= IFF_MULTICAST;
	}
	return rc;
}
#ifdef CONFIG_QETH_IPV6
static int
qeth_softsetup_ipv6(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "softipv6");

	netif_stop_queue(card->dev);
	rc = qeth_send_startlan(card, QETH_PROT_IPV6);
	if (rc) {
		PRINT_ERR("IPv6 startlan failed on %s\n",
			  QETH_CARD_IFNAME(card));
		return rc;
	}
	netif_wake_queue(card->dev);
	rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
	if (rc) {
		PRINT_ERR("IPv6 query ipassist failed on %s\n",
			  QETH_CARD_IFNAME(card));
		return rc;
	}
	rc = qeth_send_simple_setassparms(card, IPA_IPV6,
					  IPA_CMD_ASS_START, 3);
	if (rc) {
		PRINT_WARN("IPv6 start assist (version 4) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
					       IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("IPV6 start assist (version 6) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
					       IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("Could not enable passthrough "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	PRINT_INFO("IPV6 enabled \n");
	return 0;
}
#endif

static int
qeth_start_ipa_ipv6(struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	QETH_DBF_TEXT(trace, 3, "strtipv6");

	if (!qeth_is_supported(card, IPA_IPV6)) {
		PRINT_WARN("IPv6 not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return 0;
	}
	rc = qeth_softsetup_ipv6(card);
#endif
	return rc;
}
static int
qeth_start_ipa_broadcast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "stbrdcst");
	card->info.broadcast_capable = 0;
	if (!qeth_is_supported(card, IPA_FILTERING)) {
		PRINT_WARN("Broadcast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		rc = -EOPNOTSUPP;
		goto out;
	}
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not enable broadcasting filtering "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_CONFIGURE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
	PRINT_INFO("Broadcast enabled \n");
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_ENABLE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast echo filtering on "
			   "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
	if (card->info.broadcast_capable)
		card->dev->flags |= IFF_BROADCAST;
	else
		card->dev->flags &= ~IFF_BROADCAST;
	return rc;
}
static int
qeth_send_checksum_command(struct qeth_card *card)
{
	int rc;

	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
			   "0x%x,\ncontinuing using Inbound SW Checksumming\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
					  IPA_CMD_ASS_ENABLE,
					  card->info.csum_mask);
	if (rc) {
		PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
			   "0x%x,\ncontinuing using Inbound SW Checksumming\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	return 0;
}

static int
qeth_start_ipa_checksum(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "strtcsum");

	if (card->options.checksum_type == NO_CHECKSUMMING) {
		PRINT_WARN("Using no checksumming on %s.\n",
			   QETH_CARD_IFNAME(card));
		return 0;
	}
	if (card->options.checksum_type == SW_CHECKSUMMING) {
		PRINT_WARN("Using SW checksumming on %s.\n",
			   QETH_CARD_IFNAME(card));
		return 0;
	}
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
		PRINT_WARN("Inbound HW Checksumming not "
			   "supported on %s,\ncontinuing "
			   "using Inbound SW Checksumming\n",
			   QETH_CARD_IFNAME(card));
		card->options.checksum_type = SW_CHECKSUMMING;
		return 0;
	}
	rc = qeth_send_checksum_command(card);
	if (!rc)
		PRINT_INFO("HW Checksumming (inbound) enabled \n");
	return rc;
}

static int
qeth_start_ipa_tso(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "sttso");

	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
		PRINT_WARN("Outbound TSO not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		rc = -EOPNOTSUPP;
	} else {
		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
						  IPA_CMD_ASS_START, 0);
		if (rc)
			PRINT_WARN("Could not start outbound TSO "
				   "assist on %s: rc=%i\n",
				   QETH_CARD_IFNAME(card), rc);
		else
			PRINT_INFO("Outbound TSO enabled\n");
	}
	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
		card->options.large_send = QETH_LARGE_SEND_NO;
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
	}
	return rc;
}
static int
qeth_start_ipassists(struct qeth_card *card)
{
	QETH_DBF_TEXT(trace, 3, "strtipas");
	qeth_start_ipa_arp_processing(card);	/* go on*/
	qeth_start_ipa_ip_fragmentation(card);	/* go on*/
	qeth_start_ipa_source_mac(card);	/* go on*/
	qeth_start_ipa_vlan(card);		/* go on*/
	qeth_start_ipa_multicast(card);		/* go on*/
	qeth_start_ipa_ipv6(card);		/* go on*/
	qeth_start_ipa_broadcast(card);		/* go on*/
	qeth_start_ipa_checksum(card);		/* go on*/
	qeth_start_ipa_tso(card);		/* go on*/
	return 0;
}
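/*
 * All assist starts above are best effort ("go on"): a failing assist is
 * only reported, softsetup continues and qeth_start_ipassists() returns 0
 * unconditionally. Callers that care about a particular assist (e.g. TSO
 * for the large_send option) look at the adjusted card options afterwards
 * rather than at this return value.
 */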
static int
qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
		     enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace, 4, "setroutg");
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setrtg.type = (type);
	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);

	return rc;
}

static void
qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
			  enum qeth_prot_versions prot)
{
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_CONNECTOR:
		case SECONDARY_CONNECTOR:
		case MULTICAST_ROUTER:
			return;
		default:
			goto out_inval;
		}
	} else {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_ROUTER:
		case SECONDARY_ROUTER:
			return;
		case MULTICAST_ROUTER:
			if (qeth_is_ipafunc_supported(card, prot,
						      IPA_OSA_MC_ROUTER))
				return;
		default:
			goto out_inval;
		}
	}
out_inval:
	PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
		   "Router status set to 'no router'.\n",
		   ((*type == PRIMARY_ROUTER)? "primary router" :
		    (*type == SECONDARY_ROUTER)? "secondary router" :
		    (*type == PRIMARY_CONNECTOR)? "primary connector" :
		    (*type == SECONDARY_CONNECTOR)? "secondary connector" :
		    (*type == MULTICAST_ROUTER)? "multicast router" :
		    "unknown"),
		   QETH_CARD_IFNAME(card));
	*type = NO_ROUTER;
}

static int
qeth_setrouting_v4(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "setrtg4");

	qeth_correct_routing_type(card, &card->options.route4.type,
				  QETH_PROT_IPV4);

	rc = qeth_send_setrouting(card, card->options.route4.type,
				  QETH_PROT_IPV4);
	if (rc) {
		card->options.route4.type = NO_ROUTER;
		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
			   "Type set to 'no router'.\n",
			   rc, QETH_CARD_IFNAME(card));
	}
	return rc;
}

static int
qeth_setrouting_v6(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "setrtg6");
#ifdef CONFIG_QETH_IPV6
	qeth_correct_routing_type(card, &card->options.route6.type,
				  QETH_PROT_IPV6);

	if ((card->options.route6.type == NO_ROUTER) ||
	    ((card->info.type == QETH_CARD_TYPE_OSAE) &&
	     (card->options.route6.type == MULTICAST_ROUTER) &&
	     !qeth_is_supported6(card, IPA_OSA_MC_ROUTER)))
		return 0;
	rc = qeth_send_setrouting(card, card->options.route6.type,
				  QETH_PROT_IPV6);
	if (rc) {
		card->options.route6.type = NO_ROUTER;
		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
			   "Type set to 'no router'.\n",
			   rc, QETH_CARD_IFNAME(card));
	}
#endif
	return rc;
}
int
qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
{
	int rc = 0;

	if (card->dev == NULL) {
		card->options.large_send = type;
		return 0;
	}

	netif_stop_queue(card->dev);
	card->options.large_send = type;
	switch (card->options.large_send) {
	case QETH_LARGE_SEND_EDDP:
		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
		break;
	case QETH_LARGE_SEND_TSO:
		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
			card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
		} else {
			PRINT_WARN("TSO not supported on %s. "
				   "large_send set to 'no'.\n",
				   QETH_CARD_IFNAME(card));
			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
			card->options.large_send = QETH_LARGE_SEND_NO;
			rc = -EOPNOTSUPP;
		}
		break;
	default: /* includes QETH_LARGE_SEND_NO */
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
		break;
	}

	netif_wake_queue(card->dev);
	return rc;
}
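/*
 * Illustrative call (assumption: this setter is typically driven from the
 * sysfs 'large_send' attribute rather than called directly):
 *
 *	rc = qeth_set_large_send(card, QETH_LARGE_SEND_TSO);
 *
 * On success the netdev advertises NETIF_F_TSO | NETIF_F_SG; if the card
 * does not support outbound TSO the option falls back to
 * QETH_LARGE_SEND_NO and -EOPNOTSUPP is returned.
 */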
/**
 * softsetup card: init IPA stuff
 */
static int
qeth_softsetup_card(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(setup, 2, "softsetp");

	if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		PRINT_WARN("LAN on card %s is offline! "
			   "Continuing softsetup.\n",
			   CARD_BUS_ID(card));
		card->lan_online = 0;
	} else
		card->lan_online = 1;
	if (card->options.layer2) {
		card->dev->features |=
			NETIF_F_HW_VLAN_FILTER |
			NETIF_F_HW_VLAN_TX |
			NETIF_F_HW_VLAN_RX;
		card->dev->flags |= IFF_MULTICAST | IFF_BROADCAST;
		card->info.broadcast_capable = 1;
		if ((rc = qeth_layer2_initialize(card))) {
			QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
			return rc;
		}
#ifdef CONFIG_QETH_VLAN
		qeth_layer2_process_vlans(card, 0);
#endif
		goto out;
	}
	if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
	    (card->options.large_send == QETH_LARGE_SEND_TSO))
		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
	else
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);

	if ((rc = qeth_setadapter_parms(card)))
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
	if ((rc = qeth_start_ipassists(card)))
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
	if ((rc = qeth_setrouting_v4(card)))
		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
	if ((rc = qeth_setrouting_v6(card)))
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
out:
	netif_stop_queue(card->dev);
	return 0;
}
#ifdef CONFIG_QETH_IPV6
static int
qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
		      unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0)
		card->info.unique_id = *((__u16 *)
			&cmd->data.create_destroy_addr.unique_id[6]);
	else {
		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
				       UNIQUE_ID_NOT_BY_CARD;
		PRINT_WARN("couldn't get a unique id from the card on device "
			   "%s (result=x%x), using default id. ipv6 "
			   "autoconfig on other lpars may lead to duplicate "
			   "ip addresses. please use manually "
			   "configured ones.\n",
			   CARD_BUS_ID(card), cmd->hdr.return_code);
	}
	return 0;
}
#endif

static int
qeth_put_unique_id(struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace, 2, "puniqeid");

	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
	    UNIQUE_ID_NOT_BY_CARD)
		return -1;
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
		card->info.unique_id;
	memcpy(&cmd->data.create_destroy_addr.unique_id[0],
	       card->dev->dev_addr, OSA_ADDR_LEN);
	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
#else
	card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
			       UNIQUE_ID_NOT_BY_CARD;
#endif
	return rc;
}
static void
qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
{
	struct qeth_ipaddr *addr, *tmp;
	unsigned long flags;

	QETH_DBF_TEXT(trace, 4, "clearip");
	spin_lock_irqsave(&card->ip_lock, flags);
	/* clear todo list */
	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
		list_del(&addr->entry);
		kfree(addr);
	}
	while (!list_empty(&card->ip_list)) {
		addr = list_entry(card->ip_list.next,
				  struct qeth_ipaddr, entry);
		list_del_init(&addr->entry);
		if (clean) {
			spin_unlock_irqrestore(&card->ip_lock, flags);
			qeth_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
		}
		if (!recover || addr->is_multicast) {
			kfree(addr);
			continue;
		}
		list_add_tail(&addr->entry, card->ip_tbd_list);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
void
qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static int
qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int
qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
	return wait_event_interruptible(card->wait_q,
			qeth_threads_running(card, threads) == 0);
}
static int
qeth_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(setup, 2, "stopcard");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
		return -ERESTARTSYS;
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode) {
			qeth_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_layer2_send_delmac(card, mac);
			QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
			if ((rc = qeth_send_stoplan(card)))
				QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
#ifdef CONFIG_QETH_VLAN
		if (card->options.layer2)
			qeth_layer2_process_vlans(card, 1);
#endif
		qeth_clear_ip_list(card, !card->use_hard_stop, 1);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		if ((!card->use_hard_stop) &&
		    (!card->options.layer2))
			if ((rc = qeth_put_unique_id(card)))
				QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
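/*
 * Teardown above walks the card state machine one step at a time:
 * UP -> SOFTSETUP (stop/close the netdev, optionally DELMAC/STOPLAN),
 * SOFTSETUP -> HARDSETUP (flush IP lists and pending IPA commands),
 * HARDSETUP -> DOWN (clear QDIO queues, working pool and command buffers).
 * use_hard_stop only decides whether the "soft" cleanup commands are sent
 * to the card before tearing the state down.
 */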
static int
qeth_get_unique_id(struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(setup, 2, "guniqeid");

	if (!qeth_is_supported(card, IPA_IPV6)) {
		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
				       UNIQUE_ID_NOT_BY_CARD;
		return 0;
	}

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
		card->info.unique_id;

	rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
#else
	card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
			       UNIQUE_ID_NOT_BY_CARD;
#endif
	return rc;
}
static void
qeth_print_status_with_portname(struct qeth_card *card)
{
	char dbf_text[15];
	int i;

	sprintf(dbf_text, "%s", card->info.portname + 1);
	for (i = 0; i < 8; i++)
		dbf_text[i] =
			(char) _ebcasc[(__u8) dbf_text[i]];
	dbf_text[8] = 0;
	printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
	       "with link type %s (portname: %s)\n",
	       CARD_RDEV_ID(card),
	       CARD_WDEV_ID(card),
	       CARD_DDEV_ID(card),
	       qeth_get_cardname(card),
	       (card->info.mcl_level[0]) ? " (level: " : "",
	       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
	       (card->info.mcl_level[0]) ? ")" : "",
	       qeth_get_cardname_short(card),
	       dbf_text);
}

static void
qeth_print_status_no_portname(struct qeth_card *card)
{
	if (card->info.portname[0])
		printk("qeth: Device %s/%s/%s is a%s "
		       "card%s%s%s\nwith link type %s "
		       "(no portname needed by interface).\n",
		       CARD_RDEV_ID(card),
		       CARD_WDEV_ID(card),
		       CARD_DDEV_ID(card),
		       qeth_get_cardname(card),
		       (card->info.mcl_level[0]) ? " (level: " : "",
		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		       (card->info.mcl_level[0]) ? ")" : "",
		       qeth_get_cardname_short(card));
	else
		printk("qeth: Device %s/%s/%s is a%s "
		       "card%s%s%s\nwith link type %s.\n",
		       CARD_RDEV_ID(card),
		       CARD_WDEV_ID(card),
		       CARD_DDEV_ID(card),
		       qeth_get_cardname(card),
		       (card->info.mcl_level[0]) ? " (level: " : "",
		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		       (card->info.mcl_level[0]) ? ")" : "",
		       qeth_get_cardname_short(card));
}
static void
qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level; OSA sets the first character to zero
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);

			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		card->info.mcl_level[0] = (char) _ebcasc[(__u8)
			card->info.mcl_level[0]];
		card->info.mcl_level[1] = (char) _ebcasc[(__u8)
			card->info.mcl_level[1]];
		card->info.mcl_level[2] = (char) _ebcasc[(__u8)
			card->info.mcl_level[2]];
		card->info.mcl_level[3] = (char) _ebcasc[(__u8)
			card->info.mcl_level[3]];
		card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	if (card->info.portname_required)
		qeth_print_status_with_portname(card);
	else
		qeth_print_status_no_portname(card);
}
static int
qeth_register_netdev(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 3, "regnetd");
	if (card->dev->reg_state != NETREG_UNINITIALIZED) {
		qeth_netdev_init(card->dev);
		return 0;
	}
	/* sysfs magic */
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}

static void
qeth_start_again(struct qeth_card *card, int recovery_mode)
{
	QETH_DBF_TEXT(setup, 2, "startag");

	if (recovery_mode) {
		qeth_open(card->dev);
	} else {
		rtnl_lock();
		dev_open(card->dev);
		rtnl_unlock();
	}
	/* this also sets saved unicast addresses */
	qeth_set_multicast_list(card->dev);
}
/* Layer 2 specific stuff */
#define IGNORE_PARAM_EQ(option, value, reset_value, msg) \
	if (card->options.option == value) { \
		PRINT_ERR("%s not supported with layer 2 " \
			  "functionality, ignoring option on read " \
			  "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
		card->options.option = reset_value; \
	}

#define IGNORE_PARAM_NEQ(option, value, reset_value, msg) \
	if (card->options.option != value) { \
		PRINT_ERR("%s not supported with layer 2 " \
			  "functionality, ignoring option on read " \
			  "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
		card->options.option = reset_value; \
	}

static void qeth_make_parameters_consistent(struct qeth_card *card)
{

	if (card->options.layer2) {
		if (card->info.type == QETH_CARD_TYPE_IQD) {
			PRINT_ERR("Device %s does not support "
				  "layer 2 functionality. "
				  "Ignoring layer2 option.\n",
				  CARD_BUS_ID(card));
		}
		IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
				 "Routing options are");
#ifdef CONFIG_QETH_IPV6
		IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
				 "Routing options are");
#endif
		IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
				QETH_CHECKSUM_DEFAULT,
				"Checksumming options are");
		IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
				 QETH_TR_BROADCAST_ALLRINGS,
				 "Broadcast mode options are");
		IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
				 QETH_TR_MACADDR_NONCANONICAL,
				 "Canonical MAC addr options are");
		IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
				 "Broadcast faking options are");
		IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
				 DEFAULT_ADD_HHLEN, "Option add_hhlen is");
		IGNORE_PARAM_NEQ(fake_ll, 0, 0, "Option fake_ll is");
	}
}
static int
__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = gdev->dev.driver_data;
	int rc = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(setup, 2, "setonlin");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
		PRINT_WARN("set_online of card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}

	recover_flag = card->state;
	if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
	    (rc = ccw_device_set_online(CARD_WDEV(card))) ||
	    (rc = ccw_device_set_online(CARD_DDEV(card)))) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		return -EIO;
	}

	if (card->options.layer2)
		qeth_make_parameters_consistent(card);

	if ((rc = qeth_hardsetup_card(card))) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_HARDSETUP;

	if (!(rc = qeth_query_ipassists(card, QETH_PROT_IPV4)))
		rc = qeth_get_unique_id(card);

	if (rc && card->options.layer2 == 0) {
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		goto out_remove;
	}
	qeth_print_status_message(card);
	if ((rc = qeth_register_netdev(card))) {
		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
		goto out_remove;
	}
	if ((rc = qeth_softsetup_card(card))) {
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;

	if ((rc = qeth_init_qdio_queues(card))) {
		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
		goto out_remove;
	}
	/*maybe it was set offline without ifconfig down
	 * we can also use this state for recovery purposes*/
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER)
		qeth_start_again(card, recovery_mode);
	qeth_notify_processes();
	return 0;
out_remove:
	card->use_hard_stop = 1;
	qeth_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	return -ENODEV;
}
static int
qeth_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_set_online(gdev, 0);
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
	{CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
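/*
 * The CU type/model pairs above select the supported adapters: 0x1731/0x01
 * is handled as an OSA-Express device (QETH_CARD_TYPE_OSAE) and 0x1731/0x05
 * as a HiperSockets device (QETH_CARD_TYPE_IQD), matching the driver
 * description at the top of this file.
 */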
struct device *qeth_root_dev = NULL;

struct ccwgroup_driver qeth_ccwgroup_driver = {
	.owner = THIS_MODULE,
	.name = "qeth",
	.driver_id = 0xD8C5E3C8,
	.probe = qeth_probe_device,
	.remove = qeth_remove_device,
	.set_online = qeth_set_online,
	.set_offline = qeth_set_offline,
};

struct ccw_driver qeth_ccw_driver = {
	.name = "qeth",
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
static void
qeth_unregister_dbf_views(void)
{
	if (qeth_dbf_setup)
		debug_unregister(qeth_dbf_setup);
	if (qeth_dbf_qerr)
		debug_unregister(qeth_dbf_qerr);
	if (qeth_dbf_sense)
		debug_unregister(qeth_dbf_sense);
	if (qeth_dbf_misc)
		debug_unregister(qeth_dbf_misc);
	if (qeth_dbf_data)
		debug_unregister(qeth_dbf_data);
	if (qeth_dbf_control)
		debug_unregister(qeth_dbf_control);
	if (qeth_dbf_trace)
		debug_unregister(qeth_dbf_trace);
}

static int
qeth_register_dbf_views(void)
{
	qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
					QETH_DBF_SETUP_PAGES,
					QETH_DBF_SETUP_NR_AREAS,
					QETH_DBF_SETUP_LEN);
	qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
				       QETH_DBF_MISC_PAGES,
				       QETH_DBF_MISC_NR_AREAS,
				       QETH_DBF_MISC_LEN);
	qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
				       QETH_DBF_DATA_PAGES,
				       QETH_DBF_DATA_NR_AREAS,
				       QETH_DBF_DATA_LEN);
	qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
					  QETH_DBF_CONTROL_PAGES,
					  QETH_DBF_CONTROL_NR_AREAS,
					  QETH_DBF_CONTROL_LEN);
	qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
					QETH_DBF_SENSE_PAGES,
					QETH_DBF_SENSE_NR_AREAS,
					QETH_DBF_SENSE_LEN);
	qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
				       QETH_DBF_QERR_PAGES,
				       QETH_DBF_QERR_NR_AREAS,
				       QETH_DBF_QERR_LEN);
	qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
					QETH_DBF_TRACE_PAGES,
					QETH_DBF_TRACE_NR_AREAS,
					QETH_DBF_TRACE_LEN);

	if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
	    (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
	    (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
	    (qeth_dbf_trace == NULL)) {
		qeth_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);

	debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);

	debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);

	debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);

	debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);

	debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);

	debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);

	return 0;
}
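/*
 * Pattern used above (s390 debug facility): debug_register() creates each
 * debug area, debug_register_view() attaches the hex/ascii view and
 * debug_set_level() sets the initial verbosity; qeth_unregister_dbf_views()
 * is the rollback path when any single registration fails.
 */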
#ifdef CONFIG_QETH_IPV6
extern struct neigh_table arp_tbl;
static struct neigh_ops *arp_direct_ops;
static int (*qeth_old_arp_constructor) (struct neighbour *);

static struct neigh_ops arp_direct_ops_template = {
	.family = AF_INET,
	.destructor = NULL,
	.solicit = NULL,
	.error_report = NULL,
	.output = dev_queue_xmit,
	.connected_output = dev_queue_xmit,
	.hh_output = dev_queue_xmit,
	.queue_xmit = dev_queue_xmit
};

static int
qeth_arp_constructor(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;
	struct in_device *in_dev;
	struct neigh_parms *parms;
	struct qeth_card *card;

	card = qeth_get_card_from_dev(dev);
	if (card == NULL)
		goto out;
	if ((card->options.layer2) ||
	    (card->dev->hard_header == qeth_fake_header))
		goto out;

	rcu_read_lock();
	in_dev = rcu_dereference(__in_dev_get(dev));
	if (in_dev == NULL) {
		rcu_read_unlock();
		return -EINVAL;
	}

	parms = in_dev->arp_parms;
	__neigh_parms_put(neigh->parms);
	neigh->parms = neigh_parms_clone(parms);
	rcu_read_unlock();

	neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
	neigh->nud_state = NUD_NOARP;
	neigh->ops = arp_direct_ops;
	neigh->output = neigh->ops->queue_xmit;
	return 0;
out:
	return qeth_old_arp_constructor(neigh);
}
#endif  /*CONFIG_QETH_IPV6*/
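/*
 * Rationale for the constructor override above: in layer-3 mode ARP is not
 * needed on a qeth interface (unless layer2 or fake_ll headers are in use),
 * so neighbours are marked NUD_NOARP and wired straight to dev_queue_xmit
 * via arp_direct_ops; every other case falls back to the saved original
 * arp_tbl constructor.
 */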
/*
 * IP address takeover related functions
 */
static void
qeth_clear_ipato_list(struct qeth_card *card)
{
	struct qeth_ipato_entry *ipatoe, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
		list_del(&ipatoe->entry);
		kfree(ipatoe);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
int
qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
{
	struct qeth_ipato_entry *ipatoe;
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(trace, 2, "addipato");
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (ipatoe->proto != new->proto)
			continue;
		if (!memcmp(ipatoe->addr, new->addr,
			    (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
		    (ipatoe->mask_bits == new->mask_bits)) {
			PRINT_WARN("ipato entry already exists!\n");
			rc = -EEXIST;
			break;
		}
	}
	if (!rc)
		list_add_tail(&new->entry, &card->ipato.entries);

	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}

void
qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
		     u8 *addr, int mask_bits)
{
	struct qeth_ipato_entry *ipatoe, *tmp;
	unsigned long flags;

	QETH_DBF_TEXT(trace, 2, "delipato");
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
		if (ipatoe->proto != proto)
			continue;
		if (!memcmp(ipatoe->addr, addr,
			    (proto == QETH_PROT_IPV4)? 4:16) &&
		    (ipatoe->mask_bits == mask_bits)) {
			list_del(&ipatoe->entry);
			kfree(ipatoe);
		}
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
static inline void
qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
	int i, j;
	u8 octet;

	for (i = 0; i < len; ++i) {
		octet = addr[i];
		for (j = 7; j >= 0; --j) {
			bits[i*8 + j] = octet & 1;
			octet >>= 1;
		}
	}
}
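/*
 * Worked example (based on the reconstruction above, extracting the LSB
 * first while filling the per-octet slots from index 7 down to 0): the
 * single octet 0xC0 expands to bits[0..7] = 1,1,0,0,0,0,0,0 -- i.e. the bit
 * string is most-significant-bit first, which is what the prefix-length
 * comparison in qeth_is_addr_covered_by_ipato() below relies on.
 */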
static int
qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				  (addr->proto == QETH_PROT_IPV4)? 4:16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					  (ipatoe->proto == QETH_PROT_IPV4) ?
					  4 : 16);
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}
/*
 * VIPA related functions
 */
int
qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;
	unsigned long flags;
	int rc = 0;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr) {
		if (proto == QETH_PROT_IPV4) {
			QETH_DBF_TEXT(trace, 2, "addvipa4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6) {
			QETH_DBF_TEXT(trace, 2, "addvipa6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_VIPA;
		ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
		ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
	} else
		return -ENOMEM;
	spin_lock_irqsave(&card->ip_lock, flags);
	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
		rc = -EEXIST;
	spin_unlock_irqrestore(&card->ip_lock, flags);
	if (rc) {
		PRINT_WARN("Cannot add VIPA. Address already exists!\n");
		return rc;
	}
	if (!qeth_add_ip(card, ipaddr))
		kfree(ipaddr);
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
	return rc;
}

void
qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr) {
		if (proto == QETH_PROT_IPV4) {
			QETH_DBF_TEXT(trace, 2, "delvipa4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6) {
			QETH_DBF_TEXT(trace, 2, "delvipa6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_VIPA;
	} else
		return;
	if (!qeth_delete_ip(card, ipaddr))
		kfree(ipaddr);
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
/*
 * proxy ARP related functions
 */
int
qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;
	unsigned long flags;
	int rc = 0;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr) {
		if (proto == QETH_PROT_IPV4) {
			QETH_DBF_TEXT(trace, 2, "addrxip4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6) {
			QETH_DBF_TEXT(trace, 2, "addrxip6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_RXIP;
		ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
		ipaddr->del_flags = 0;
	} else
		return -ENOMEM;
	spin_lock_irqsave(&card->ip_lock, flags);
	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
		rc = -EEXIST;
	spin_unlock_irqrestore(&card->ip_lock, flags);
	if (rc) {
		PRINT_WARN("Cannot add RXIP. Address already exists!\n");
		return rc;
	}
	if (!qeth_add_ip(card, ipaddr))
		kfree(ipaddr);
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
	return rc;
}

void
qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr) {
		if (proto == QETH_PROT_IPV4) {
			QETH_DBF_TEXT(trace, 2, "delrxip4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6) {
			QETH_DBF_TEXT(trace, 2, "delrxip6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_RXIP;
	} else
		return;
	if (!qeth_delete_ip(card, ipaddr))
		kfree(ipaddr);
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
/**
 * IP event handler
 */
static int
qeth_ip_event(struct notifier_block *this,
	      unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = (struct net_device *) ifa->ifa_dev->dev;
	struct qeth_ipaddr *addr;
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 3, "ipevent");
	card = qeth_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	if (card->options.layer2)
		return NOTIFY_DONE;

	addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
	if (addr != NULL) {
		addr->u.a4.addr = ifa->ifa_address;
		addr->u.a4.mask = ifa->ifa_mask;
		addr->type = QETH_IP_TYPE_NORMAL;
	} else
		goto out;

	switch (event) {
	case NETDEV_UP:
		if (!qeth_add_ip(card, addr))
			kfree(addr);
		break;
	case NETDEV_DOWN:
		if (!qeth_delete_ip(card, addr))
			kfree(addr);
		break;
	default:
		break;
	}
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
out:
	return NOTIFY_DONE;
}

static struct notifier_block qeth_ip_notifier = {
	.notifier_call = qeth_ip_event,
};

#ifdef CONFIG_QETH_IPV6
/**
 * IPv6 event handler
 */
static int
qeth_ip6_event(struct notifier_block *this,
	       unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = (struct net_device *)ifa->idev->dev;
	struct qeth_ipaddr *addr;
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 3, "ip6event");

	card = qeth_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	if (!qeth_is_supported(card, IPA_IPV6))
		return NOTIFY_DONE;

	addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
	if (addr != NULL) {
		memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
		addr->u.a6.pfxlen = ifa->prefix_len;
		addr->type = QETH_IP_TYPE_NORMAL;
	} else
		goto out;

	switch (event) {
	case NETDEV_UP:
		if (!qeth_add_ip(card, addr))
			kfree(addr);
		break;
	case NETDEV_DOWN:
		if (!qeth_delete_ip(card, addr))
			kfree(addr);
		break;
	default:
		break;
	}
	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
out:
	return NOTIFY_DONE;
}

static struct notifier_block qeth_ip6_notifier = {
	.notifier_call = qeth_ip6_event,
};
#endif /* CONFIG_QETH_IPV6 */
static int
__qeth_reboot_event_card(struct device *dev, void *data)
{
	struct qeth_card *card;

	card = (struct qeth_card *) dev->driver_data;
	qeth_clear_ip_list(card, 0, 0);
	qeth_qdio_clear_card(card, 0);
	return 0;
}

static int
qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	int ret;

	ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
				     __qeth_reboot_event_card);
	return ret ? NOTIFY_BAD : NOTIFY_DONE;
}

static struct notifier_block qeth_reboot_notifier = {
	.notifier_call = qeth_reboot_event,
};

static int
qeth_register_notifiers(void)
{
	int r;

	QETH_DBF_TEXT(trace, 5, "regnotif");
	if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
		return r;
	if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
		goto out_reboot;
#ifdef CONFIG_QETH_IPV6
	if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
		goto out_inetaddr;
#endif
	return 0;

#ifdef CONFIG_QETH_IPV6
out_inetaddr:
	unregister_inetaddr_notifier(&qeth_ip_notifier);
#endif
out_reboot:
	unregister_reboot_notifier(&qeth_reboot_notifier);
	return r;
}

/**
 * unregister all event notifiers
 */
static void
qeth_unregister_notifiers(void)
{

	QETH_DBF_TEXT(trace, 5, "unregnot");
	BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
	BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
#ifdef CONFIG_QETH_IPV6
	BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
#endif /* QETH_IPV6 */
}
#ifdef CONFIG_QETH_IPV6
static int
qeth_ipv6_init(void)
{
	qeth_old_arp_constructor = arp_tbl.constructor;
	write_lock(&arp_tbl.lock);
	arp_tbl.constructor = qeth_arp_constructor;
	write_unlock(&arp_tbl.lock);

	arp_direct_ops = (struct neigh_ops *)
		kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
	if (!arp_direct_ops)
		return -ENOMEM;

	memcpy(arp_direct_ops, &arp_direct_ops_template,
	       sizeof(struct neigh_ops));

	return 0;
}

static void
qeth_ipv6_uninit(void)
{
	write_lock(&arp_tbl.lock);
	arp_tbl.constructor = qeth_old_arp_constructor;
	write_unlock(&arp_tbl.lock);
	kfree(arp_direct_ops);
}
#endif /* CONFIG_QETH_IPV6 */

static void
qeth_sysfs_unregister(void)
{
	qeth_remove_driver_attributes();
	ccw_driver_unregister(&qeth_ccw_driver);
	ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
	s390_root_dev_unregister(qeth_root_dev);
}

/**
 * register qeth at sysfs
 */
static int
qeth_sysfs_register(void)
{
	int rc = 0;

	rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
	if (rc)
		return rc;
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		return rc;
	rc = qeth_create_driver_attributes();
	if (rc)
		return rc;
	qeth_root_dev = s390_root_dev_register("qeth");
	if (IS_ERR(qeth_root_dev)) {
		rc = PTR_ERR(qeth_root_dev);
		return rc;
	}
	return 0;
}
8255 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8256 version
, VERSION_QETH_C
, VERSION_QETH_H
,
8257 VERSION_QETH_MPC_H
, VERSION_QETH_MPC_C
,
8258 VERSION_QETH_FS_H
, VERSION_QETH_PROC_C
,
8259 VERSION_QETH_SYS_C
, QETH_VERSION_IPV6
,
8262 INIT_LIST_HEAD(&qeth_card_list
.list
);
8263 INIT_LIST_HEAD(&qeth_notify_list
);
8264 spin_lock_init(&qeth_notify_lock
);
8265 rwlock_init(&qeth_card_list
.rwlock
);
8267 if (qeth_register_dbf_views())
8269 if (qeth_sysfs_register())
8272 #ifdef CONFIG_QETH_IPV6
8273 if (qeth_ipv6_init()) {
8274 PRINT_ERR("Out of memory during ipv6 init.\n");
8277 #endif /* QETH_IPV6 */
8278 if (qeth_register_notifiers())
8280 if (qeth_create_procfs_entries())
8286 qeth_unregister_notifiers();
8288 #ifdef CONFIG_QETH_IPV6
8290 #endif /* QETH_IPV6 */
8292 qeth_sysfs_unregister();
8293 qeth_unregister_dbf_views();
8295 PRINT_ERR("Initialization failed");
8300 __exit
qeth_exit(void)
8302 struct qeth_card
*card
, *tmp
;
8303 unsigned long flags
;
8305 QETH_DBF_TEXT(trace
,1, "cleanup.");
8308 * Weed would not need to clean up our devices here, because the
8309 * common device layer calls qeth_remove_device for each device
8310 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8311 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8312 * qeth_remove_device called by the common device layer would otherwise
8313 * do a "hard" shutdown (card->use_hard_stop is set to one in
8314 * qeth_remove_device).
8317 read_lock_irqsave(&qeth_card_list
.rwlock
, flags
);
8318 list_for_each_entry_safe(card
, tmp
, &qeth_card_list
.list
, list
){
8319 read_unlock_irqrestore(&qeth_card_list
.rwlock
, flags
);
8320 qeth_set_offline(card
->gdev
);
8321 qeth_remove_device(card
->gdev
);
8324 read_unlock_irqrestore(&qeth_card_list
.rwlock
, flags
);
8325 #ifdef CONFIG_QETH_IPV6
8328 qeth_unregister_notifiers();
8329 qeth_remove_procfs_entries();
8330 qeth_sysfs_unregister();
8331 qeth_unregister_dbf_views();
8332 printk("qeth: removed\n");
8335 module_init(qeth_init
);
8336 module_exit(qeth_exit
);
8337 MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
8338 MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8339 "Copyright 2000,2003 IBM Corporation\n");
8341 MODULE_LICENSE("GPL");