// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if (card->state != CARD_STATE_DOWN)
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

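/* sbalf15 is the buffer's completion code from the SBAL flags: 0 means
 * success, the 4/16/17/18 group is reported as a peer-unreachable
 * condition, everything else as a general error. The TX_NOTIFY_DELAYED_*
 * variants are used when the result arrives asynchronously via the
 * completion queue rather than with the original TX completion.
 */
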
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;

				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}

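/* Buffers that completed in the PENDING state are parked on their slot's
 * ->next_pending chain until the matching QAOB arrives on the completion
 * queue. The walk above releases every chain member that has reached
 * HANDLED_DELAYED (or the whole chain, on a forced cleanup during
 * teardown/recovery).
 */
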
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0)  {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

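/* The cmpxchg in qeth_qdio_handle_aob() above distinguishes two orderings:
 * if the QAOB arrives while the buffer is still PRIMED (the regular TX
 * completion has not seen it yet), the notification is immediate;
 * otherwise the buffer already went through the PENDING path and the
 * notification is reported as delayed.
 */
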
static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
{
	return card->options.cq == QETH_CQ_ENABLED &&
	       card->qdio.c_q != NULL &&
	       queue != 0 &&
	       queue == card->qdio.no_in_queues - 1;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		atomic_set(&channel->irq_pending, 0);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}

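/* The qeth_get_cmd() above pairs with the qeth_put_cmd() in
 * qeth_issue_next_read_cb(): the long-lived card->read_cmd buffer is
 * re-armed for every inbound control message instead of being
 * reallocated each time.
 */
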
static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

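/* The iob is a single allocation holding the message data first and the
 * CCW chain behind it: __ccw_from_cmd() locates the CCWs at the 8-byte
 * aligned offset past ->length bytes of data, matching the
 * ALIGN(length, 8) + ccws * sizeof(struct ccw1) size computed above.
 */
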
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		qeth_clear_ipacmd_list(card);
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (!IS_IPA(tmp->data) ||
		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	qeth_put_cmd(iob);
	__qeth_issue_next_read(card);
}

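/* Replies are matched to their request by the IPA sequence number; a
 * non-IPA response simply matches the oldest waiter. Requests are only
 * removed from the waiter list by qeth_send_control_data(), so the
 * reference taken under card->lock keeps the request alive while its
 * callback runs outside the lock.
 */
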
static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

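/* Thread bookkeeping uses three bitmasks under thread_mask_lock:
 * "allowed" (set by the discipline via qeth_set_allowed_threads()),
 * "start" (requests queued via qeth_set_thread_start_bit()) and
 * "running". __qeth_do_run_thread() promotes a bit from start to running
 * once it is allowed, and all waiters on wait_q re-check after every
 * state change.
 */
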
void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		/* unsolicited interrupt */
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return;
	}

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			goto out;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}

out:
	wake_up(&card->wait_q);
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (error) {
			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
		} else {
			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;

	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;

	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static void qeth_setup_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int count = single ? 1 : card->dev->num_tx_queues;
	int rc;

	rtnl_lock();
	rc = netif_set_real_num_tx_queues(card->dev, count);
	rtnl_unlock();

	if (rc)
		return rc;

	if (card->qdio.no_out_queues == count)
		return 0;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (count == 1)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = count;
	return 0;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return rc;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
				"qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	qeth_setup_channel(&card->read);
	qeth_setup_channel(&card->write);
	qeth_setup_channel(&card->data);
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

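/* Helper for the IDX activate callbacks below: given our own function
 * level, compute the value the peer is expected to report back. The
 * encoding is not documented here; presumably the low byte selects the
 * base level and bits 8-9 select the 0x200/0x400 variant offset.
 */
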
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

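/* A minimal usage sketch (hypothetical callback name, for illustration
 * only):
 *
 *	static int my_cb(struct qeth_card *card, struct qeth_reply *reply,
 *			 unsigned long data)
 *	{
 *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *) data;
 *
 *		memcpy(reply->param, iob->data, sizeof(u32));
 *		return 0;	// 0 = last reply block, >0 = expect more
 *	}
 *
 *	rc = qeth_send_control_data(card, iob, my_cb, &result);
 *
 * Note that the iob reference is always consumed; the caller must not
 * touch the iob after this returns.
 */
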
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	struct ccw_dev_id dev_id;

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

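/* For IQD the RX buffer size must track the max MTU, so in_buf_size is
 * recomputed and the QDIO queues are freed above; they get reallocated
 * with the new geometry on the next hard setup of the device.
 */
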
static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

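/* The framesize values decoded above are the IQD wire encodings, which
 * apparently correspond to the supported HiperSockets frame sizes; an
 * unknown encoding yields 0, which qeth_update_max_mtu() later rejects
 * for IQD devices.
 */
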
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	if (IS_OSN(card))
		return QETH_PROT_OSN2;
	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->q = q;
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}
	return q;
}
static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}
static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}
static void qeth_create_qib_param_field(struct qeth_card *card,
		char *param_field)
{
	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
		char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}
void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM uses a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct list_head *plh;
	struct qeth_buffer_pool_entry *entry;
	int i, free;
	struct page *page;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(virt_to_page(entry->elements[i])) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
			   struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(virt_to_page(entry->elements[i])) > 1) {
			page = alloc_page(GFP_ATOMIC);
			if (!page) {
				return NULL;
			} else {
				free_page((unsigned long)entry->elements[i]);
				entry->elements[i] = page_address(page);
				QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
			}
		}
	}
	list_del_init(&entry->list);
	return entry;
}
static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       QETH_RX_PULL_LEN + ETH_HLEN);
		if (!buf->rx_skb)
			return 1;
	}

	pool_entry = qeth_find_free_buffer_pool_entry(card);
	if (!pool_entry)
		return 1;

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	buf->pool_entry = pool_entry;
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr = pool_entry->elements[i];
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}
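/* Illustrative note (not part of the driver): TX bulking applies only to
 * IQD unicast queues without a completion queue; everything else gets a
 * window of one PRIMED buffer per SIGA. A hypothetical setup call:
 */
#if 0
static void example_bulk_setup(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue)
{
	/* either 1, or the adapter's advertised multi-write count (mmwc) */
	queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
}
#endif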
int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init =
		card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
			  u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	enum qeth_link_types link_type = card->info.link_type;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length);

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}
/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}
static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
		card->info.link_type =
		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}
	card->options.adp.supported_funcs =
		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
	return 0;
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}
static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
	} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	} else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}
static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}
int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_query_switch_attributes_cb, sw_info);
}
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}
static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}
static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_hw_trap);
static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}
static void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int count;
	int i;
	int rc;
	int newcount = 0;

	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				card->reclaim_index = index;
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return;
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
	}
}
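/* A worked example of the wrap-around arithmetic above (illustrative
 * sketch, hypothetical numbers; not part of the driver):
 */
#if 0
static int example_requeue_count(void)
{
	int buf_count = 32;		/* pool entries */
	int next_buf_to_init = 4;	/* ring wrapped past buffer 127 */
	int index = 120;		/* first un-requeued buffer */

	/* index >= next_buf_to_init, so the wrapped branch applies: */
	return buf_count - (next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* = 32 - (4 + 128 - 120) = 20 buffers refilled with a single SIGA */
}
#endif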
static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
					      buffer_reclaim_work.work);

	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
	qeth_queue_input_buffer(card, card->reclaim_index);
}
static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}
/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}
/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}
/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}
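/* Illustrative sketch (not part of the driver): the two watermarks give
 * the packing state machine hysteresis. Assuming hypothetical values of
 * QETH_HIGH_WATERMARK_PACK = 45 and QETH_LOW_WATERMARK_PACK = 2, a queue
 * enters packing mode only once 45 buffers are in flight and leaves it
 * only after draining to 2, so a load hovering in between does not toggle
 * the mode on every packet:
 */
#if 0
static int example_packing_hysteresis(struct qeth_qdio_out_q *queue)
{
	int flush_cnt;

	/* arms packing at the high mark */
	qeth_switch_to_packing_if_needed(queue);
	/* disarms at the low mark, may leave a buffer to flush */
	flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
	return flush_cnt;	/* buffers the caller still has to flush */
}
#endif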
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_card *card = queue->card;
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;

		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;

		if (IS_IQD(queue->card))
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);

	/* Fake the TX completion interrupt: */
	if (IS_IQD(card))
		napi_schedule(&queue->napi);

	if (rc) {
		/* ignore temporary SIGA errors without busy condition */
		if (rc == -ENOBUFS)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
}
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}
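/* Illustrative sketch (not part of the driver): bulk_start/bulk_count
 * describe a contiguous window of PRIMED buffers in the 128-entry ring;
 * flushing hands the whole window to do_QDIO() and slides the window
 * start modulo the ring size. With hypothetical values:
 */
#if 0
static int example_bulk_window(void)
{
	int bulk_start = 126, bulk_count = 3;

	/* buffers 126, 127 and 0 were flushed together; the new start is: */
	return QDIO_BUFNR(bulk_start + bulk_count);	/* (129 & 127) == 1 */
}
#endif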
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt += qeth_prep_flush_pack_buffer(queue);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
				 unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	if (card->dev->flags & IFF_UP)
		napi_schedule(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc = 0;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	}
	if (card->options.cq == cq) {
		rc = 0;
		goto out;
	}

	qeth_free_qdio_queues(card);
	card->options.cq = cq;
	rc = 0;
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	if (!qeth_is_cq(card, queue))
		return;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr;

			phys_aob_addr = (unsigned long) buffer->element[e].addr;
			qeth_qdio_handle_aob(card, phys_aob_addr);
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     card->qdio.c_q->next_buf_to_init,
		     count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qeth_is_cq(card, queue))
		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
	else if (qdio_err)
		qeth_schedule_recovery(card);
}
static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct net_device *dev = card->dev;
	struct netdev_queue *txq;
	int i;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_tx_stop_all_queues(dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < (first_element + count); ++i) {
		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];

		qeth_handle_send_error(card, buf, qdio_error);
		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
	}

	atomic_sub(count, &queue->used_buffers);
	qeth_check_outbound_queue(queue);

	txq = netdev_get_tx_queue(dev, __queue);
	/* xmit may have observed the full-condition, but not yet stopped the
	 * txq. In which case the code below won't trigger. So before returning,
	 * xmit will re-check the txq's fill level and wake it up if needed.
	 */
	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
		netif_tx_wake_queue(txq);
}
/**
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (qeth_get_ip_version(skb)) {
		case 4:
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case 6:
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
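/* A worked example for the precedence mapping above (illustrative only):
 * under QETH_PRIO_Q_ING_PREC the two top IP-precedence bits select the
 * queue inverted, so higher precedence lands on a lower (= higher
 * priority) queue number. For a hypothetical TOS byte of 0xb8 (DSCP EF):
 */
#if 0
static int example_prec_queue(void)
{
	u8 tos = 0xb8;		/* top two bits are 10 */

	return ~tos >> 6 & 3;	/* inverted top bits: 01 -> queue 1 */
}
#endif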
/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}
/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 *			   to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);
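/* A worked example for the element counting above (illustrative sketch,
 * hypothetical addresses; not part of the driver): 3000 bytes of linear
 * data starting 3.5K into a 4K page cross one page boundary and therefore
 * need two buffer elements.
 */
#if 0
static unsigned int example_element_count(void)
{
	addr_t start = 0x1e00;		/* hypothetical skb->data */
	addr_t end = start + 3000;	/* headlen only, no frags */

	/* spans pages 1 and 2 -> returns 2 */
	return qeth_get_elements_for_range(start, end);
}
#endif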
#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)
/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}
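/* Illustrative sketch (not part of the driver): the common fast path is
 * the push case. For a hypothetical layer-2 frame whose headroom does not
 * cross a page boundary, the HW header is pushed in front of skb->data
 * and no cache object is needed:
 */
#if 0
static int example_add_hw_header(struct qeth_qdio_out_q *queue,
				 struct sk_buff *skb)
{
	struct qeth_hdr *hdr;
	unsigned int elements;
	int push_len;

	push_len = qeth_add_hw_header(queue, skb, &hdr, sizeof(*hdr), 0,
				      &elements);
	if (push_len < 0)		/* oversized and not linearizable */
		return push_len;
	/* push_len == sizeof(*hdr): hdr lives at skb->data;
	 * push_len == 0: hdr came from qeth_core_header_cache and must be
	 * freed by the caller if the TX attempt fails.
	 */
	return 0;
}
#endif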
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
				       struct qeth_qdio_out_buffer *buf,
				       bool is_first_elem, unsigned int offset)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = data;
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = data;
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated header element */
	if (hd_len) {
		int element = buf->next_element_to_fill;
		is_first_elem = false;

		buffer->element[element].addr = hdr;
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
		/* remember to free cache-allocated qeth_hdr: */
		buf->is_header[element] = ((void *)hdr != skb->data);
		buf->next_element_to_fill++;
	}

	return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
}
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements_needed)
{
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				atomic_set(&queue->state,
					   QETH_OUT_Q_UNLOCKED);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);
	else if (!atomic_read(&queue->set_pci_flags_count))
		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		tmp = qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
			tmp = qeth_prep_flush_pack_buffer(queue);
		if (tmp) {
			qeth_flush_buffers(queue, start_index, tmp);
			flush_count += tmp;
		}
	}
out:
	/* at this point the queue is UNLOCKED again */
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, int ipv,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  int ipv, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, ipv, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
				       struct qeth_reply *reply,
				       unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
	return 0;
}
int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   SETADP_DATA_SIZEOF(change_addr));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;
	int fallback = *(int *)reply->param;

	QETH_CARD_TEXT(card, 4, "setaccb");
	if (cmd->hdr.return_code)
		return -EIO;
	qeth_setadpparms_inspect_rc(cmd);

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (cmd->data.setadapterparms.hdr.return_code) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (card->options.isolation == ISOLATION_MODE_NONE) {
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
		} else {
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
		}
		break;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		break;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
			"enabled at the adjacent switch port");
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
			 "at the adjacent switch failed\n");
		break;
	default:
		/* this should never happen */
		if (fallback)
			card->options.isolation = card->options.prev_isolation;
		break;
	}
	return (cmd->hdr.return_code) ? -EIO : 0;
}
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
		enum qeth_ipa_isolation_modes isolation, int fallback)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = isolation;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       &fallback);
	QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
	return rc;
}
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "setactlo");

	if ((IS_OSD(card) || IS_OSX(card)) &&
	    qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		rc = qeth_setadpparms_set_access_ctrl(card,
			card->options.isolation, fallback);
		if (rc) {
			QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
					 rc, CARD_DEVID(card));
			rc = -EOPNOTSUPP;
		}
	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
		card->options.isolation = ISOLATION_MODE_NONE;

		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		rc = -EOPNOTSUPP;
	}
	return rc;
}
void qeth_tx_timeout(struct net_device *dev)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}
static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
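
/* SIOC_QETH_ADP_SET_SNMP_CONTROL handler: copies the SNMP request from
 * userspace into an adapter command, and lets qeth_snmp_command_cb()
 * stream the (possibly multi-part) reply back into qinfo.udata.
 */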
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv;
	char *resdata;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	priv = (struct qeth_qoat_priv *)reply->param;
	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
	resdata = (char *)data + 28;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy((priv->buffer + priv->response_len), resdata,
		resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
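
/* SIOC_QETH_QUERY_OAT handler: the response buffer is sized by
 * userspace, allocated with vzalloc(), and filled chunk by chunk by
 * qeth_setadpparms_query_oat_cb() above.
 */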
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (copy_from_user(&oat_data, udata,
	    sizeof(struct qeth_query_oat_data))) {
		rc = -EFAULT;
		goto out;
	}

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer) {
		rc = -ENOMEM;
		goto out;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
			       &priv);
	if (!rc) {
		if (is_compat_task())
			tmp = compat_ptr(oat_data.ptr);
		else
			tmp = (void __user *)(unsigned long)oat_data.ptr;

		if (copy_to_user(tmp, priv.buffer,
		    priv.response_len)) {
			rc = -EFAULT;
			goto out_free;
		}

		oat_data.response_len = priv.response_len;

		if (copy_to_user(udata, &oat_data,
		    sizeof(struct qeth_query_oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
out:
	return rc;
}
static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	carrier_info->card_type = card_info->card_type;
	carrier_info->port_mode = card_info->port_mode;
	carrier_info->port_speed = card_info->port_speed;
	return 0;
}
int qeth_query_card_info(struct qeth_card *card,
			 struct carrier_info *carrier_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
					(void *)carrier_info);
}
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	struct ccw_dev_id id;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_DDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
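
/* Read the configuration data and the SSQD descriptor for the data
 * channel, and derive whether the device can use a Completion Queue
 * (only available with IQD queue format and the matching QDIO
 * characteristics bits).
 */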
static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = ccw_device_set_online(ddev);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}
static void qeth_qdio_establish_cq(struct qeth_card *card,
				   struct qdio_buffer **in_sbal_ptrs,
				   void (**queue_start_poll)
					(struct ccw_device *, int,
					 unsigned long))
{
	int i;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int offset = QDIO_MAX_BUFFERS_PER_Q *
			     (card->qdio.no_in_queues - 1);

		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
			in_sbal_ptrs[offset + i] =
				card->qdio.c_q->bufs[i].buffer;

		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
	}
}
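
/* Allocate the qib parameter field and the input/output SBAL address
 * arrays, then hand everything to the qdio layer via qdio_allocate()
 * and qdio_establish(). On failure the queue state falls back to
 * QETH_QDIO_ALLOCATED, and all temporary arrays are freed on exit.
 */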
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_initialize init_data;
	char *qib_param_field;
	struct qdio_buffer **in_sbal_ptrs;
	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
	struct qdio_buffer **out_sbal_ptrs;
	int i, j, k;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
	if (!qib_param_field) {
		rc =  -ENOMEM;
		goto out_free_nothing;
	}

	qeth_create_qib_param_field(card, qib_param_field);
	qeth_create_qib_param_field_blkt(card, qib_param_field);

	in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
			       sizeof(void *),
			       GFP_KERNEL);
	if (!in_sbal_ptrs) {
		rc = -ENOMEM;
		goto out_free_qib_param;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
		in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;

	queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
				   GFP_KERNEL);
	if (!queue_start_poll) {
		rc = -ENOMEM;
		goto out_free_in_sbals;
	}
	for (i = 0; i < card->qdio.no_in_queues; ++i)
		queue_start_poll[i] = qeth_qdio_start_poll;

	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);

	out_sbal_ptrs =
		kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
			sizeof(void *),
			GFP_KERNEL);
	if (!out_sbal_ptrs) {
		rc = -ENOMEM;
		goto out_free_queue_start_poll;
	}

	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
			out_sbal_ptrs[k] =
				card->qdio.out_qs[i]->bufs[j]->buffer;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.cdev = CARD_DDEV(card);
	init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
					    QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = qib_param_field;
	init_data.no_input_qs = card->qdio.no_in_queues;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = qeth_qdio_input_handler;
	init_data.output_handler = qeth_qdio_output_handler;
	init_data.queue_start_poll_array = queue_start_poll;
	init_data.int_parm = (unsigned long) card;
	init_data.input_sbal_addr_array = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
	init_data.output_sbal_state_array = card->qdio.out_bufstates;
	init_data.scan_threshold = IS_IQD(card) ? 0 : 32;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
out:
	kfree(out_sbal_ptrs);
out_free_queue_start_poll:
	kfree(queue_start_poll);
out_free_in_sbals:
	kfree(in_sbal_ptrs);
out_free_qib_param:
	kfree(qib_param_field);
out_free_nothing:
	return rc;
}
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");
	qeth_clean_channel(&card->read);
	qeth_clean_channel(&card->write);
	qeth_clean_channel(&card->data);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	unregister_service_level(&card->qeth_service_level);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}
void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}
EXPORT_SYMBOL_GPL(qeth_trace_features);
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));
	rc = ccw_device_set_online(CARD_RDEV(card));
	if (rc)
		goto retriable;
	rc = ccw_device_set_online(CARD_WDEV(card));
	if (rc)
		goto retriable;
	rc = ccw_device_set_online(CARD_DDEV(card));
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	qeth_determine_capabilities(card);
	qeth_init_tokens(card);
	qeth_init_func_level(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported_funcs = 0;
	card->options.ipa6.supported_funcs = 0;
	card->options.adp.supported_funcs = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
			goto out;
		}
	}

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	rc = qeth_set_access_ctrl_online(card, 0);
	return rc;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
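
/* Walk the SBAL element chain of an input buffer and build one skb per
 * packet: the protocol headers (linear_len bytes) are copied into the
 * skb's linear part, and any remaining payload is attached as page
 * fragments when scatter-gather RX is in use.
 */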
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
		struct qeth_qdio_buffer *qethbuffer,
		struct qdio_buffer_element **__element, int *__offset,
		struct qeth_hdr **hdr)
{
	struct qdio_buffer_element *element = *__element;
	struct qdio_buffer *buffer = qethbuffer->buffer;
	unsigned int linear_len = 0;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct sk_buff *skb;
	int skb_len = 0;

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return NULL;
		element++;
		offset = 0;
	}

	*hdr = element->addr + offset;
	offset += sizeof(struct qeth_hdr);
	skb = NULL;

	switch ((*hdr)->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = (*hdr)->hdr.l2.pkt_length;
		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = (*hdr)->hdr.l3.length;
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = (*hdr)->hdr.osn.pdu_length;
		if (!IS_OSN(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		linear_len = skb_len;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return NULL;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > card->options.rx_sg_cb &&
		     !atomic_read(&card->force_alloc_skb) &&
		     !IS_OSN(card));

	if (use_rx_sg && qethbuffer->rx_skb) {
		/* QETH_CQ_ENABLED only: */
		skb = qethbuffer->rx_skb;
		qethbuffer->rx_skb = NULL;
	} else {
		if (!use_rx_sg)
			linear_len = skb_len;
		skb = napi_alloc_skb(&card->napi, linear_len + headroom);
	}

	if (!skb)
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
	else if (headroom)
		skb_reserve(skb, headroom);

walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = element->addr + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					dev_kfree_skb_any(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return NULL;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*__element = element;
	*__offset = offset;
	if (use_rx_sg) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}
	return skb;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
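
/* RX NAPI poll: fetch completed input buffers via
 * qdio_get_next_buffers(), feed them to the discipline's
 * process_rx_buffer() handler, and re-queue each buffer once it has
 * been fully consumed.
 */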
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	int work_done = 0;
	struct qeth_qdio_buffer *buffer;
	int done;
	int new_budget = budget;

	while (1) {
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
			card->rx.b_element =
				&card->qdio.in_q->bufs[card->rx.b_index]
				.buffer->element[0];
			card->rx.e_offset = 0;
		}

		while (card->rx.b_count) {
			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
			if (!(card->rx.qdio_err &&
			    qeth_check_qdio_errors(card, buffer->buffer,
			    card->rx.qdio_err, "qinerr")))
				work_done +=
					card->discipline->process_rx_buffer(
						card, new_budget, &done);
			else
				done = 1;

			if (done) {
				QETH_CARD_STAT_INC(card, rx_bufs);
				qeth_put_buffer_pool_entry(card,
					buffer->pool_entry);
				qeth_queue_input_buffer(card, card->rx.b_index);
				card->rx.b_count--;
				if (card->rx.b_count) {
					card->rx.b_index =
						QDIO_BUFNR(card->rx.b_index + 1);
					card->rx.b_element =
						&card->qdio.in_q
						->bufs[card->rx.b_index]
						.buffer->element[0];
					card->rx.e_offset = 0;
				}
			}

			if (work_done >= budget)
				goto out;
			else
				new_budget = budget - work_done;
		}
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card), 0))
		napi_schedule(napi);
out:
	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
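
/* TX completion for IQD devices: buffers flagged as PENDING by the
 * hardware keep their skbs until the Completion Queue confirms
 * delivery, so their queue slot is scrubbed and re-initialized instead
 * of being cleared right away.
 */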
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, bool error, int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;

	if (queue->bufstates && (queue->bufstates[bidx].flags &
				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
						   QETH_QDIO_BUF_PENDING) ==
		    QETH_QDIO_BUF_PRIMED)
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		/* prepare the queue slot for re-use: */
		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
		if (qeth_init_qdio_out_buf(queue, bidx)) {
			QETH_CARD_TEXT(card, 2, "outofbuf");
			qeth_schedule_recovery(card);
		}

		return;
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	qeth_clear_output_buffer(queue, buffer, error, budget);
}
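
/* TX NAPI poll for IQD devices: inspects the output queue, completes
 * all finished buffers, updates BQL accounting and wakes a stopped txq
 * once it has drained below the full-condition.
 */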
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0))
				qeth_tx_arm_timer(queue);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += skb_queue_len(&buffer->skb_list);
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			qeth_iqd_tx_complete(queue, bidx, error, budget);
			qeth_cleanup_handled_pending(queue, bidx, false);
		}

		netdev_tx_completed_queue(txq, packets, bytes);
		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}
int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
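
/* Convenience wrapper for assist commands that carry at most one
 * 32-bit flags word as command data.
 */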
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_core_load_discipline(struct qeth_card *card,
		enum qeth_discipline_id discipline)
{
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}
void qeth_core_free_discipline(struct qeth_card *card)
{
	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}
const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};
#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);
static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}
static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}
static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card)) {
			dev->features |= NETIF_F_SG;
			if (netif_set_real_num_tx_queues(dev,
							 QETH_IQD_MIN_TXQ)) {
				free_netdev(dev);
				return NULL;
			}
		}
	}

	return dev;
}
struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_chp_desc;
		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			qeth_core_free_discipline(card);
			goto err;
		}
	}
	rc = card->discipline->set_online(gdev);
err:
	return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	return card->discipline->set_offline(gdev);
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
static int qeth_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;

	card->discipline->set_offline(gdev);
	return 0;
}
static int qeth_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	rc = card->discipline->set_online(gdev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
	return rc;
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
	.prepare = NULL,
	.complete = NULL,
	.freeze = qeth_suspend,
	.thaw = qeth_resume,
	.restore = qeth_resume,
};
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
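
/* Central ndo_do_ioctl handler: qeth-specific ioctls are handled here,
 * everything else is forwarded to the discipline's do_ioctl callback.
 */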
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
	if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		dev_warn(&card->gdev->dev,
			 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
			 QETH_CARD_IFNAME(card));
	return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot)
{
	return on ? qeth_set_csum_on(card, cstype, prot) :
		    qeth_set_csum_off(card, cstype, prot);
}
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}
*card
, bool on
,
6141 enum qeth_prot_versions prot
)
6143 return on
? qeth_set_tso_on(card
, prot
) : qeth_set_tso_off(card
, prot
);
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return rc;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	return QETH_IQD_MIN_UCAST_TXQ;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
		return -EIO;

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i) {
			napi_disable(&queue->napi);
			del_timer_sync(&queue->timer);
		}

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		/* Queues may get re-allocated, so remove the NAPIs here. */
		qeth_for_each_output_queue(card, queue, i)
			netif_napi_del(&queue->napi);
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");