// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
			    8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
			    8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

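/* kmem caches shared by all qeth devices: TX headers copied out of skbs,
 * outbound buffer tracking structs, and QAOBs for asynchronous TX completion.
 */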
static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}

	return 0;
}

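/* Grow or shrink the RX buffer pool to @count entries. Additional entries
 * are staged on a local list first, so that a failed allocation leaves the
 * existing pool untouched.
 */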
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until pool is allocated: */
	if (list_empty(&pool->entry_list))
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			return rc;
		}
	}

	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		if (!card->qdio.c_q) {
			card->qdio.c_q = qeth_alloc_qdio_queue();
			if (!card->qdio.c_q) {
				dev_err(&card->gdev->dev,
					"Failed to create completion queue\n");
				return -ENOMEM;
			}
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		qeth_free_cq(card);
	}

	return 0;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = virt_to_dma32(data);
}

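/* Re-arm the long-running READ on the control channel; it receives cmd
 * replies and unsolicited events. Caller holds the ccwdev lock.
 */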
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

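/* The adapter reports which addresses terminate on the adapter itself; track
 * them so the TX path can recognize next-hops that are reachable internally
 * (see qeth_next_hop_is_local_v4/v6 below).
 */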
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static void qeth_default_link_info(struct qeth_card *card)
{
	struct qeth_link_info *link_info = &card->info.link_info;

	QETH_CARD_TEXT(card, 2, "dftlinfo");
	link_info->duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			link_info->speed = SPEED_100;
			link_info->port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			link_info->speed = SPEED_1000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			link_info->speed = SPEED_10000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			link_info->speed = SPEED_25000;
			link_info->port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev,
				 "Unknown link type %x\n",
				 card->info.link_type);
			link_info->speed = SPEED_UNKNOWN;
			link_info->port = PORT_OTHER;
		}

		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
	}
}

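/* Inspect an inbound IPA PDU: pass replies on for cmd matching, and consume
 * unsolicited events (link changes, bridgeport and local-address updates)
 * right here. A NULL return means the PDU has been fully handled.
 */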
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
			qeth_default_link_info(card);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

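/* Completion callback for the long-running READ: sanity-check the IDX
 * response, match the PDU against the pending cmd list, run the requestor's
 * callback and then re-arm the READ.
 */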
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}

/**
 * qeth_irq() - qeth interrupt handler
 * @cdev: ccw device
 * @intparm: expect pointer to iob
 * @irb: Interruption Response Block
 *
 * In the good path:
 * corresponding qeth channel is locked with last used iob as active_cmd.
 * But this function is also called for error interrupts.
 *
 * Caller ensures that:
 * Interrupts are disabled; ccw device lock is held;
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		/* unsolicited interrupt */
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		qeth_unlock_channel(card, channel);
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			qeth_unlock_channel(card, channel);
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
		/* channel command hasn't started: retry.
		 * active_cmd is still set to last iob
		 */
		QETH_CARD_TEXT(card, 2, "irqcc1");
		rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
					      (addr_t)iob, 0, 0, iob->timeout);
		if (rc) {
			QETH_DBF_MESSAGE(2,
					 "ccw retry on %x failed, rc = %i\n",
					 CARD_DEVID(card), rc);
			QETH_CARD_TEXT_(card, 2, " err%d", rc);
			qeth_unlock_channel(card, channel);
			qeth_cancel_cmd(iob, rc);
		}
		return;
	}

	qeth_unlock_channel(card, channel);

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = dma64_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

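/* With a completion queue, TX buffers may complete asynchronously via a
 * QAOB. Reap the buffers parked on pending_bufs whose QAOB has completed,
 * or all of them when draining the queue.
 */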
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = dma64_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

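/* Under z/VM, ask CP via DIAG 0x26C which transport layer the virtual NIC
 * is coupled with, so that the matching discipline can be loaded.
 */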
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

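/* Seed the sequence numbers and well-known initial token values used during
 * the IDX/MPC handshake with the adapter.
 */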
static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 *  @cb_card:			pointer to the qeth_card structure
 *  @cb_reply:			pointer to the qeth_reply structure
 *  @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

*card
,
2195 struct qeth_cmd_buffer
*iob
,
2196 unsigned int data_length
)
2198 struct qeth_channel
*channel
= iob
->channel
;
2202 QETH_CARD_TEXT(card
, 2, "idxrdcb");
2204 rc
= qeth_idx_check_activate_response(card
, channel
, iob
);
2208 memcpy(&peer_level
, QETH_IDX_ACT_FUNC_LEVEL(iob
->data
), 2);
2209 if (peer_level
!= qeth_peer_func_level(card
->info
.func_level
)) {
2210 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2211 CCW_DEVID(channel
->ccwdev
),
2212 card
->info
.func_level
, peer_level
);
2217 memcpy(&card
->token
.issuer_rm_r
,
2218 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob
->data
),
2219 QETH_MPC_TOKEN_LENGTH
);
2220 memcpy(&card
->info
.mcl_level
[0],
2221 QETH_IDX_REPLY_LEVEL(iob
->data
), QETH_MCL_LENGTH
);
2224 qeth_notify_cmd(iob
, rc
);
2228 static void qeth_idx_activate_write_channel_cb(struct qeth_card
*card
,
2229 struct qeth_cmd_buffer
*iob
,
2230 unsigned int data_length
)
2232 struct qeth_channel
*channel
= iob
->channel
;
2236 QETH_CARD_TEXT(card
, 2, "idxwrcb");
2238 rc
= qeth_idx_check_activate_response(card
, channel
, iob
);
2242 memcpy(&peer_level
, QETH_IDX_ACT_FUNC_LEVEL(iob
->data
), 2);
2243 if ((peer_level
& ~0x0100) !=
2244 qeth_peer_func_level(card
->info
.func_level
)) {
2245 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2246 CCW_DEVID(channel
->ccwdev
),
2247 card
->info
.func_level
, peer_level
);
2252 qeth_notify_cmd(iob
, rc
);
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	if (PAGE_SIZE > 4096)
		port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			    unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

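/* For IQD, the ULP_ENABLE response encodes the frame size instead of a
 * plain MTU; translate the known encodings into byte values.
 */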
static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = link_type;
	}
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	unsigned int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
			goto err_out_bufs;
	}

	return q;

err_out_bufs:
	while (i > 0)
		qeth_free_out_buf(q->bufs[--i]);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	unsigned int i;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	/* completion */
	if (qeth_alloc_cq(card))
		return -ENOMEM;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_buffer_pool;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		INIT_LIST_HEAD(&queue->pending_bufs);
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
	}

	return 0;

out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_buffer_pool:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	qeth_free_cq(card);
	return -ENOMEM;
}

static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	qeth_free_cq(card);

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb) {
			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
			card->qdio.in_q->bufs[j].rx_skb = NULL;
		}
	}

	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 */
		if (!card->info.mcl_level[0]) {
			scnprintf(card->info.mcl_level,
				  sizeof(card->info.mcl_level),
				  "%02x%02x",
				  card->info.mcl_level[2],
				  card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

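/* A pool entry can be handed back to HW once none of its pages is still
 * referenced elsewhere (page_count() == 1); entries with shared pages get
 * those pages replaced before reuse.
 */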
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}

	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
				  struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr = u64_to_dma64(
			page_to_phys(pool_entry->elements[i]));
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

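/* How many TX buffers may be flushed with a single doorbell: only plain
 * IQD unicast queues without completion queueing can batch, limited by
 * the mmwc value from the device's SSQD.
 */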
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}

static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

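/* Wrap an IPA command into its MPC transport PDU: one WRITE CCW carries
 * the PDU header plus the command, addressed via the ULP connection token.
 */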
static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/*
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}

static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
					 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

static int qeth_query_ipassists_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}

static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

static int qeth_query_switch_attributes_cb(struct qeth_card *card,
					   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_query_switch_attributes_cb, sw_info);
}

struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
				    struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

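/* Gather the identifiers (CHPID, subchannel set, device number, LPAR
 * number and z/VM guest name) that tag a hardware trap record.
 */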
static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}

	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
						  queue->next_buf_to_init,
						  count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
				   struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

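/* Hand a range of primed buffers over to the device. On IQD, a QAOB is
 * attached when async completion via the CQ is possible; on OSA, a PCI
 * request flag may be set to trigger a TX completion interrupt.
 */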
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
							     GFP_ATOMIC);
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
					   index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		if (IS_IQD(card)) {
			frames = READ_ONCE(queue->max_coalesced_frames);
			usecs = READ_ONCE(queue->coalesce_usecs);

			if (frames && queue->coalesced_frames >= frames) {
				napi_schedule(&queue->napi);
				queue->coalesced_frames = 0;
				QETH_TXQ_STAT_INC(queue, coal_frames);
			} else if (qeth_use_tx_irqs(card) &&
				   atomic_read(&queue->used_buffers) >= 32) {
				/* Old behaviour carried over from the qdio layer: */
				napi_schedule(&queue->napi);
				QETH_TXQ_STAT_INC(queue, coal_frames);
			} else if (usecs) {
				qeth_tx_arm_timer(queue, usecs);
			}
		}
		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
		return -1;

	card->options.cq = cq;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

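/* Handle a QAOB that the CQ reported as completed: mark it DONE and, if
 * the TX buffer was still pending, kick the owning queue's NAPI instance.
 */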
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			dma64_t phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
					  cq->next_buf_to_init, count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;

	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
}

/*
 * Note: Function assumes that we have 4 outbound queues.
 */
static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}

/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be placed on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 * @elements: returns the required number of buffer elements for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;

	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}

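/* Decide whether a packet may be merged into the current bulk buffer:
 * all packets in one buffer must share the same destination (MAC for L2,
 * next hop for L3) and the same VLAN.
 */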
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_dma64(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			__set_bit(element, buf->from_kmem_cache);
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_dma64(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_dma64(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}

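/* TX path for IQD: packets are gathered into bulk buffers, and the queue
 * is flushed once the buffer is full, bulking is not possible, or BQL
 * signals the end of an xmit_more batch.
 */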
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

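/* TX path for OSA: fills the next_buf_to_fill ring one buffer at a time,
 * optionally packing several small packets per buffer under high load.
 */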
static int qeth_do_send_packet(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue,
			       struct sk_buff *skb, struct qeth_hdr *hdr,
			       unsigned int offset, unsigned int hd_len,
			       unsigned int elements_needed)
{
	unsigned int start_index = queue->next_buf_to_fill;
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);

out:
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}

static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, __be16 proto,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  __be16 proto, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, proto, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
				       struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}

*card
, bool enable
)
4399 enum qeth_ipa_promisc_modes mode
= enable
? SET_PROMISC_MODE_ON
:
4400 SET_PROMISC_MODE_OFF
;
4401 struct qeth_cmd_buffer
*iob
;
4402 struct qeth_ipa_cmd
*cmd
;
4404 QETH_CARD_TEXT(card
, 4, "setprom");
4405 QETH_CARD_TEXT_(card
, 4, "mode:%x", mode
);
4407 iob
= qeth_get_adapter_cmd(card
, IPA_SETADP_SET_PROMISC_MODE
,
4408 SETADP_DATA_SIZEOF(mode
));
4411 cmd
= __ipa_cmd(iob
);
4412 cmd
->data
.setadapterparms
.data
.mode
= mode
;
4413 qeth_send_ipa_cmd(card
, iob
, qeth_setadp_promisc_mode_cb
, NULL
);
4415 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode
);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
					      struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
	return 0;
}

4439 int qeth_setadpparms_change_macaddr(struct qeth_card
*card
)
4442 struct qeth_cmd_buffer
*iob
;
4443 struct qeth_ipa_cmd
*cmd
;
4445 QETH_CARD_TEXT(card
, 4, "chgmac");
4447 iob
= qeth_get_adapter_cmd(card
, IPA_SETADP_ALTER_MAC_ADDRESS
,
4448 SETADP_DATA_SIZEOF(change_addr
));
4451 cmd
= __ipa_cmd(iob
);
4452 cmd
->data
.setadapterparms
.data
.change_addr
.cmd
= CHANGE_ADDR_READ_MAC
;
4453 cmd
->data
.setadapterparms
.data
.change_addr
.addr_size
= ETH_ALEN
;
4454 ether_addr_copy(cmd
->data
.setadapterparms
.data
.change_addr
.addr
,
4455 card
->dev
->dev_addr
);
4456 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_setadpparms_change_macaddr_cb
,
4460 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr
);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
					       struct qeth_reply *reply,
					       unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setaccb");

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (qeth_setadpparms_inspect_rc(cmd)) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is deactivated\n");
		else
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is activated\n");
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		return -EPERM;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
			"enabled at the adjacent switch port\n");
		return -EREMOTEIO;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
			 "at the adjacent switch failed\n");
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
	default:
		return -EIO;
	}
}

int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
	}

	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}
static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}
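/* Layout note (illustrative): the ioctl payload is a struct
 * qeth_snmp_ureq, i.e. a header followed by the raw SNMP command:
 *
 *	struct qeth_snmp_ureq {
 *		struct qeth_snmp_ureq_hdr hdr;	// data_len, req_len
 *		struct qeth_snmp_cmd cmd;	// request bytes
 *	};
 *
 * qeth_snmp_command_cb() appends each reply part behind that header,
 * which is why udata_offset starts at sizeof(struct qeth_snmp_ureq_hdr).
 */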
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	QETH_CARD_TEXT(card, 2, "qoatincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
		break;
	default:
		link_info->port = PORT_OTHER;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	}

	return 0;
}

static void qeth_init_link_info(struct qeth_card *card)
{
	qeth_default_link_info(card);

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
			}
		}
	}
}
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		eth_hw_addr_set(card->dev, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
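/* Note (illustrative): DIAG 0x26C is a z/VM hypervisor service, and its
 * request/response buffers must be 31-bit addressable, which is why both
 * are allocated with GFP_DMA above. On a failed version or MAC check the
 * netdevice simply keeps its previously assigned address.
 */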
static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}

static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int no_input_qs = 1;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED) {
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
		no_input_qs++;
	}

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
					    QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = (void *)qib_parms;
	init_data.no_input_qs = no_input_qs;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = qeth_qdio_input_handler;
	init_data.output_handler = qeth_qdio_output_handler;
	init_data.irq_poll = qeth_qdio_poll;
	init_data.int_parm = (unsigned long) card;
	init_data.input_sbal_addr_array = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
			goto out;
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
			goto out;
		}
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	qeth_init_link_info(card);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}
static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = disc->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	return rc;
}

int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	disc->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;
	qeth_default_link_info(card);

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
	const struct qeth_discipline *disc;
	struct qeth_card *card = data;
	int rc;

	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		qeth_set_offline(card, disc, true);
		ccwgroup_set_offline(card->gdev, false);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif
static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     bool uses_frags, bool is_cso)
{
	struct napi_struct *napi = &card->napi;

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}

static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;

	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;
	bool is_cso;

next_packet:
	element = &buffer->element[*element_no];

	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = dma64_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;

		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;

		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = dma64_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						kfree_skb(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

#if IS_ENABLED(CONFIG_QETH_L3)
	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
		qeth_l3_rebuild_skb(card, skb, hdr);
#endif

	qeth_receive_skb(card, skb, uses_frags, is_cso);
	return 0;
}
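/* Note (illustrative): the rx_copybreak heuristic above copies small
 * frames into a freshly allocated linear skb, while larger frames are
 * attached as page frags to a napi_get_frags() skb (use_rx_sg). With
 * completion queueing enabled, SG is forced so that QDIO buffers can be
 * recycled independently of skb lifetime.
 */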
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}

static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count =
				qdio_inspect_input_queue(CARD_DDEV(card), 0,
							 &card->rx.b_index,
							 &card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
						     &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
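/* NAPI contract sketch (illustrative): qeth_poll() follows the usual
 * pattern - consume up to @budget packets, and only re-enable the device
 * IRQ once a poll completes with work_done < budget:
 *
 *	work_done = qeth_rx_poll(card, budget);
 *	if (work_done >= budget)
 *		return work_done;		// stay in polling mode
 *	if (napi_complete_done(napi, work_done) &&
 *	    qdio_start_irq(CARD_DDEV(card)))
 *		napi_schedule(napi);		// raced with new data: rearm
 */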
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
						      &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}
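/* Note (illustrative): on IQD devices this completion path pairs
 * netdev_tx_completed_queue() with the netdev_tx_sent_queue() accounting
 * done on the xmit side, so BQL can bound the in-flight byte count per
 * txq. Non-IQD devices instead rely on qeth_check_outbound_queue() to
 * flush partially filled buffers.
 */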
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  hdr));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
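/* Usage sketch (illustrative): callers build an assist command and send
 * it with their own callback, e.g. starting an assist with no payload:
 *
 *	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
 *				       IPA_CMD_ASS_START, 0, QETH_PROT_IPV4);
 *	if (!iob)
 *		return -ENOMEM;
 *	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
 *
 * (This mirrors qeth_set_tso_on() further below.)
 */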
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}
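/* Note (illustrative): these areas feed the s390 debug feature, so at
 * runtime the traces appear under /sys/kernel/debug/s390dbf/, e.g.
 * qeth_setup/hex_ascii. Raising the level from userspace,
 *
 *	echo 5 > /sys/kernel/debug/s390dbf/qeth_msg/level
 *
 * makes the QETH_DBF_* macros record more verbose events.
 */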
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}
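/* Note (illustrative): symbol_get() pins the discipline module, so
 * qeth_l2/qeth_l3 cannot be unloaded while a card uses them;
 * qeth_remove_discipline() drops that reference via symbol_put(). The
 * try_then_request_module() wrapper first tries the already-loaded
 * symbol and only then asks modprobe for "qeth_l2"/"qeth_l3".
 */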
static const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;

	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		  dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q) {
		rc = -ENOMEM;
		goto err_rx_queue;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	qeth_free_qdio_queue(card->qdio.in_q);
err_rx_queue:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	qeth_free_qdio_queue(card->qdio.in_q);
	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
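/* Usage sketch (illustrative): the MII emulation above serves classic
 * mii-tool style userspace via the SIOCGMIIREG dance:
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };	// hypothetical name
 *	struct mii_ioctl_data *mii = if_mii(&ifr);
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// phy_id <- 0
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// val_out <- emulated BMSR bits
 */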
6578 static int qeth_start_csum_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
6581 struct qeth_ipa_cmd
*cmd
= (struct qeth_ipa_cmd
*) data
;
6582 u32
*features
= reply
->param
;
6584 if (qeth_setassparms_inspect_rc(cmd
))
6587 *features
= cmd
->data
.setassparms
.data
.flags_32bit
;
6591 static int qeth_set_csum_off(struct qeth_card
*card
, enum qeth_ipa_funcs cstype
,
6592 enum qeth_prot_versions prot
)
6594 return qeth_send_simple_setassparms_prot(card
, cstype
, IPA_CMD_ASS_STOP
,
6598 static int qeth_set_csum_on(struct qeth_card
*card
, enum qeth_ipa_funcs cstype
,
6599 enum qeth_prot_versions prot
, u8
*lp2lp
)
6601 u32 required_features
= QETH_IPA_CHECKSUM_UDP
| QETH_IPA_CHECKSUM_TCP
;
6602 struct qeth_cmd_buffer
*iob
;
6603 struct qeth_ipa_caps caps
;
6607 /* some L3 HW requires combined L3+L4 csum offload: */
6608 if (IS_LAYER3(card
) && prot
== QETH_PROT_IPV4
&&
6609 cstype
== IPA_OUTBOUND_CHECKSUM
)
6610 required_features
|= QETH_IPA_CHECKSUM_IP_HDR
;
6612 iob
= qeth_get_setassparms_cmd(card
, cstype
, IPA_CMD_ASS_START
, 0,
6617 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_start_csum_cb
, &features
);
6621 if ((required_features
& features
) != required_features
) {
6622 qeth_set_csum_off(card
, cstype
, prot
);
6626 iob
= qeth_get_setassparms_cmd(card
, cstype
, IPA_CMD_ASS_ENABLE
,
6627 SETASS_DATA_SIZEOF(flags_32bit
),
6630 qeth_set_csum_off(card
, cstype
, prot
);
6634 if (features
& QETH_IPA_CHECKSUM_LP2LP
)
6635 required_features
|= QETH_IPA_CHECKSUM_LP2LP
;
6636 __ipa_cmd(iob
)->data
.setassparms
.data
.flags_32bit
= required_features
;
6637 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_setassparms_get_caps_cb
, &caps
);
6639 qeth_set_csum_off(card
, cstype
, prot
);
6643 if (!qeth_ipa_caps_supported(&caps
, required_features
) ||
6644 !qeth_ipa_caps_enabled(&caps
, required_features
)) {
6645 qeth_set_csum_off(card
, cstype
, prot
);
6649 dev_info(&card
->gdev
->dev
, "HW Checksumming (%sbound IPv%d) enabled\n",
6650 cstype
== IPA_INBOUND_CHECKSUM
? "in" : "out", prot
);
6653 *lp2lp
= qeth_ipa_caps_enabled(&caps
, QETH_IPA_CHECKSUM_LP2LP
);
6658 static int qeth_set_ipa_csum(struct qeth_card
*card
, bool on
, int cstype
,
6659 enum qeth_prot_versions prot
, u8
*lp2lp
)
6661 return on
? qeth_set_csum_on(card
, cstype
, prot
, lp2lp
) :
6662 qeth_set_csum_off(card
, cstype
, prot
);
6665 static int qeth_start_tso_cb(struct qeth_card
*card
, struct qeth_reply
*reply
,
6668 struct qeth_ipa_cmd
*cmd
= (struct qeth_ipa_cmd
*) data
;
6669 struct qeth_tso_start_data
*tso_data
= reply
->param
;
6671 if (qeth_setassparms_inspect_rc(cmd
))
6674 tso_data
->mss
= cmd
->data
.setassparms
.data
.tso
.mss
;
6675 tso_data
->supported
= cmd
->data
.setassparms
.data
.tso
.supported
;
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

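/* TSO follows the same negotiate-then-verify pattern as checksum
 * offload: ASS_START reports the supported send modes and the MSS,
 * ASS_ENABLE requests QETH_IPA_LARGE_SEND_TCP, and the returned caps
 * are checked before the feature is considered active. On any failure
 * the Assist is stopped again via qeth_set_tso_off().
 */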
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

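/* RX checksum offload spans both IP versions, so the two per-protocol
 * results must be folded into one rc. Note the asymmetry below: when
 * enabling, one working Assist is good enough; when disabling, both
 * Assists must actually stop.
 */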
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

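/* Toggle each changed offload through its IPA helper. A toggle that
 * fails is cleared from the local 'changed' mask, so the final
 * comparison against (dev->features ^ features) tells whether every
 * requested change took effect; on partial failure dev->features is
 * patched up to reflect what the hardware actually accepted.
 */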
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return rc;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

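/* Mask out any feature bit whose Start/Stop Assist is not available on
 * this card, so the stack never requests an offload that the IPA layer
 * cannot back. RX csum only needs one of the two INBOUND Assists, since
 * qeth_set_ipa_rx_csum() copes with a single-protocol setup.
 */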
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 * a (small) linear part for the headers, and
	 * page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

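/* Worked example for the prio-tc mapping below, assuming the usual IQD
 * queue layout (QETH_IQD_MCAST_TXQ == 0, QETH_IQD_MIN_UCAST_TXQ == 1):
 * with dev->real_num_tx_queues == 5, ucast_txqs is 4 and the single
 * traffic class spans TXQs 1-4. All skb priorities map to that class,
 * so the stack's default queue selection never lands on mcast TXQ 0.
 */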
#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

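/* Resizing must keep the prio-tc map consistent: shrink the map before
 * netif_set_real_num_tx_queues() so no priority points at a vanishing
 * queue, and rebuild it from real_num_tx_queues if the resize fails.
 */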
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
			  struct net_device *sb_dev)
{
	struct qeth_card *card = dev->ml_priv;

	if (qeth_uses_tx_prio_queueing(card))
		return qeth_get_priority_queue(card, skb);

	return netdev_pick_tx(dev, skb, sb_dev);
}
EXPORT_SYMBOL_GPL(qeth_osa_select_queue);

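/* The TX completion NAPIs are added and scheduled inside a
 * local_bh_disable() section; re-enabling softirqs afterwards is what
 * kick-starts the queued NAPI polls on this CPU.
 */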
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}

	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");