 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Christoph Raisch <raisch@de.ibm.com>
 *  Jan-Bernd Themann <themann@de.ibm.com>
 *  Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
		.compatible = "IBM,lhea",

MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
void ehea_dump(void *adr, int len, char *msg)
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
void ehea_schedule_port_reset(struct ehea_port *port)
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
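/* Rebuild the global table of adapter/port/queue firmware handles
 * (QPs, CQs, EQs and MRs) for all registered adapters and active ports.
 */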
static void ehea_update_firmware_handles(void)
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
			num_portres += port->num_def_qps + port->num_add_tx_qps;

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
			goto out;	/* Keep the existing array */

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			     l < port->num_def_qps + port->num_add_tx_qps;
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;

	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;

	mutex_unlock(&ehea_fw_handles.lock);
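/* Rebuild the global table of broadcast/multicast MAC registrations
 * for all active ports, including per-port multicast list entries.
 */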
static void ehea_update_bcmc_registrations(void)
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
			goto out;	/* Keep the existing array */

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))

			if (num_registrations == 0)

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;

	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;

	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
		ehea_error("no mem for cb2");

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

	free_page((unsigned long)cb2);
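/* Replenish receive queue 1 with freshly allocated skbs and tell the
 * hardware how many new WQEs are available.
 */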
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
		index &= max_index_mask;

	ehea_update_rq1a(pr->qp, adder);

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);

	ehea_update_rq1a(pr->qp, nr_rq1a);
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		skb = netdev_alloc_skb_ip_align(dev, packet_size);
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				ehea_info("%s: rq%i ran dry - no mem for skb",
					  pr->port->netdev->name, rq_nr);

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			q_skba->os_skbs = fill_wqes - i;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index &= max_index_mask;

	q_skba->index = index;

	ehea_update_rq2a(pr->qp, adder);
	ehea_update_rq3a(pr->qp, adder);

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       struct ehea_cqe *cqe)
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
	} else if (rq == 3) {
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		ehea_schedule_port_reset(pr->port);
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
	struct ehea_cqe *cqe = priv;

	/* non tcp/udp packets */
	if (!cqe->header_length)

	skb_reset_network_header(skb);
	if (iph->protocol != IPPROTO_TCP)

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))

	*hdr_flags = LRO_IPV4 | LRO_TCP;
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
			netif_receive_skb(skb);
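/* Receive path: poll completions for RQ1/RQ2/RQ3 up to the NAPI budget,
 * hand good frames to the stack (optionally via LRO) and refill the
 * receive queues with the number of buffers consumed.
 */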
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
				skb = get_skb_by_index_ll(skb_arr_rq1,
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
							cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
				ehea_fill_skb(dev, skb, cqe);
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
				ehea_fill_skb(dev, skb, cqe);

			ehea_proc_skb(pr, cqe, skb);
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
		cqe = ehea_poll_rq1(qp, &wqe_index);
	lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);
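/* Transmit completion path: reap send CQEs, free the transmitted skbs
 * and wake the netif queue once enough send WQEs are available again.
 */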
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			ehea_schedule_port_reset(pr->port);

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			pr->sq_skba.arr[index] = NULL;

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		cqe = ehea_poll_cq(send_cq);

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	spin_unlock_irqrestore(&pr->netif_queue, flags);

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
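/* NAPI poll handler: process send completions and received WQEs; when the
 * budget is not exhausted, re-enable interrupts and re-check the queues to
 * close the race between polling and interrupt generation.
 */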
static int ehea_poll(struct napi_struct *napi, int budget)
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)

		if (!napi_reschedule(napi))

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;

	eqe = ehea_poll_eq(port->qp_eq);
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);

	ehea_schedule_port_reset(port);

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
int ehea_sense_port_attr(struct ehea_port *port)
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
		ehea_error("no mem for cb0");

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
	if (hret != H_SUCCESS) {

	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;

	switch (cb0->port_speed) {
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		port->port_speed = 0;
		port->full_duplex = 0;

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
		port->num_def_qps = cb0->num_default_qps;
		port->num_def_qps = 1;

	if (!port->num_def_qps) {

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
		ehea_error("no mem for cb4");

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				port->port_speed = 0;
				port->full_duplex = 0;
			ehea_error("Failed sensing port speed");
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ehea_error("Failed setting port speed");

	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
			ehea_error("unknown portnum %x", portnum);

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
					ehea_error("failed resensing port "
				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  port->full_duplex == 1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
			ehea_info("External switch port is backup port");

	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
static void ehea_neq_tasklet(unsigned long data)
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
static int ehea_fill_port_res(struct ehea_port_res *pr)
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
static int ehea_reg_interrupts(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);

		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);

	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
static void ehea_free_interrupts(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
static int ehea_configure_port(struct ehea_port *port)
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	if (hret != H_SUCCESS)

	free_page((unsigned long)cb0);
int ehea_gen_smrs(struct ehea_port_res *pr)
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);

	ehea_rem_mr(&pr->send_mr);
	ehea_error("Generating SMRS failed\n");

int ehea_rem_smrs(struct ehea_port_res *pr)
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->os_skbs = 0;
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;

	memset(pr, 0, sizeof(struct ehea_port_res));

	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
		ehea_error("create_eq failed (eq)");

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     port->logical_port_id);
		ehea_error("create_cq failed (cq_recv)");

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     port->logical_port_id);
		ehea_error("create_cq failed (cq_send)");

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
		ehea_error("no mem for ehea_qp_init_attr");

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
		ehea_error("create_qp failed");

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
	netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
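/* Build an SWQE2 descriptor for a TSO frame: the ethernet/IP/TCP headers are
 * placed in immediate data, the remaining linear data goes into sg1entry.
 */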
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		ehea_error("cannot handle fragmented headers");
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
		ehea_error("no mem for cb0");

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);

	ehea_update_bcmc_registrations();

	free_page((unsigned long)cb0);
static void ehea_promiscuous_error(u64 hret, int enable)
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");

static void ehea_promiscuous(struct net_device *dev, int enable)
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;

	if ((enable && port->promisc) || (!enable && !port->promisc))

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
		ehea_error("no mem for cb7");

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
		ehea_promiscuous_error(hret, enable);

	port->promisc = enable;

	free_page((unsigned long)cb7);
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
static int ehea_drop_multicast_list(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
			ehea_error("failed deregistering mcast MAC");
static void ehea_allmulti(struct net_device *dev, int enable)
	struct ehea_port *port = netdev_priv(dev);

	if (!port->allmulti) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
				ehea_error("failed enabling IFF_ALLMULTI");
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
				ehea_error("failed disabling IFF_ALLMULTI");
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
	struct ehea_mc_list *ehea_mcl_entry;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
static void ehea_set_multicast_list(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			ehea_allmulti(dev, 1);

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%llx). "
				  port->adapter->max_mc_mac);

		netdev_for_each_mc_addr(k_mcl_entry, dev)
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);

	ehea_update_bcmc_registrations();
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

	write_swqe2_data(skb, dev, swqe, lkey);
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	/* copy (immediate) data */
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
				page_address(frag->page) + frag->page_offset,
			imm_data += frag->size;
	swqe->immediate_data_length = skb->len;
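/* Spread outgoing TCP flows over the available send queues using a simple
 * hash of the TCP ports and the destination IP address.
 */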
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
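/* Main transmit entry point: pick a send queue, build either an SWQE3
 * (small frame, fully immediate data) or an SWQE2 (with descriptors),
 * post it to the QP and stop the queue when send WQEs run low.
 */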
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
			pr->swqe_ll_count += 1;
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
			| EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
			| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
		ehea_error("no mem for cb1");

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
		ehea_error("no mem for cb1");

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");

	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
		ehea_error("no mem for cb1");

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");

	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");

	free_page((unsigned long)cb0);
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
		ehea_error("ehea_create_eq failed (qp_eq)");

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);

		ehea_clean_portres(port, &port->port_res[i]);

	ehea_destroy_eq(port->qp_eq);
2450 static int ehea_clean_all_portres(struct ehea_port
*port
)
2455 for (i
= 0; i
< port
->num_def_qps
+ port
->num_add_tx_qps
; i
++)
2456 ret
|= ehea_clean_portres(port
, &port
->port_res
[i
]);
2458 ret
|= ehea_destroy_eq(port
->qp_eq
);
2463 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2465 if (adapter->active_ports)
2468 ehea_rem_mr(&adapter->mr);
2471 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2473 if (adapter->active_ports)
2476 return ehea_reg_kernel_mr(adapter, &adapter->mr);
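/* ehea_up - bring the logical port into the EHEA_PORT_UP state.
 * Sets up the port resources, programs the default QPs via
 * ehea_configure_port(), registers the interrupts, activates every queue
 * pair, fills the receive queues and registers the broadcast MAC
 * (H_REG_BCMC); the error path unwinds interrupts and port resources.
 */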
2479 static int ehea_up(struct net_device *dev)
2482 struct ehea_port *port = netdev_priv(dev);
2484 if (port->state == EHEA_PORT_UP)
2487 ret = ehea_port_res_setup(port, port->num_def_qps,
2488 port->num_add_tx_qps);
2490 ehea_error("port_res_failed");
2494 /* Set default QP for this port */
2495 ret = ehea_configure_port(port);
2497 ehea_error("ehea_configure_port failed. ret:%d", ret);
2501 ret = ehea_reg_interrupts(dev);
2503 ehea_error("reg_interrupts failed. ret:%d", ret);
2507 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2508 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2510 ehea_error("activate_qp failed");
2515 for (i = 0; i < port->num_def_qps; i++) {
2516 ret = ehea_fill_port_res(&port->port_res[i]);
2518 ehea_error("out_free_irqs");
2523 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2529 port->state = EHEA_PORT_UP;
2535 ehea_free_interrupts(dev);
2538 ehea_clean_all_portres(port);
2541 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2543 ehea_update_bcmc_registrations();
2544 ehea_update_firmware_handles();
2549 static void port_napi_disable(struct ehea_port *port)
2553 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2554 napi_disable(&port->port_res[i].napi);
2557 static void port_napi_enable(struct ehea_port *port)
2561 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2562 napi_enable(&port->port_res[i].napi);
2565 static int ehea_open(struct net_device *dev)
2568 struct ehea_port *port = netdev_priv(dev);
2570 mutex_lock(&port->port_lock);
2572 if (netif_msg_ifup(port))
2573 ehea_info("enabling port %s", dev->name);
2577 port_napi_enable(port);
2578 netif_start_queue(dev);
2581 mutex_unlock(&port->port_lock);
2586 static int ehea_down(struct net_device *dev)
2589 struct ehea_port *port = netdev_priv(dev);
2591 if (port->state == EHEA_PORT_DOWN)
2594 ehea_drop_multicast_list(dev);
2595 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2597 ehea_free_interrupts(dev);
2599 port->state = EHEA_PORT_DOWN;
2601 ehea_update_bcmc_registrations();
2603 ret = ehea_clean_all_portres(port);
2605 ehea_info("Failed freeing resources for %s. ret=%i",
2608 ehea_update_firmware_handles();
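/* ehea_stop - ndo_stop callback.
 * Blocks the reset worker (__EHEA_DISABLE_PORT_RESET plus
 * cancel_work_sync()), then stops the TX queue, disables NAPI and tears the
 * port down under port_lock.
 */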
2613 static int ehea_stop(struct net_device *dev)
2616 struct ehea_port *port = netdev_priv(dev);
2618 if (netif_msg_ifdown(port))
2619 ehea_info("disabling port %s", dev->name);
2621 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2622 cancel_work_sync(&port->reset_task);
2623 mutex_lock(&port->port_lock);
2624 netif_stop_queue(dev);
2625 port_napi_disable(port);
2626 ret = ehea_down(dev);
2627 mutex_unlock(&port->port_lock);
2628 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2632 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2634 struct ehea_qp qp = *orig_qp;
2635 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2636 struct ehea_swqe *swqe;
2640 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2641 swqe = ehea_get_swqe(&qp, &wqe_index);
2642 swqe->tx_control |= EHEA_SWQE_PURGE;
2646 static void ehea_flush_sq(struct ehea_port *port)
2650 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2651 struct ehea_port_res *pr = &port->port_res[i];
2652 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2654 while (atomic_read(&pr->swqe_avail) < swqe_max) {
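/* ehea_stop_qps - quiesce all queue pairs of a port.
 * For every port resource the send queue is purged, the QP is disabled by
 * clearing H_QP_CR_ENABLED in control block 0 through the query/modify
 * hcalls, and the shared memory regions are deregistered with
 * ehea_rem_smrs(). Used while the LPAR memory layout is being changed.
 */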
2662 int ehea_stop_qps(struct net_device *dev)
2664 struct ehea_port *port = netdev_priv(dev);
2665 struct ehea_adapter *adapter = port->adapter;
2666 struct hcp_modify_qp_cb0 *cb0;
2674 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2680 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2681 struct ehea_port_res *pr = &port->port_res[i];
2682 struct ehea_qp *qp = pr->qp;
2684 /* Purge send queue */
2687 /* Disable queue pair */
2688 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2689 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2691 if (hret != H_SUCCESS) {
2692 ehea_error("query_ehea_qp failed (1)");
2696 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2697 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2699 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2700 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2702 &dummy64, &dummy16, &dummy16);
2703 if (hret != H_SUCCESS) {
2704 ehea_error("modify_ehea_qp failed (1)");
2708 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2709 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2711 if (hret != H_SUCCESS) {
2712 ehea_error("query_ehea_qp failed (2)");
2716 /* deregister shared memory regions */
2717 dret = ehea_rem_smrs(pr);
2719 ehea_error("unreg shared memory region failed");
2726 free_page((unsigned long)cb0);
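/* ehea_update_rqs - repair posted receive WQEs after memory re-registration.
 * Rewrites the l_key with the new receive MR key and recomputes the mapped
 * virtual address of each socket buffer in RQ2 and RQ3, so the WQEs that
 * were already posted remain valid for the new memory region.
 */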
2731 void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2733 struct ehea_qp qp = *orig_qp;
2734 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2735 struct ehea_rwqe *rwqe;
2736 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2737 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2738 struct sk_buff *skb;
2739 u32 lkey = pr->recv_mr.lkey;
2745 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2746 rwqe = ehea_get_next_rwqe(&qp, 2);
2747 rwqe->sg_list[0].l_key = lkey;
2748 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2749 skb = skba_rq2[index];
2751 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2754 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2755 rwqe = ehea_get_next_rwqe(&qp, 3);
2756 rwqe->sg_list[0].l_key = lkey;
2757 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2758 skb = skba_rq3[index];
2760 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
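/* ehea_restart_qps - counterpart of ehea_stop_qps.
 * Recreates the shared memory regions (ehea_gen_smrs), patches the posted
 * receive WQEs via ehea_update_rqs(), re-enables each QP by setting
 * H_QP_CR_ENABLED in control block 0, and refills RQ1, RQ2 and RQ3.
 */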
2764 int ehea_restart_qps(struct net_device *dev)
2766 struct ehea_port *port = netdev_priv(dev);
2767 struct ehea_adapter *adapter = port->adapter;
2771 struct hcp_modify_qp_cb0 *cb0;
2776 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2782 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2783 struct ehea_port_res *pr = &port->port_res[i];
2784 struct ehea_qp *qp = pr->qp;
2786 ret = ehea_gen_smrs(pr);
2788 ehea_error("creation of shared memory regions failed");
2792 ehea_update_rqs(qp, pr);
2794 /* Enable queue pair */
2795 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2796 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2798 if (hret != H_SUCCESS) {
2799 ehea_error("query_ehea_qp failed (1)");
2803 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2804 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2806 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2807 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2809 &dummy64, &dummy16, &dummy16);
2810 if (hret != H_SUCCESS) {
2811 ehea_error("modify_ehea_qp failed (1)");
2815 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2816 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2818 if (hret != H_SUCCESS) {
2819 ehea_error("query_ehea_qp failed (2)");
2823 /* refill entire queue */
2824 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2825 ehea_refill_rq2(pr, 0);
2826 ehea_refill_rq3(pr, 0);
2829 free_page((unsigned long)cb0);
2834 static void ehea_reset_port(struct work_struct *work)
2837 struct ehea_port *port =
2838 container_of(work, struct ehea_port, reset_task);
2839 struct net_device *dev = port->netdev;
2842 mutex_lock(&port->port_lock);
2843 netif_stop_queue(dev);
2845 port_napi_disable(port);
2853 ehea_set_multicast_list(dev);
2855 if (netif_msg_timer(port))
2856 ehea_info("Device %s reset successfully", dev->name);
2858 port_napi_enable(port);
2860 netif_wake_queue(dev);
2862 mutex_unlock(&port->port_lock);
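/* ehea_rereg_mrs - re-register all memory regions after an LPAR memory
 * change (DLPAR add/remove). Under dlpar_mem_lock every active port is
 * quiesced (queue stopped, send queues flushed, QPs stopped, NAPI disabled),
 * the old kernel MR is torn down and a new one registered, and finally the
 * QPs are restarted and the ports woken up again.
 */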
2866 static void ehea_rereg_mrs(struct work_struct *work)
2869 struct ehea_adapter *adapter;
2871 mutex_lock(&dlpar_mem_lock);
2872 ehea_info("LPAR memory changed - re-initializing driver");
2874 list_for_each_entry(adapter, &adapter_list, list)
2875 if (adapter->active_ports) {
2876 /* Shutdown all ports */
2877 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2878 struct ehea_port *port = adapter->port[i];
2879 struct net_device *dev;
2886 if (dev->flags & IFF_UP) {
2887 mutex_lock(&port->port_lock);
2888 netif_stop_queue(dev);
2889 ehea_flush_sq(port);
2890 ret = ehea_stop_qps(dev);
2892 mutex_unlock(&port->port_lock);
2895 port_napi_disable(port);
2896 mutex_unlock(&port->port_lock);
2900 /* Unregister old memory region */
2901 ret = ehea_rem_mr(&adapter->mr);
2903 ehea_error("unregister MR failed - driver"
2909 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2911 list_for_each_entry(adapter, &adapter_list, list)
2912 if (adapter->active_ports) {
2913 /* Register new memory region */
2914 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2916 ehea_error("register MR failed - driver"
2921 /* Restart all ports */
2922 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2923 struct ehea_port *port = adapter->port[i];
2926 struct net_device *dev = port->netdev;
2928 if (dev->flags & IFF_UP) {
2929 mutex_lock(&port->port_lock);
2930 port_napi_enable(port);
2931 ret = ehea_restart_qps(dev);
2933 netif_wake_queue(dev);
2934 mutex_unlock(&port->port_lock);
2939 ehea_info("re-initializing driver complete");
2941 mutex_unlock(&dlpar_mem_lock);
2945 static void ehea_tx_watchdog(struct net_device *dev)
2947 struct ehea_port *port = netdev_priv(dev);
2949 if (netif_carrier_ok(dev) &&
2950 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2951 ehea_schedule_port_reset(port);
2954 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2956 struct hcp_query_ehea *cb;
2960 cb = (void *)get_zeroed_page(GFP_KERNEL);
2966 hret = ehea_h_query_ehea(adapter->handle, cb);
2968 if (hret != H_SUCCESS) {
2973 adapter->max_mc_mac = cb->max_mc_mac - 1;
2977 free_page((unsigned long)cb);
2982 int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2984 struct hcp_ehea_port_cb4 *cb4;
2990 /* (Try to) enable *jumbo frames */
2991 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2993 ehea_error("no mem for cb4");
2997 hret = ehea_h_query_ehea_port(port->adapter->handle,
2998 port->logical_port_id,
3000 H_PORT_CB4_JUMBO, cb4);
3001 if (hret == H_SUCCESS) {
3002 if (cb4->jumbo_frame)
3005 cb4->jumbo_frame = 1;
3006 hret = ehea_h_modify_ehea_port(port->adapter->
3013 if (hret == H_SUCCESS)
3019 free_page((unsigned long)cb4);
3025 static ssize_t ehea_show_port_id(struct device *dev,
3026 struct device_attribute *attr, char *buf)
3028 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3029 return sprintf(buf, "%d", port->logical_port_id);
3032 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3035 static void __devinit logical_port_release(struct device *dev)
3037 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3038 of_node_put(port->ofdev.node);
3041 static struct device *ehea_register_port(struct ehea_port *port,
3042 struct device_node *dn)
3046 port->ofdev.node = of_node_get(dn);
3047 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
3048 port->ofdev.dev.bus = &ibmebus_bus_type;
3050 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
3051 port->ofdev.dev.release = logical_port_release;
3053 ret = of_device_register(&port->ofdev);
3055 ehea_error("failed to register device. ret=%d", ret);
3059 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3061 ehea_error("failed to register attributes, ret=%d", ret);
3062 goto out_unreg_of_dev;
3065 return &port->ofdev.dev;
3068 of_device_unregister(&port->ofdev);
3073 static void ehea_unregister_port(struct ehea_port *port)
3075 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3076 of_device_unregister(&port->ofdev);
3079 static const struct net_device_ops ehea_netdev_ops = {
3080 .ndo_open = ehea_open,
3081 .ndo_stop = ehea_stop,
3082 .ndo_start_xmit = ehea_start_xmit,
3083 #ifdef CONFIG_NET_POLL_CONTROLLER
3084 .ndo_poll_controller = ehea_netpoll,
3086 .ndo_get_stats = ehea_get_stats,
3087 .ndo_set_mac_address = ehea_set_mac_addr,
3088 .ndo_validate_addr = eth_validate_addr,
3089 .ndo_set_multicast_list = ehea_set_multicast_list,
3090 .ndo_change_mtu = ehea_change_mtu,
3091 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3092 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3093 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3094 .ndo_tx_timeout = ehea_tx_watchdog,
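/* ehea_setup_single_port - allocate and register one logical port.
 * Allocates the net_device with alloc_etherdev(), initializes the ehea_port
 * embedded in its private area, senses the port attributes, registers the
 * port on the ibmebus, wires up ehea_netdev_ops and the ethtool ops, sets
 * the offload feature flags and finally registers the net_device.
 */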
3097 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3098 u32 logical_port_id,
3099 struct device_node *dn)
3102 struct net_device *dev;
3103 struct ehea_port *port;
3104 struct device *port_dev;
3107 /* allocate memory for the port structures */
3108 dev = alloc_etherdev(sizeof(struct ehea_port));
3111 ehea_error("no mem for net_device");
3116 port = netdev_priv(dev);
3118 mutex_init(&port->port_lock);
3119 port->state = EHEA_PORT_DOWN;
3120 port->sig_comp_iv = sq_entries / 10;
3122 port->adapter = adapter;
3124 port->logical_port_id = logical_port_id;
3126 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3128 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3129 if (!port->mc_list) {
3131 goto out_free_ethdev;
3134 INIT_LIST_HEAD(&port->mc_list->list);
3136 ret = ehea_sense_port_attr(port);
3138 goto out_free_mc_list;
3140 port_dev = ehea_register_port(port, dn);
3142 goto out_free_mc_list;
3144 SET_NETDEV_DEV(dev, port_dev);
3146 /* initialize net_device structure */
3147 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3149 dev->netdev_ops = &ehea_netdev_ops;
3150 ehea_set_ethtool_ops(dev);
3152 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3153 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3154 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3156 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3158 INIT_WORK(&port->reset_task, ehea_reset_port);
3160 ret = register_netdev(dev);
3162 ehea_error("register_netdev failed. ret=%d", ret);
3163 goto out_unreg_port;
3166 port->lro_max_aggr = lro_max_aggr;
3168 ret = ehea_get_jumboframe_status(port, &jumbo);
3170 ehea_error("failed determining jumbo frame status for %s",
3171 port->netdev->name);
3173 ehea_info("%s: Jumbo frames are %sabled", dev->name,
3174 jumbo == 1 ? "en" : "dis");
3176 adapter->active_ports++;
3181 ehea_unregister_port(port);
3184 kfree(port->mc_list);
3190 ehea_error("setting up logical port with id=%d failed, ret=%d",
3191 logical_port_id, ret);
3195 static void ehea_shutdown_single_port(struct ehea_port *port)
3197 struct ehea_adapter *adapter = port->adapter;
3198 unregister_netdev(port->netdev);
3199 ehea_unregister_port(port);
3200 kfree(port->mc_list);
3201 free_netdev(port->netdev);
3202 adapter->active_ports--;
3205 static int ehea_setup_ports(struct ehea_adapter *adapter)
3207 struct device_node *lhea_dn;
3208 struct device_node *eth_dn = NULL;
3210 const u32 *dn_log_port_id;
3213 lhea_dn = adapter->ofdev->node;
3214 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3216 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3218 if (!dn_log_port_id) {
3219 ehea_error("bad device node: eth_dn name=%s",
3224 if (ehea_add_adapter_mr(adapter)) {
3225 ehea_error("creating MR failed");
3226 of_node_put(eth_dn);
3230 adapter->port[i] = ehea_setup_single_port(adapter,
3233 if (adapter->port[i])
3234 ehea_info("%s -> logical port id #%d",
3235 adapter->port[i]->netdev->name,
3238 ehea_remove_adapter_mr(adapter);
3245 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3246 u32 logical_port_id)
3248 struct device_node *lhea_dn;
3249 struct device_node *eth_dn = NULL;
3250 const u32 *dn_log_port_id;
3252 lhea_dn = adapter->ofdev->node;
3253 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3255 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3258 if (*dn_log_port_id == logical_port_id)
3265 static ssize_t ehea_probe_port(struct device *dev,
3266 struct device_attribute *attr,
3267 const char *buf, size_t count)
3269 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3270 struct ehea_port *port;
3271 struct device_node *eth_dn = NULL;
3274 u32 logical_port_id;
3276 sscanf(buf, "%d", &logical_port_id);
3278 port = ehea_get_port(adapter, logical_port_id);
3281 ehea_info("adding port with logical port id=%d failed. port "
3282 "already configured as %s.", logical_port_id,
3283 port->netdev->name);
3287 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3290 ehea_info("no logical port with id %d found", logical_port_id);
3294 if (ehea_add_adapter_mr(adapter)) {
3295 ehea_error("creating MR failed");
3299 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3301 of_node_put(eth_dn);
3304 for (i = 0; i < EHEA_MAX_PORTS; i++)
3305 if (!adapter->port[i]) {
3306 adapter->port[i] = port;
3310 ehea_info("added %s (logical port id=%d)", port->netdev->name,
3313 ehea_remove_adapter_mr(adapter);
3317 return (ssize_t) count;
3320 static ssize_t ehea_remove_port(struct device *dev,
3321 struct device_attribute *attr,
3322 const char *buf, size_t count)
3324 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3325 struct ehea_port *port;
3327 u32 logical_port_id;
3329 sscanf(buf, "%d", &logical_port_id);
3331 port = ehea_get_port(adapter, logical_port_id);
3334 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3337 ehea_shutdown_single_port(port);
3339 for (i = 0; i < EHEA_MAX_PORTS; i++)
3340 if (adapter->port[i] == port) {
3341 adapter->port[i] = NULL;
3345 ehea_error("removing port with logical port id=%d failed. port "
3346 "not configured.", logical_port_id);
3350 ehea_remove_adapter_mr(adapter);
3352 return (ssize_t) count;
3355 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3356 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3358 int ehea_create_device_sysfs(struct of_device *dev)
3360 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3364 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3369 void ehea_remove_device_sysfs(struct of_device *dev)
3371 device_remove_file(&dev->dev, &dev_attr_probe_port);
3372 device_remove_file(&dev->dev, &dev_attr_remove_port);
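/* ehea_probe_adapter - ibmebus probe callback for an lhea node.
 * Allocates the ehea_adapter, reads the "ibm,hea-handle" property, senses
 * the adapter attributes, creates the notification event queue and its
 * interrupt, creates the probe_port/remove_port sysfs files and sets up all
 * logical ports found below the node.
 */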
3375 static int __devinit ehea_probe_adapter(struct of_device *dev,
3376 const struct of_device_id *id)
3378 struct ehea_adapter *adapter;
3379 const u64 *adapter_handle;
3382 if (!dev || !dev->node) {
3383 ehea_error("Invalid ibmebus device probed");
3387 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3390 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3394 list_add(&adapter->list, &adapter_list);
3396 adapter->ofdev = dev;
3398 adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
3401 adapter->handle = *adapter_handle;
3403 if (!adapter->handle) {
3404 dev_err(&dev->dev, "failed getting handle for adapter"
3405 " '%s'\n", dev->node->full_name);
3410 adapter->pd = EHEA_PD_ID;
3412 dev_set_drvdata(&dev->dev, adapter);
3415 /* initialize adapter and ports */
3416 /* get adapter properties */
3417 ret = ehea_sense_adapter_attr(adapter);
3419 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3423 adapter->neq = ehea_create_eq(adapter,
3424 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3425 if (!adapter->neq) {
3427 dev_err(&dev->dev, "NEQ creation failed\n");
3431 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3432 (unsigned long)adapter);
3434 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3435 ehea_interrupt_neq, IRQF_DISABLED,
3436 "ehea_neq", adapter);
3438 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3442 ret = ehea_create_device_sysfs(dev);
3446 ret = ehea_setup_ports(adapter);
3448 dev_err(&dev->dev, "setup_ports failed\n");
3449 goto out_rem_dev_sysfs;
3456 ehea_remove_device_sysfs(dev);
3459 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3462 ehea_destroy_eq(adapter->neq);
3465 list_del(&adapter->list);
3469 ehea_update_firmware_handles();
3474 static int __devexit ehea_remove(struct of_device *dev)
3476 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3479 for (i = 0; i < EHEA_MAX_PORTS; i++)
3480 if (adapter->port[i]) {
3481 ehea_shutdown_single_port(adapter->port[i]);
3482 adapter->port[i] = NULL;
3485 ehea_remove_device_sysfs(dev);
3487 flush_scheduled_work();
3489 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3490 tasklet_kill(&adapter->neq_tasklet);
3492 ehea_destroy_eq(adapter->neq);
3493 ehea_remove_adapter_mr(adapter);
3494 list_del(&adapter->list);
3497 ehea_update_firmware_handles();
3502 void ehea_crash_handler(void)
3506 if (ehea_fw_handles.arr)
3507 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3508 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3509 ehea_fw_handles.arr[i].fwh,
3512 if (ehea_bcmc_regs.arr)
3513 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3514 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3515 ehea_bcmc_regs.arr[i].port_id,
3516 ehea_bcmc_regs.arr[i].reg_type,
3517 ehea_bcmc_regs.arr[i].macaddr,
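/* ehea_mem_notifier - memory hotplug callback.
 * On MEM_GOING_OFFLINE (and when an offline is canceled or memory comes
 * online) the transfer-stop flag is set, the section bitmap is updated via
 * ehea_add_sect_bmap()/ehea_rem_sect_bmap() and ehea_rereg_mrs() rebuilds
 * the memory regions to match the new layout.
 */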
3521 static int ehea_mem_notifier(struct notifier_block *nb,
3522 unsigned long action, void *data)
3524 struct memory_notify *arg = data;
3526 case MEM_CANCEL_OFFLINE:
3527 ehea_info("memory offlining canceled");
3528 /* Readd canceled memory block */
3530 ehea_info("memory is going online");
3531 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3532 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3534 ehea_rereg_mrs(NULL);
3536 case MEM_GOING_OFFLINE:
3537 ehea_info("memory is going offline");
3538 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3539 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3541 ehea_rereg_mrs(NULL);
3547 ehea_update_firmware_handles();
3552 static struct notifier_block ehea_mem_nb = {
3553 .notifier_call = ehea_mem_notifier,
3556 static int ehea_reboot_notifier(struct notifier_block *nb,
3557 unsigned long action, void *unused)
3559 if (action == SYS_RESTART) {
3560 ehea_info("Reboot: freeing all eHEA resources");
3561 ibmebus_unregister_driver(&ehea_driver);
3566 static struct notifier_block ehea_reboot_nb = {
3567 .notifier_call = ehea_reboot_notifier,
3570 static int check_module_parm(void)
3574 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3575 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3576 ehea_info("Bad parameter: rq1_entries");
3579 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3580 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3581 ehea_info("Bad parameter: rq2_entries");
3584 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3585 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3586 ehea_info("Bad parameter: rq3_entries");
3589 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3590 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3591 ehea_info("Bad parameter: sq_entries");
3598 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3601 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3604 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3605 ehea_show_capabilities, NULL);
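/* ehea_module_init - module entry point.
 * Validates the module parameters, builds the busmap, registers the reboot,
 * memory-hotplug and crash-shutdown notifiers, registers the driver on the
 * ibmebus and exposes the capabilities attribute; the error path unwinds
 * these registrations in reverse order.
 */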
3607 int __init ehea_module_init(void)
3611 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3615 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3616 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3617 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3619 mutex_init(&ehea_fw_handles.lock);
3620 spin_lock_init(&ehea_bcmc_regs.lock);
3622 ret = check_module_parm();
3626 ret = ehea_create_busmap();
3630 ret = register_reboot_notifier(&ehea_reboot_nb);
3632 ehea_info("failed registering reboot notifier");
3634 ret = register_memory_notifier(&ehea_mem_nb);
3636 ehea_info("failed registering memory remove notifier");
3638 ret = crash_shutdown_register(&ehea_crash_handler);
3640 ehea_info("failed registering crash handler");
3642 ret = ibmebus_register_driver(&ehea_driver);
3644 ehea_error("failed registering eHEA device driver on ebus");
3648 ret = driver_create_file(&ehea_driver.driver,
3649 &driver_attr_capabilities);
3651 ehea_error("failed to register capabilities attribute, ret=%d",
3659 ibmebus_unregister_driver(&ehea_driver);
3661 unregister_memory_notifier(&ehea_mem_nb);
3662 unregister_reboot_notifier(&ehea_reboot_nb);
3663 crash_shutdown_unregister(&ehea_crash_handler);
3668 static void __exit ehea_module_exit(void)
3672 flush_scheduled_work();
3673 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3674 ibmebus_unregister_driver(&ehea_driver);
3675 unregister_reboot_notifier(&ehea_reboot_nb);
3676 ret = crash_shutdown_unregister(&ehea_crash_handler);
3678 ehea_info("failed unregistering crash handler");
3679 unregister_memory_notifier(&ehea_mem_nb);
3680 kfree(ehea_fw_handles.arr);
3681 kfree(ehea_bcmc_regs.arr);
3682 ehea_destroy_busmap();
3685 module_init(ehea_module_init);
3686 module_exit(ehea_module_exit);
);