/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/if_ether.h>

#include "ehea.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME "%s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int i;

	for (i = 0; i < nr_of_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i);
}
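/*
 * Editorial note (not in the original sources): ehea_refill_rq1() walks
 * the RQ1 skb ring backwards from the index of the last completed WQE,
 * re-allocating only the slots that were handed up the stack, and then
 * rings the doorbell once for the whole batch via ehea_update_rq1a().
 * The "index &= max_index_mask" wrap relies on rq1_skba.len being a
 * power of two (see the module parameter note above).
 */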
static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (!fill_wqes)
		return ret;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = (u64)skb->data;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
	}

	q_skba->index = index;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, i);
	else
		ehea_update_rq3a(pr->qp, i);

	return ret;
}
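/*
 * Editorial note (not in the original sources): each receive WQE carries
 * a 64-bit wr_id into which the WQE type and the ring index are packed
 * with EHEA_BMASK_SET(). When the completion comes back, the index is
 * unpacked with EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id) in
 * get_skb_by_index(), so the matching skb is found without searching
 * the ring.
 */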
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}
static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;	/* no error */
	/* a TCP checksum error with no parsed header is treated as benign */
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	/* prefetch the next slot and the start of its packet data */
	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
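/*
 * Editorial note (not in the original sources): the prefetch()/prefetchw()
 * calls above warm the cache for the *next* completion while the current
 * one is processed - the array slot is fetched for write (it is NULLed
 * when consumed) and the first cache lines of the packet data for read.
 */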
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (netif_msg_rx_err(pr->port)) {
		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(cqe, sizeof(*cqe), "CQE");
	}

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		ehea_error("Critical receive error. Resetting port.");
		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
		return 1;
	}

	return 0;
}
static int ehea_poll(struct net_device *dev, int *budget)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr = &port->port_res[0];
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;
	my_quota = min(*budget, dev->quota);
	my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);

	/* rq0 is low latency RQ */
	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((my_quota > 0) && cqe) {
		my_quota--;
		processed++;
		processed_rq1++;

		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;

		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) { /* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");
					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				memcpy(skb->data, ((char*)cqe) + 64,
				       cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) { /* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else { /* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				vlan_hwaccel_receive_skb(skb, port->vgrp,
							 cqe->vlan_tag);
			else
				netif_receive_skb(skb);

		} else { /* Error occured */
			pr->p_state.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	dev->quota -= processed;
	*budget -= processed;

	pr->p_state.ehea_poll += 1;
	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF);

	if (!cqe || intreq) {
		netif_rx_complete(dev);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
		if (!cqe || intreq)
			return 0;
		if (!netif_rx_reschedule(dev, my_quota))
			return 0;
	}
	return 1;
}
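/*
 * Editorial note (not in the original sources): ehea_poll() follows the
 * pre-2.6.24 polling contract - consume up to min(*budget, dev->quota,
 * EHEA_POLL_MAX_RWQE) completions, refill all three receive queues, and
 * return 0 (done, interrupts re-armed) or 1 (more work pending). Every
 * 16th poll (ehea_poll & 0xF) the interrupt path is forced as a fairness
 * valve so one port cannot stay in polled mode indefinitely.
 */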
void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
{
	struct sk_buff *skb;
	int index, max_index_mask, i;

	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	max_index_mask = pr->sq_skba.len - 1;
	for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
		skb = pr->sq_skba.arr[index];
		if (likely(skb)) {
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		} else {
			ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
				   cqe->wr_id, i, index);
			break;
		}
		index--;
		index &= max_index_mask;
	}
}
#define MAX_SENDCOMP_QUOTA 400
void ehea_send_irq_tasklet(unsigned long data)
{
	struct ehea_port_res *pr = (struct ehea_port_res*)data;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = MAX_SENDCOMP_QUOTA;
	int cqe_counter = 0;
	int swqe_av = 0;
	unsigned long flags;

	do {
		cqe = ehea_poll_cq(send_cq);
		if (!cqe) {
			ehea_reset_cq_ep(send_cq);
			ehea_reset_cq_n1(send_cq);
			cqe = ehea_poll_cq(send_cq);
			if (!cqe)
				break;
		}
		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			queue_work(pr->port->adapter->ehea_wq,
				   &pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE))
			free_sent_skbs(cqe, pr);

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;
	} while (quota > 0);

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);
	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	if (unlikely(cqe))	/* quota exhausted, more work pending */
		tasklet_hi_schedule(&pr->send_comp_task);
}
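/*
 * Editorial note (not in the original sources): send completions are
 * handled in a tasklet with a fixed MAX_SENDCOMP_QUOTA so a busy send CQ
 * cannot monopolize the CPU; reclaimed send-WQE slots are returned to
 * pr->swqe_avail and the netif queue is woken once the refill threshold
 * (swqe_refill_th) is reached again.
 */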
static irqreturn_t ehea_send_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	tasklet_hi_schedule(&pr->send_comp_task);
	return IRQ_HANDLED;
}
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	struct ehea_port *port = pr->port;
	netif_rx_schedule(port->netdev);
	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", *(u64 *)eqe);
		/* read the token before advancing; eqe may be NULL after */
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		eqe = ehea_poll_eq(port->qp_eq);
		ehea_debug("next eqe=%p", eqe);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       u32 logical_port)
{
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i]->logical_port_id == logical_port)
			return adapter->port[i];

	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);	/* May be called via */
	if (!cb0) {				/* ehea_neq_tasklet() */
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	/* Number of default QPs */
	port->num_def_qps = cb0->num_default_qps;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	if (port->num_def_qps >= EHEA_NUM_TX_QP)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed. Either"
				  " this partition is not authorized to set "
				  "port speed or another partition has modified"
				  " port speed first.");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	netif_carrier_on(port->netdev);
	kfree(cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
		} else {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter*)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-recv%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
					  ehea_recv_irq_handler,
					  SA_INTERRUPT, pr->int_recv_name, pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_recv_int:"
				   "port_res_nr:%d, ist=%X", i,
				   pr->recv_eq->attr.ist1);
			goto out_free_seq;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for funct ehea_recv_int %d "
				  "registered", pr->recv_eq->attr.ist1, i);
	}

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  SA_INTERRUPT, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-send%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1,
					  ehea_send_irq_handler,
					  SA_INTERRUPT, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_send "
				   "port_res_nr:%d, ist=%X", i,
				   pr->send_eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_send_int "
				  "%d registered", pr->send_eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].send_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
out_free_qpeq:
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
out_free_seq:
	while (--i >= 0) {
		u32 ist = port->port_res[i].recv_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
	goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->send_eq->attr.ist1);
	}

	/* receive */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free recv irq for res %d with handle 0x%X",
				  i, pr->recv_eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_def_qps; i++)
		cb0->default_qpn_arr[i] = port->port_res[i].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}
static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	u64 hret;
	struct ehea_adapter *adapter = pr->port->adapter;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->send_mr);
	if (hret != H_SUCCESS)
		goto out;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->recv_mr);
	if (hret != H_SUCCESS)
		goto out_freeres;

	return 0;

out_freeres:
	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS)
		ehea_error("failed freeing SMR");
out:
	return -EIO;
}
static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	struct ehea_adapter *adapter = pr->port->adapter;
	int ret = 0;
	u64 hret;

	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing send SMR for pr=%p", pr);
	}

	hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing recv SMR for pr=%p", pr);
	}

	return ret;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void*) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->send_lock);
	spin_lock_init(&pr->recv_lock);
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->recv_eq) {
		ehea_error("create_eq failed (recv_eq)");
		goto out_free;
	}

	pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->send_eq) {
		ehea_error("create_eq failed (send_eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->recv_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->send_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}
	tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet,
		     (unsigned long)pr);
	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
	kfree(init_attr);
	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->send_eq);
	ehea_destroy_eq(pr->recv_eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->send_eq);
		ehea_destroy_eq(pr->recv_eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data));
	swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1);
}
static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
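/*
 * Editorial worked example (not in the original sources): for an untagged
 * Ethernet frame carrying a TCP segment with a 20-byte IP header,
 * skb->nh.iph - skb->data = 14, so ip_start = 14 and
 * ip_end = 14 + 5 * 4 - 1 = 33. tcp_offset then points at the TCP
 * checksum field: ip_end + 1 + offsetof(struct tcphdr, check)
 * = 34 + 16 = 50. The hardware uses these offsets to insert the
 * ip/tcp/udp checksums on transmit.
 */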
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;
	u64 tmp_addr;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;

			tmp_addr = (u64)(skb->data + headersize);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u64 tmp_addr;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else {
		memcpy(imm_data, skb->data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;
	u64 tmp_addr;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;

			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sgentry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	}
}
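/*
 * Editorial note (not in the original sources): a type-2 send WQE carries
 * up to SWQE2_MAX_IMM bytes of headers/data inline ("immediate data");
 * whatever remains of the linear skb area goes into sg_entry, and each
 * page fragment into one further scatter-gather descriptor, all mapped
 * through the pre-registered send memory region (lkey).
 */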
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (tagged)");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (vlan)");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_free;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free;

	ret = 0;
out_free:
	kfree(cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	ehea_info("Hypervisor denied %sabling promiscuous mode.%s",
		  enable == 1 ? "en" : "dis",
		  hret != H_AUTHORITY ? "" : " Another partition owning a "
		  "logical port on the same physical port might have altered "
		  "promiscuous mode first.");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}
static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		return;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			return;
		}

		for (i = 0, k_mcl_entry = dev->mc_list;
		     i < dev->mc_count;
		     i++, k_mcl_entry = k_mcl_entry->next) {
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
		}
	}
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (skb->nh.iph->protocol == IPPROTO_UDP) {
			if ((skb->nh.iph->frag_off & IP_MF) ||
			    (skb->nh.iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);

		} else if (skb->nh.iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (skb->nh.iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
			if ((skb->nh.iph->frag_off & IP_MF) ||
			    (skb->nh.iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		memcpy(imm_data, skb->data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		memcpy(imm_data, skb->data, skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
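/*
 * Editorial note (not in the original sources): small packets
 * (skb->len <= SWQE3_MAX_IMM) are sent via ehea_xmit3() entirely as
 * immediate data inside the WQE, so the skb can be freed right away;
 * larger packets go through ehea_xmit2()/write_swqe2_data() with
 * scatter-gather descriptors and are only freed from the send-completion
 * tasklet via free_sent_skbs().
 */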
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr = &port->port_res[0];

	spin_lock(&pr->xmit_lock);

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);

		if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      EHEA_SIG_IV_LONG);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_count = 0;
		} else
			pr->swqe_count += 1;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
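/*
 * Editorial note (not in the original sources): the transmit queue is
 * stopped under pr->netif_queue with the swqe_avail test repeated inside
 * the lock - the classic check/lock/re-check pattern against a send
 * completion racing in between and waking the queue before it was
 * actually marked stopped.
 */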
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		return;
	}

	if (grp)
		memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
	else
		memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		return;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(1 << (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
}
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	if (port->vgrp)
		port->vgrp->vlan_devices[vid] = NULL;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		return;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(1 << (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0* cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}
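/*
 * Editorial note (not in the original sources): a QP is activated by
 * walking it through the firmware state machine - INITIALIZED, then
 * ENABLED | INITIALIZED, then ENABLED | RDY2SND - with a query after
 * each modify step to verify the transition actually took effect.
 */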
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

	ehea_destroy_eq(port->qp_eq);
out:
	return ret;
}
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);
	u64 mac_addr = 0;

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		ehea_error("out_clean_pr");
		goto out_clean_pr;
	}
	mac_addr = (*(u64*)dev->dev_addr) >> 16;

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("out_dereg_bc");
		goto out_dereg_bc;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = 0;
	port->state = EHEA_PORT_UP;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_dereg_bc:
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	return ret;
}
static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret)
		netif_start_queue(dev);

	up(&port->port_lock);

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_free_interrupts(dev);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tasklet_kill(&port->port_res[i].send_comp_task);

	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	ret = ehea_clean_all_portres(port);
	port->state = EHEA_PORT_DOWN;
	return ret;
}
static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_workqueue(port->adapter->ehea_wq);
	down(&port->port_lock);
	netif_stop_queue(dev);
	ret = ehea_down(dev);
	up(&port->port_lock);
	return ret;
}
static void ehea_reset_port(void *data)
{
	int ret;
	struct net_device *dev = data;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);
	netif_stop_queue(dev);
	netif_poll_disable(dev);

	ret = ehea_down(dev);
	if (ret)
		ehea_error("ehea_down failed. not all resources are freed");

	ret = ehea_up(dev);
	if (ret) {
		ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
		goto out;
	}

	if (netif_msg_timer(port))
		ehea_info("Device %s resetted successfully", dev->name);

	netif_poll_enable(dev);
	netif_wake_queue(dev);
out:
	up(&port->port_lock);
	return;
}
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev))
		queue_work(port->adapter->ehea_wq, &port->reset_task);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->num_ports = cb->num_ports;
	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	kfree(cb);
out:
	return ret;
}
static int ehea_setup_single_port(struct ehea_port *port,
				  struct device_node *dn)
{
	int ret;
	u64 hret;
	struct net_device *dev = port->netdev;
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb4 *cb4;
	u32 *dn_log_port_id;

	sema_init(&port->port_lock, 1);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	if (!dn) {
		ehea_error("bad device node: dn=%p", dn);
		ret = -EINVAL;
		goto out;
	}

	port->of_dev_node = dn;

	/* Determine logical port id */
	dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL);

	if (!dn_log_port_id) {
		ehea_error("bad device node: dn_log_port_id=%p",
			   dn_log_port_id);
		ret = -EINVAL;
		goto out;
	}
	port->logical_port_id = *dn_log_port_id;

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free;

	/* Enable Jumbo frames */
	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
	} else {
		cb4->jumbo_frame = 1;
		hret = ehea_h_modify_ehea_port(adapter->handle,
					       port->logical_port_id,
					       H_PORT_CB4, H_PORT_CB4_JUMBO,
					       cb4);
		if (hret != H_SUCCESS)
			ehea_info("Jumbo frames not activated");
		kfree(cb4);
	}

	/* initialize net_device structure */
	SET_MODULE_OWNER(dev);

	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->open = ehea_open;
	dev->poll = ehea_poll;
	dev->weight = 64;
	dev->stop = ehea_stop;
	dev->hard_start_xmit = ehea_start_xmit;
	dev->get_stats = ehea_get_stats;
	dev->set_multicast_list = ehea_set_multicast_list;
	dev->set_mac_address = ehea_set_mac_addr;
	dev->change_mtu = ehea_change_mtu;
	dev->vlan_rx_register = ehea_vlan_rx_register;
	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->tx_timeout = &ehea_tx_watchdog;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port, dev);

	ehea_set_ethtool_ops(dev);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_free;
	}

	ret = 0;
	goto out;

out_free:
	kfree(port->mc_list);
out:
	return ret;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	int ret, i;
	int port_setup_ok = 0;
	struct ehea_port *port;
	struct device_node *dn = NULL;
	struct net_device *dev;

	/* get port properties for all ports */
	for (i = 0; i < adapter->num_ports; i++) {

		if (adapter->port[i])
			continue;	/* port already up and running */

		/* allocate memory for the port structures */
		dev = alloc_etherdev(sizeof(struct ehea_port));

		if (!dev) {
			ehea_error("no mem for net_device");
			break;
		}

		port = netdev_priv(dev);
		port->adapter = adapter;
		port->netdev = dev;
		adapter->port[i] = port;
		port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

		dn = of_find_node_by_name(dn, "ethernet");
		ret = ehea_setup_single_port(port, dn);
		if (ret) {
			/* Free mem for this port struct. The others will be
			   processed on rollback */
			free_netdev(dev);
			adapter->port[i] = NULL;
			ehea_error("eHEA port %d setup failed, ret=%d", i, ret);
		}
	}

	of_node_put(dn);

	/* Check for succesfully set up ports */
	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i])
			port_setup_ok++;

	if (port_setup_ok)
		ret = 0;	/* At least some ports are setup correctly */
	else
		ret = -EINVAL;

	return ret;
}
static int __devinit ehea_probe(struct ibmebus_dev *dev,
				const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	u64 *adapter_handle;
	int ret;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
		goto out;
	}

	adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle",
					    NULL);
	if (!adapter_handle) {
		dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
			" '%s'\n", dev->ofdev.node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->handle = *adapter_handle;
	adapter->pd = EHEA_PD_ID;

	dev->ofdev.dev.driver_data = adapter;

	ret = ehea_reg_mr_adapter(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n");
		goto out_free_ad;
	}

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
		goto out_free_res;
	}
	dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports);

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		dev_err(&dev->ofdev.dev, "NEQ creation failed");
		goto out_free_res;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
				  ehea_interrupt_neq, SA_INTERRUPT,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed");
		goto out_kill_eq;
	}

	adapter->ehea_wq = create_workqueue("ehea_wq");
	if (!adapter->ehea_wq)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "setup_ports failed");
		goto out_kill_wq;
	}

	ret = 0;
	goto out;

out_kill_wq:
	destroy_workqueue(adapter->ehea_wq);

out_free_irq:
	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_res:
	ehea_h_free_resource(adapter->handle, adapter->mr.handle);

out_free_ad:
	kfree(adapter);
out:
	return ret;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	unregister_netdev(port->netdev);
	kfree(port->mc_list);
	free_netdev(port->netdev);
}
static int __devexit ehea_remove(struct ibmebus_dev *dev)
{
	struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
	u64 hret;
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}
	destroy_workqueue(adapter->ehea_wq);

	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

	ehea_destroy_eq(adapter->neq);

	hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle);
	if (hret != H_SUCCESS)
		dev_err(&dev->ofdev.dev, "free_resource_mr failed");
	kfree(adapter);
	return 0;
}
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}
static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
static struct ibmebus_driver ehea_driver = {
	.name = "ehea",
	.id_table = ehea_device_table,
	.probe = ehea_probe,
	.remove = ehea_remove,
};
int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);

	ret = check_module_parm();
	if (ret)
		goto out;
	ret = ibmebus_register_driver(&ehea_driver);
	if (ret)
		ehea_error("failed registering eHEA device driver on ebus");
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	ibmebus_unregister_driver(&ehea_driver);
}
module_init(ehea_module_init);
module_exit(ehea_module_exit);