/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

#include "ehea.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0;
static int use_lro = 0;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state = 0;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");

static int port_name_cnt = 0;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags = 0;
struct work_struct ehea_rereg_mr_task;

struct semaphore dlpar_mem_lock;

static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
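
/*
 * ehea_get_stats - gather statistics for a logical port. Hardware
 * counters (multicast, receive errors, byte counts) are read from the
 * H_PORT_CB2 query control block; packet counts are summed from the
 * per-queue software counters kept in each port resource.
 */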
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}
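
/*
 * RQ1 refill: skbs that could not be allocated in a previous pass are
 * carried over in os_skbs and retried here. While a memory
 * re-registration is in flight (__EHEA_STOP_XFER) the refill is
 * deferred. The doorbell (ehea_update_rq1a) is only rung for WQEs that
 * are actually backed by an skb.
 */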
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}
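
/*
 * Generic refill for receive queues 2 and 3: allocate one skb per WQE,
 * map its data area and build the receive WQE with a single scatter
 * entry covering the whole packet buffer.
 */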
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   pr->port->netdev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}
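
/*
 * ehea_check_cqe - extract the receive queue number from the CQE type
 * field and check the status bits. A TCP checksum error is tolerated
 * here when no IP header was recognized (header_length == 0).
 */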
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);
	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
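
/*
 * Receive error handling: per-error-type counters are updated, the skb
 * belonging to the failed WQE is dropped for RQ2/RQ3, and a fatal error
 * (EHEA_CQE_STAT_FAT_ERR_MASK) triggers a port reset via the reset task.
 */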
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		schedule_work(&pr->port->reset_task);
		return 1;
	}

	return 0;
}
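
/*
 * get_skb_hdr - header extraction callback for the LRO engine. Frames
 * that are not IPv4/TCP or whose headers are incomplete are rejected
 * so that they bypass aggregation.
 */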
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* get ip header */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (iph->tot_len < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
			     && pr->port->vgrp;

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}

static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) { /* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) { /* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else { /* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
			dev->last_rx = jiffies;
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while(cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			schedule_work(&pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
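
/*
 * NAPI poll: send completions are always reaped first, then receive
 * WQEs up to the budget. When the budget is not exhausted (or after
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive polls an interrupt is
 * forced), interrupts are re-enabled and both queues are re-checked to
 * close the race between the final poll and the interrupt enable.
 */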
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		netif_rx_complete(dev, napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!netif_rx_reschedule(dev, napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;

	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		netif_rx_schedule(dev, &port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	netif_rx_schedule(pr->port->netdev, &pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_qp *qp;
	struct ehea_eqe *eqe;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);
	}

	schedule_work(&port->reset_task);

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];

	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);	/* May be called via */
	if (!cb0) {				/* ehea_neq_tasklet() */
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	kfree(cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter*)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
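
/*
 * Interrupt setup: one "affinity" interrupt per port for QP error
 * events, plus one receive event interrupt per port resource (queue).
 * On failure, every interrupt registered so far is freed again.
 */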
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr))
	    || (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void*) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
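
/*
 * ehea_init_port_res - build one port resource: event queue, receive
 * and send completion queues, the queue pair itself, the skb tracking
 * arrays for SQ/RQ1-3, the shared memory regions and finally the NAPI
 * context and LRO manager. Cleanup of partially created resources is
 * centralized in the out_free path.
 */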
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
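
/*
 * For TSO the hardware needs the eth/ip/tcp headers as immediate data
 * in the SWQE; only the remainder of the linear skb data is referenced
 * through the first scatter-gather entry.
 */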
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_free;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free;

	ret = 0;
out_free:
	kfree(cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}

static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		return;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			return;
		}

		for (i = 0, k_mcl_entry = dev->mc_list;
		     i < dev->mc_count;
		     i++, k_mcl_entry = k_mcl_entry->next) {
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
		}
	}
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
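
/*
 * TX queue selection: TCP flows are distributed over the available
 * send queues by a simple hash over source/destination port and
 * destination address; everything else uses queue 0.
 */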
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr*)(skb_network_header(skb)
				       + (ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	}
	else
		return 0;
}
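
/*
 * Transmit path: small frames (up to SWQE3_MAX_IMM bytes) are sent as
 * SWQE3 with all data copied as immediate data and only every
 * sig_comp_iv-th WQE requesting a completion; larger frames use SWQE2
 * with descriptors and always signal completion so the skb can be
 * released in ehea_proc_cqes().
 */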
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}

static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0* cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for(i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = 0;
	port->state = EHEA_PORT_UP;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	up(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ret = ehea_clean_all_portres(port);
	if (ret)
		ehea_info("Failed freeing resources for %s. ret=%i",
			  dev->name, ret);

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_scheduled_work();
	down(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	up(&port->port_lock);
	return ret;
}

void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
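
/*
 * ehea_stop_qps - quiesce all queue pairs of a port for DLPAR memory
 * operations: purge the send queue, disable the QP via the firmware
 * modify call and deregister the shared memory regions.
 */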
int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0* cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			ehea_error("unreg shared memory region failed");
			goto out;
		}
	}

	ret = 0;
out:
	kfree(cb0);

	return ret;
}
2493 void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2495 	struct ehea_qp qp = *orig_qp;
2496 	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2497 	struct ehea_rwqe *rwqe;
2498 	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2499 	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2500 	struct sk_buff *skb;
2501 	u32 lkey = pr->recv_mr.lkey;
2507 	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2508 		rwqe = ehea_get_next_rwqe(&qp, 2);
2509 		rwqe->sg_list[0].l_key = lkey;
2510 		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2511 		skb = skba_rq2[index];
2513 			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2516 	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2517 		rwqe = ehea_get_next_rwqe(&qp, 3);
2518 		rwqe->sg_list[0].l_key = lkey;
2519 		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2520 		skb = skba_rq3[index];
2522 			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
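/*
 * Editor's note: ehea_update_rqs() exists for the DLPAR path.  Once the
 * kernel memory region has been torn down and re-registered, every
 * receive WQE still posted on RQ2/RQ3 carries a stale lkey and virtual
 * address, so the function walks both queues and rewrites
 * sg_list[0].l_key and sg_list[0].vaddr from the fresh recv_mr before
 * the QP is re-enabled in ehea_restart_qps().
 */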
2526 int ehea_restart_qps(struct net_device *dev)
2528 	struct ehea_port *port = netdev_priv(dev);
2529 	struct ehea_adapter *adapter = port->adapter;
2533 	struct hcp_modify_qp_cb0 *cb0;
2538 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2544 	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2545 		struct ehea_port_res *pr = &port->port_res[i];
2546 		struct ehea_qp *qp = pr->qp;
2548 		ret = ehea_gen_smrs(pr);
2550 			ehea_error("creation of shared memory regions failed");
2554 		ehea_update_rqs(qp, pr);
2556 		/* Enable queue pair */
2557 		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2558 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2560 		if (hret != H_SUCCESS) {
2561 			ehea_error("query_ehea_qp failed (1)");
2565 		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2566 		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2568 		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2569 					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2571 					     &dummy64, &dummy16, &dummy16);
2572 		if (hret != H_SUCCESS) {
2573 			ehea_error("modify_ehea_qp failed (1)");
2577 		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2578 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2580 		if (hret != H_SUCCESS) {
2581 			ehea_error("query_ehea_qp failed (2)");
2585 		/* refill entire queue */
2586 		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2587 		ehea_refill_rq2(pr, 0);
2588 		ehea_refill_rq3(pr, 0);
2596 static void ehea_reset_port(struct work_struct *work)
2599 	struct ehea_port *port =
2600 		container_of(work, struct ehea_port, reset_task);
2601 	struct net_device *dev = port->netdev;
2604 	down(&port->port_lock);
2605 	netif_stop_queue(dev);
2607 	port_napi_disable(port);
2615 	ehea_set_multicast_list(dev);
2617 	if (netif_msg_timer(port))
2618 		ehea_info("Device %s reset successfully", dev->name);
2620 	port_napi_enable(port);
2622 	netif_wake_queue(dev);
2624 	up(&port->port_lock);
2628 static void ehea_rereg_mrs(struct work_struct *work)
2631 	struct ehea_adapter *adapter;
2633 	down(&dlpar_mem_lock);
2634 	ehea_info("LPAR memory enlarged - re-initializing driver");
2636 	list_for_each_entry(adapter, &adapter_list, list)
2637 		if (adapter->active_ports) {
2638 			/* Shutdown all ports */
2639 			for (i = 0; i < EHEA_MAX_PORTS; i++) {
2640 				struct ehea_port *port = adapter->port[i];
2643 				struct net_device *dev = port->netdev;
2645 				if (dev->flags & IFF_UP) {
2646 					down(&port->port_lock);
2647 					netif_stop_queue(dev);
2648 					ret = ehea_stop_qps(dev);
2650 						up(&port->port_lock);
2653 					port_napi_disable(port);
2654 					up(&port->port_lock);
2659 			/* Unregister old memory region */
2660 			ret = ehea_rem_mr(&adapter->mr);
2662 				ehea_error("unregister MR failed - driver"
2668 	ehea_destroy_busmap();
2669 	ret = ehea_create_busmap();
2671 		ehea_error("creating ehea busmap failed");
2675 	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2677 	list_for_each_entry(adapter, &adapter_list, list)
2678 		if (adapter->active_ports) {
2679 			/* Register new memory region */
2680 			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2682 				ehea_error("register MR failed - driver"
2687 			/* Restart all ports */
2688 			for (i = 0; i < EHEA_MAX_PORTS; i++) {
2689 				struct ehea_port *port = adapter->port[i];
2692 				struct net_device *dev = port->netdev;
2694 				if (dev->flags & IFF_UP) {
2695 					down(&port->port_lock);
2696 					port_napi_enable(port);
2697 					ret = ehea_restart_qps(dev);
2699 						netif_wake_queue(dev);
2700 					up(&port->port_lock);
2705 	up(&dlpar_mem_lock);
2706 	ehea_info("re-initializing driver complete");
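/*
 * Editor's note: the DLPAR memory-add sequence in ehea_rereg_mrs() is,
 * in order: stop traffic on every active port (stop queue, stop QPs,
 * disable NAPI), drop the old memory region, rebuild the busmap for the
 * grown LPAR memory, register a fresh MR per adapter, then restart the
 * QPs and wake the queues.  dlpar_mem_lock serializes the whole
 * operation against concurrent memory events.
 */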
2711 static void ehea_tx_watchdog(struct net_device *dev)
2713 	struct ehea_port *port = netdev_priv(dev);
2715 	if (netif_carrier_ok(dev) &&
2716 	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2717 		schedule_work(&port->reset_task);
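/*
 * Editor's note: ehea_tx_watchdog() is wired into dev->tx_timeout
 * further down in ehea_setup_single_port(); the stack invokes it when
 * the TX queue has been stopped for longer than dev->watchdog_timeo.
 * The __EHEA_STOP_XFER test keeps a port reset from being scheduled
 * while a memory re-registration is deliberately holding traffic off.
 */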
2720 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2722 	struct hcp_query_ehea *cb;
2726 	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2732 	hret = ehea_h_query_ehea(adapter->handle, cb);
2734 	if (hret != H_SUCCESS) {
2739 	adapter->max_mc_mac = cb->max_mc_mac - 1;
2748 int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2750 	struct hcp_ehea_port_cb4 *cb4;
2756 	/* (Try to) enable *jumbo frames */
2757 	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2759 		ehea_error("no mem for cb4");
2763 	hret = ehea_h_query_ehea_port(port->adapter->handle,
2764 				      port->logical_port_id,
2766 				      H_PORT_CB4_JUMBO, cb4);
2767 	if (hret == H_SUCCESS) {
2768 		if (cb4->jumbo_frame)
2771 			cb4->jumbo_frame = 1;
2772 			hret = ehea_h_modify_ehea_port(port->adapter->
2779 			if (hret == H_SUCCESS)
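/*
 * Editor's note: the jumbo-frame probe follows the driver's usual
 * query/modify control-block pattern: H_PORT_CB4 is queried first; if
 * jumbo frames are already on, *jumbo simply reports that, otherwise
 * cb4->jumbo_frame is set and written back with
 * ehea_h_modify_ehea_port(), and *jumbo reflects whether that call
 * succeeded.
 */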
2791 static ssize_t ehea_show_port_id(struct device *dev,
2792 				 struct device_attribute *attr, char *buf)
2794 	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2795 	return sprintf(buf, "%d", port->logical_port_id);
2798 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
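/*
 * Editor's note -- usage sketch (the exact sysfs path is illustrative):
 * the read-only attribute declared above surfaces the logical port id
 * of each registered port device, e.g.
 *
 *	cat /sys/bus/ibmebus/devices/<adapter>/port0/log_port_id
 *
 * which ends up in ehea_show_port_id() and prints
 * port->logical_port_id.
 */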
2801 static void __devinit logical_port_release(struct device *dev)
2803 	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2804 	of_node_put(port->ofdev.node);
2807 static int ehea_driver_sysfs_add(struct device *dev,
2808 				 struct device_driver *driver)
2812 	ret = sysfs_create_link(&driver->kobj, &dev->kobj,
2813 				kobject_name(&dev->kobj));
2815 		ret = sysfs_create_link(&dev->kobj, &driver->kobj,
2818 			sysfs_remove_link(&driver->kobj,
2819 					  kobject_name(&dev->kobj));
2824 static void ehea_driver_sysfs_remove(struct device *dev,
2825 				     struct device_driver *driver)
2827 	struct device_driver *drv = driver;
2830 		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
2831 		sysfs_remove_link(&dev->kobj, "driver");
2835 static struct device *ehea_register_port(struct ehea_port *port,
2836 					 struct device_node *dn)
2840 	port->ofdev.node = of_node_get(dn);
2841 	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2842 	port->ofdev.dev.bus = &ibmebus_bus_type;
2844 	sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
2845 	port->ofdev.dev.release = logical_port_release;
2847 	ret = of_device_register(&port->ofdev);
2849 		ehea_error("failed to register device. ret=%d", ret);
2853 	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2855 		ehea_error("failed to register attributes, ret=%d", ret);
2856 		goto out_unreg_of_dev;
2859 	ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
2861 		ehea_error("failed to register sysfs driver link");
2862 		goto out_rem_dev_file;
2865 	return &port->ofdev.dev;
2868 	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2870 	of_device_unregister(&port->ofdev);
2875 static void ehea_unregister_port(struct ehea_port *port)
2877 	ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
2878 	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2879 	of_device_unregister(&port->ofdev);
2882 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2883 					 u32 logical_port_id,
2884 					 struct device_node *dn)
2887 	struct net_device *dev;
2888 	struct ehea_port *port;
2889 	struct device *port_dev;
2892 	/* allocate memory for the port structures */
2893 	dev = alloc_etherdev(sizeof(struct ehea_port));
2896 		ehea_error("no mem for net_device");
2901 	port = netdev_priv(dev);
2903 	sema_init(&port->port_lock, 1);
2904 	port->state = EHEA_PORT_DOWN;
2905 	port->sig_comp_iv = sq_entries / 10;
2907 	port->adapter = adapter;
2909 	port->logical_port_id = logical_port_id;
2911 	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2913 	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2914 	if (!port->mc_list) {
2916 		goto out_free_ethdev;
2919 	INIT_LIST_HEAD(&port->mc_list->list);
2921 	ret = ehea_sense_port_attr(port);
2923 		goto out_free_mc_list;
2925 	port_dev = ehea_register_port(port, dn);
2927 		goto out_free_mc_list;
2929 	SET_NETDEV_DEV(dev, port_dev);
2931 	/* initialize net_device structure */
2932 	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
2934 	dev->open = ehea_open;
2935 #ifdef CONFIG_NET_POLL_CONTROLLER
2936 	dev->poll_controller = ehea_netpoll;
2938 	dev->stop = ehea_stop;
2939 	dev->hard_start_xmit = ehea_start_xmit;
2940 	dev->get_stats = ehea_get_stats;
2941 	dev->set_multicast_list = ehea_set_multicast_list;
2942 	dev->set_mac_address = ehea_set_mac_addr;
2943 	dev->change_mtu = ehea_change_mtu;
2944 	dev->vlan_rx_register = ehea_vlan_rx_register;
2945 	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
2946 	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
2947 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
2948 		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
2949 		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
2951 	dev->tx_timeout = &ehea_tx_watchdog;
2952 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
2954 	INIT_WORK(&port->reset_task, ehea_reset_port);
2956 	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2959 		goto out_unreg_port;
2962 	ehea_set_ethtool_ops(dev);
2964 	ret = register_netdev(dev);
2966 		ehea_error("register_netdev failed. ret=%d", ret);
2970 	port->lro_max_aggr = lro_max_aggr;
2972 	ret = ehea_get_jumboframe_status(port, &jumbo);
2974 		ehea_error("failed determining jumbo frame status for %s",
2975 			   port->netdev->name);
2977 	ehea_info("%s: Jumbo frames are %sabled", dev->name,
2978 		  jumbo == 1 ? "en" : "dis");
2980 	adapter->active_ports++;
2985 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2988 	ehea_unregister_port(port);
2991 	kfree(port->mc_list);
2997 	ehea_error("setting up logical port with id=%d failed, ret=%d",
2998 		   logical_port_id, ret);
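/*
 * Editor's note -- minimal sketch (hypothetical foo_port/foo_setup
 * names) of the alloc_etherdev()/netdev_priv() pattern used above:
 * alloc_etherdev() allocates the net_device and the driver-private area
 * in a single allocation, and netdev_priv() returns that private area,
 * which is why ehea_setup_single_port() passes sizeof(struct ehea_port).
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct foo_port {
	int id;		/* hypothetical private state */
};

static struct net_device *foo_setup(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct foo_port));
	struct foo_port *port;

	if (!dev)
		return NULL;		/* allocation failed */

	port = netdev_priv(dev);	/* private area behind net_device */
	port->id = 0;

	return dev;			/* caller registers and later frees */
}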
3002 static void ehea_shutdown_single_port(struct ehea_port *port)
3004 	unregister_netdev(port->netdev);
3005 	ehea_unregister_port(port);
3006 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
3007 	kfree(port->mc_list);
3008 	free_netdev(port->netdev);
3009 	port->adapter->active_ports--;
3012 static int ehea_setup_ports(struct ehea_adapter *adapter)
3014 	struct device_node *lhea_dn;
3015 	struct device_node *eth_dn = NULL;
3017 	const u32 *dn_log_port_id;
3020 	lhea_dn = adapter->ofdev->node;
3021 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3023 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3025 		if (!dn_log_port_id) {
3026 			ehea_error("bad device node: eth_dn name=%s",
3031 		if (ehea_add_adapter_mr(adapter)) {
3032 			ehea_error("creating MR failed");
3033 			of_node_put(eth_dn);
3037 		adapter->port[i] = ehea_setup_single_port(adapter,
3040 		if (adapter->port[i])
3041 			ehea_info("%s -> logical port id #%d",
3042 				  adapter->port[i]->netdev->name,
3045 	ehea_remove_adapter_mr(adapter);
3053 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3054 					   u32 logical_port_id)
3056 	struct device_node *lhea_dn;
3057 	struct device_node *eth_dn = NULL;
3058 	const u32 *dn_log_port_id;
3060 	lhea_dn = adapter->ofdev->node;
3061 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3063 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3066 		if (*dn_log_port_id == logical_port_id)
3073 static ssize_t ehea_probe_port(struct device *dev,
3074 			       struct device_attribute *attr,
3075 			       const char *buf, size_t count)
3077 	struct ehea_adapter *adapter = dev->driver_data;
3078 	struct ehea_port *port;
3079 	struct device_node *eth_dn = NULL;
3082 	u32 logical_port_id;
3084 	sscanf(buf, "%d", &logical_port_id);
3086 	port = ehea_get_port(adapter, logical_port_id);
3089 		ehea_info("adding port with logical port id=%d failed. port "
3090 			  "already configured as %s.", logical_port_id,
3091 			  port->netdev->name);
3095 	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3098 		ehea_info("no logical port with id %d found", logical_port_id);
3102 	if (ehea_add_adapter_mr(adapter)) {
3103 		ehea_error("creating MR failed");
3107 	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3109 	of_node_put(eth_dn);
3112 	for (i = 0; i < EHEA_MAX_PORTS; i++)
3113 		if (!adapter->port[i]) {
3114 			adapter->port[i] = port;
3118 	ehea_info("added %s (logical port id=%d)", port->netdev->name,
3121 	ehea_remove_adapter_mr(adapter);
3125 	return (ssize_t) count;
3128 static ssize_t ehea_remove_port(struct device *dev,
3129 				struct device_attribute *attr,
3130 				const char *buf, size_t count)
3132 	struct ehea_adapter *adapter = dev->driver_data;
3133 	struct ehea_port *port;
3135 	u32 logical_port_id;
3137 	sscanf(buf, "%d", &logical_port_id);
3139 	port = ehea_get_port(adapter, logical_port_id);
3142 		ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3145 		ehea_shutdown_single_port(port);
3147 		for (i = 0; i < EHEA_MAX_PORTS; i++)
3148 			if (adapter->port[i] == port) {
3149 				adapter->port[i] = NULL;
3153 		ehea_error("removing port with logical port id=%d failed. port "
3154 			   "not configured.", logical_port_id);
3158 	ehea_remove_adapter_mr(adapter);
3160 	return (ssize_t) count;
3163 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3164 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
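/*
 * Editor's note -- usage sketch (paths illustrative): the two
 * write-only attributes let an administrator hot-add or hot-remove a
 * logical port by writing its id, e.g.
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 *
 * The ids are parsed with sscanf() and routed to
 * ehea_setup_single_port() / ehea_shutdown_single_port().
 */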
3166 int ehea_create_device_sysfs(struct of_device *dev)
3168 	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3172 	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3177 void ehea_remove_device_sysfs(struct of_device *dev)
3179 	device_remove_file(&dev->dev, &dev_attr_probe_port);
3180 	device_remove_file(&dev->dev, &dev_attr_remove_port);
3183 static int __devinit ehea_probe_adapter(struct of_device *dev,
3184 					const struct of_device_id *id)
3186 	struct ehea_adapter *adapter;
3187 	const u64 *adapter_handle;
3190 	if (!dev || !dev->node) {
3191 		ehea_error("Invalid ibmebus device probed");
3195 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3198 		dev_err(&dev->dev, "no mem for ehea_adapter\n");
3202 	list_add(&adapter->list, &adapter_list);
3204 	adapter->ofdev = dev;
3206 	adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
3209 		adapter->handle = *adapter_handle;
3211 	if (!adapter->handle) {
3212 		dev_err(&dev->dev, "failed getting handle for adapter"
3213 			" '%s'\n", dev->node->full_name);
3218 	adapter->pd = EHEA_PD_ID;
3220 	dev->dev.driver_data = adapter;
3223 	/* initialize adapter and ports */
3224 	/* get adapter properties */
3225 	ret = ehea_sense_adapter_attr(adapter);
3227 		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3231 	adapter->neq = ehea_create_eq(adapter,
3232 				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3233 	if (!adapter->neq) {
3235 		dev_err(&dev->dev, "NEQ creation failed\n");
3239 	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3240 		     (unsigned long)adapter);
3242 	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3243 				  ehea_interrupt_neq, IRQF_DISABLED,
3244 				  "ehea_neq", adapter);
3246 		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3250 	ret = ehea_create_device_sysfs(dev);
3254 	ret = ehea_setup_ports(adapter);
3256 		dev_err(&dev->dev, "setup_ports failed\n");
3257 		goto out_rem_dev_sysfs;
3264 	ehea_remove_device_sysfs(dev);
3267 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3270 	ehea_destroy_eq(adapter->neq);
3278 static int __devexit ehea_remove(struct of_device *dev)
3280 	struct ehea_adapter *adapter = dev->dev.driver_data;
3283 	for (i = 0; i < EHEA_MAX_PORTS; i++)
3284 		if (adapter->port[i]) {
3285 			ehea_shutdown_single_port(adapter->port[i]);
3286 			adapter->port[i] = NULL;
3289 	ehea_remove_device_sysfs(dev);
3291 	flush_scheduled_work();
3293 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3294 	tasklet_kill(&adapter->neq_tasklet);
3296 	ehea_destroy_eq(adapter->neq);
3297 	ehea_remove_adapter_mr(adapter);
3298 	list_del(&adapter->list);
3305 static int ehea_reboot_notifier(struct notifier_block *nb,
3306 				unsigned long action, void *unused)
3308 	if (action == SYS_RESTART) {
3309 		ehea_info("Reboot: freeing all eHEA resources");
3310 		ibmebus_unregister_driver(&ehea_driver);
3315 static struct notifier_block ehea_reboot_nb = {
3316 	.notifier_call = ehea_reboot_notifier,
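/*
 * Editor's note -- minimal sketch of the reboot-notifier pattern used
 * above, with hypothetical foo_* names.  register_reboot_notifier()
 * chains the notifier_block into the reboot path; the callback receives
 * the action code (SYS_RESTART, SYS_HALT, SYS_POWER_OFF) and can
 * release hardware resources before the machine goes down.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int foo_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	if (action == SYS_RESTART)
		printk(KERN_INFO "foo: releasing resources for reboot\n");
	return NOTIFY_DONE;
}

static struct notifier_block foo_reboot_nb = {
	.notifier_call = foo_reboot_notifier,
};

/* e.g. in the module init path: register_reboot_notifier(&foo_reboot_nb); */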
3319 static int check_module_parm(void)
3323 	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3324 	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3325 		ehea_info("Bad parameter: rq1_entries");
3328 	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3329 	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3330 		ehea_info("Bad parameter: rq2_entries");
3333 	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3334 	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3335 		ehea_info("Bad parameter: rq3_entries");
3338 	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3339 	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3340 		ehea_info("Bad parameter: sq_entries");
3347 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3350 	return sprintf(buf, "%d", EHEA_CAPABILITIES);
3353 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3354 		   ehea_show_capabilities, NULL);
3356 int __init ehea_module_init(void)
3360 	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3364 	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3365 	sema_init(&dlpar_mem_lock, 1);
3367 	ret = check_module_parm();
3371 	ret = ehea_create_busmap();
3375 	register_reboot_notifier(&ehea_reboot_nb);
3377 	ret = ibmebus_register_driver(&ehea_driver);
3379 		ehea_error("failed registering eHEA device driver on ebus");
3383 	ret = driver_create_file(&ehea_driver.driver,
3384 				 &driver_attr_capabilities);
3386 		ehea_error("failed to register capabilities attribute, ret=%d",
3388 		unregister_reboot_notifier(&ehea_reboot_nb);
3389 		ibmebus_unregister_driver(&ehea_driver);
3397 static void __exit ehea_module_exit(void)
3399 	flush_scheduled_work();
3400 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3401 	ibmebus_unregister_driver(&ehea_driver);
3402 	unregister_reboot_notifier(&ehea_reboot_nb);
3403 	ehea_destroy_busmap();
3406 module_init(ehea_module_init);
3407 module_exit(ehea_module_exit);
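/*
 * Editor's note: module teardown in ehea_module_exit() mirrors
 * ehea_module_init() in reverse -- pending work is flushed, the
 * capabilities attribute and the ebus driver are removed, the reboot
 * notifier is unregistered and the busmap destroyed -- so a
 * load/unload cycle leaves no driver state behind.
 */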