1 /**************************************************************************/
3 /* IBM System i and System p Virtual NIC Device Driver */
4 /* Copyright (C) 2014 IBM Corp. */
5 /* Santiago Leon (santi_leon@yahoo.com) */
6 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7 /* John Allen (jallen@linux.vnet.ibm.com) */
9 /* This program is free software; you can redistribute it and/or modify */
10 /* it under the terms of the GNU General Public License as published by */
11 /* the Free Software Foundation; either version 2 of the License, or */
12 /* (at your option) any later version. */
14 /* This program is distributed in the hope that it will be useful, */
15 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17 /* GNU General Public License for more details. */
19 /* You should have received a copy of the GNU General Public License */
20 /* along with this program. */
22 /* This module contains the implementation of a virtual ethernet device */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24 /* option of the RS/6000 Platform Architecture to interface with virtual */
25 /* ethernet NICs that are presented to the partition by the hypervisor. */
27 /* Messages are passed between the VNIC driver and the VNIC server using */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31 /* are used by the driver to notify the server that a packet is */
32 /* ready for transmission or that a buffer has been added to receive a */
33 /* packet. Subsequently, sCRQs are used by the server to notify the */
34 /* driver that a packet transmission has been completed or that a packet */
35 /* has been received and placed in a waiting buffer. */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit */
39 /* or receive has been completed, the VNIC driver is required to use */
40 /* "long term mapping". This entails that large, continuous DMA mapped */
41 /* buffers are allocated on driver initialization and these buffers are */
42 /* then continuously reused to pass skbs to and from the VNIC server. */
44 /**************************************************************************/
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
64 #include <linux/ipv6.h>
65 #include <linux/irq.h>
66 #include <linux/kthread.h>
67 #include <linux/seq_file.h>
68 #include <linux/debugfs.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
77 #include <linux/workqueue.h>
81 static const char ibmvnic_driver_name
[] = "ibmvnic";
82 static const char ibmvnic_driver_string
[] = "IBM System i/p Virtual NIC Driver";
84 MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(IBMVNIC_DRIVER_VERSION
);
89 static int ibmvnic_version
= IBMVNIC_INITIAL_VERSION
;
90 static int ibmvnic_remove(struct vio_dev
*);
91 static void release_sub_crqs(struct ibmvnic_adapter
*);
92 static void release_sub_crqs_no_irqs(struct ibmvnic_adapter
*);
93 static int ibmvnic_reset_crq(struct ibmvnic_adapter
*);
94 static int ibmvnic_send_crq_init(struct ibmvnic_adapter
*);
95 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter
*);
96 static int ibmvnic_send_crq(struct ibmvnic_adapter
*, union ibmvnic_crq
*);
97 static int send_subcrq(struct ibmvnic_adapter
*adapter
, u64 remote_handle
,
98 union sub_crq
*sub_crq
);
99 static int send_subcrq_indirect(struct ibmvnic_adapter
*, u64
, u64
, u64
);
100 static irqreturn_t
ibmvnic_interrupt_rx(int irq
, void *instance
);
101 static int enable_scrq_irq(struct ibmvnic_adapter
*,
102 struct ibmvnic_sub_crq_queue
*);
103 static int disable_scrq_irq(struct ibmvnic_adapter
*,
104 struct ibmvnic_sub_crq_queue
*);
105 static int pending_scrq(struct ibmvnic_adapter
*,
106 struct ibmvnic_sub_crq_queue
*);
107 static union sub_crq
*ibmvnic_next_scrq(struct ibmvnic_adapter
*,
108 struct ibmvnic_sub_crq_queue
*);
109 static int ibmvnic_poll(struct napi_struct
*napi
, int data
);
110 static void send_map_query(struct ibmvnic_adapter
*adapter
);
111 static void send_request_map(struct ibmvnic_adapter
*, dma_addr_t
, __be32
, u8
);
112 static void send_request_unmap(struct ibmvnic_adapter
*, u8
);
114 struct ibmvnic_stat
{
115 char name
[ETH_GSTRING_LEN
];
119 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
120 offsetof(struct ibmvnic_statistics, stat))
121 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
123 static const struct ibmvnic_stat ibmvnic_stats
[] = {
124 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets
)},
125 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes
)},
126 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets
)},
127 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes
)},
128 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets
)},
129 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets
)},
130 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets
)},
131 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets
)},
132 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets
)},
133 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets
)},
134 {"align_errors", IBMVNIC_STAT_OFF(align_errors
)},
135 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors
)},
136 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames
)},
137 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames
)},
138 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors
)},
139 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx
)},
140 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions
)},
141 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions
)},
142 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors
)},
143 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense
)},
144 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames
)},
145 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors
)},
148 static long h_reg_sub_crq(unsigned long unit_address
, unsigned long token
,
149 unsigned long length
, unsigned long *number
,
152 unsigned long retbuf
[PLPAR_HCALL_BUFSIZE
];
155 rc
= plpar_hcall(H_REG_SUB_CRQ
, retbuf
, unit_address
, token
, length
);
162 /* net_device_ops functions */
164 static void init_rx_pool(struct ibmvnic_adapter
*adapter
,
165 struct ibmvnic_rx_pool
*rx_pool
, int num
, int index
,
166 int buff_size
, int active
)
168 netdev_dbg(adapter
->netdev
,
169 "Initializing rx_pool %d, %d buffs, %d bytes each\n",
170 index
, num
, buff_size
);
172 rx_pool
->index
= index
;
173 rx_pool
->buff_size
= buff_size
;
174 rx_pool
->active
= active
;
177 static int alloc_long_term_buff(struct ibmvnic_adapter
*adapter
,
178 struct ibmvnic_long_term_buff
*ltb
, int size
)
180 struct device
*dev
= &adapter
->vdev
->dev
;
183 ltb
->buff
= dma_alloc_coherent(dev
, ltb
->size
, <b
->addr
,
187 dev_err(dev
, "Couldn't alloc long term buffer\n");
190 ltb
->map_id
= adapter
->map_id
;
193 init_completion(&adapter
->fw_done
);
194 send_request_map(adapter
, ltb
->addr
,
195 ltb
->size
, ltb
->map_id
);
196 wait_for_completion(&adapter
->fw_done
);
200 static void free_long_term_buff(struct ibmvnic_adapter
*adapter
,
201 struct ibmvnic_long_term_buff
*ltb
)
203 struct device
*dev
= &adapter
->vdev
->dev
;
205 dma_free_coherent(dev
, ltb
->size
, ltb
->buff
, ltb
->addr
);
206 if (!adapter
->failover
)
207 send_request_unmap(adapter
, ltb
->map_id
);
210 static int alloc_rx_pool(struct ibmvnic_adapter
*adapter
,
211 struct ibmvnic_rx_pool
*pool
)
213 struct device
*dev
= &adapter
->vdev
->dev
;
216 pool
->free_map
= kcalloc(pool
->size
, sizeof(int), GFP_KERNEL
);
220 pool
->rx_buff
= kcalloc(pool
->size
, sizeof(struct ibmvnic_rx_buff
),
223 if (!pool
->rx_buff
) {
224 dev_err(dev
, "Couldn't alloc rx buffers\n");
225 kfree(pool
->free_map
);
229 if (alloc_long_term_buff(adapter
, &pool
->long_term_buff
,
230 pool
->size
* pool
->buff_size
)) {
231 kfree(pool
->free_map
);
232 kfree(pool
->rx_buff
);
236 for (i
= 0; i
< pool
->size
; ++i
)
237 pool
->free_map
[i
] = i
;
239 atomic_set(&pool
->available
, 0);
240 pool
->next_alloc
= 0;
246 static void replenish_rx_pool(struct ibmvnic_adapter
*adapter
,
247 struct ibmvnic_rx_pool
*pool
)
249 int count
= pool
->size
- atomic_read(&pool
->available
);
250 struct device
*dev
= &adapter
->vdev
->dev
;
251 int buffers_added
= 0;
252 unsigned long lpar_rc
;
253 union sub_crq sub_crq
;
263 handle_array
= (u64
*)((u8
*)(adapter
->login_rsp_buf
) +
264 be32_to_cpu(adapter
->login_rsp_buf
->
267 for (i
= 0; i
< count
; ++i
) {
268 skb
= alloc_skb(pool
->buff_size
, GFP_ATOMIC
);
270 dev_err(dev
, "Couldn't replenish rx buff\n");
271 adapter
->replenish_no_mem
++;
275 index
= pool
->free_map
[pool
->next_free
];
277 if (pool
->rx_buff
[index
].skb
)
278 dev_err(dev
, "Inconsistent free_map!\n");
280 /* Copy the skb to the long term mapped DMA buffer */
281 offset
= index
* pool
->buff_size
;
282 dst
= pool
->long_term_buff
.buff
+ offset
;
283 memset(dst
, 0, pool
->buff_size
);
284 dma_addr
= pool
->long_term_buff
.addr
+ offset
;
285 pool
->rx_buff
[index
].data
= dst
;
287 pool
->free_map
[pool
->next_free
] = IBMVNIC_INVALID_MAP
;
288 pool
->rx_buff
[index
].dma
= dma_addr
;
289 pool
->rx_buff
[index
].skb
= skb
;
290 pool
->rx_buff
[index
].pool_index
= pool
->index
;
291 pool
->rx_buff
[index
].size
= pool
->buff_size
;
293 memset(&sub_crq
, 0, sizeof(sub_crq
));
294 sub_crq
.rx_add
.first
= IBMVNIC_CRQ_CMD
;
295 sub_crq
.rx_add
.correlator
=
296 cpu_to_be64((u64
)&pool
->rx_buff
[index
]);
297 sub_crq
.rx_add
.ioba
= cpu_to_be32(dma_addr
);
298 sub_crq
.rx_add
.map_id
= pool
->long_term_buff
.map_id
;
300 /* The length field of the sCRQ is defined to be 24 bits so the
301 * buffer size needs to be left shifted by a byte before it is
302 * converted to big endian to prevent the last byte from being
305 #ifdef __LITTLE_ENDIAN__
308 sub_crq
.rx_add
.len
= cpu_to_be32(pool
->buff_size
<< shift
);
310 lpar_rc
= send_subcrq(adapter
, handle_array
[pool
->index
],
312 if (lpar_rc
!= H_SUCCESS
)
316 adapter
->replenish_add_buff_success
++;
317 pool
->next_free
= (pool
->next_free
+ 1) % pool
->size
;
319 atomic_add(buffers_added
, &pool
->available
);
323 dev_info(dev
, "replenish pools failure\n");
324 pool
->free_map
[pool
->next_free
] = index
;
325 pool
->rx_buff
[index
].skb
= NULL
;
326 if (!dma_mapping_error(dev
, dma_addr
))
327 dma_unmap_single(dev
, dma_addr
, pool
->buff_size
,
330 dev_kfree_skb_any(skb
);
331 adapter
->replenish_add_buff_failure
++;
332 atomic_add(buffers_added
, &pool
->available
);
335 static void replenish_pools(struct ibmvnic_adapter
*adapter
)
339 if (adapter
->migrated
)
342 adapter
->replenish_task_cycles
++;
343 for (i
= 0; i
< be32_to_cpu(adapter
->login_rsp_buf
->num_rxadd_subcrqs
);
345 if (adapter
->rx_pool
[i
].active
)
346 replenish_rx_pool(adapter
, &adapter
->rx_pool
[i
]);
350 static void free_rx_pool(struct ibmvnic_adapter
*adapter
,
351 struct ibmvnic_rx_pool
*pool
)
355 kfree(pool
->free_map
);
356 pool
->free_map
= NULL
;
361 for (i
= 0; i
< pool
->size
; i
++) {
362 if (pool
->rx_buff
[i
].skb
) {
363 dev_kfree_skb_any(pool
->rx_buff
[i
].skb
);
364 pool
->rx_buff
[i
].skb
= NULL
;
367 kfree(pool
->rx_buff
);
368 pool
->rx_buff
= NULL
;
371 static int ibmvnic_open(struct net_device
*netdev
)
373 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
374 struct device
*dev
= &adapter
->vdev
->dev
;
375 struct ibmvnic_tx_pool
*tx_pool
;
376 union ibmvnic_crq crq
;
383 be32_to_cpu(adapter
->login_rsp_buf
->num_rxadd_subcrqs
);
385 be32_to_cpu(adapter
->login_rsp_buf
->num_txsubm_subcrqs
);
386 size_array
= (u64
*)((u8
*)(adapter
->login_rsp_buf
) +
387 be32_to_cpu(adapter
->login_rsp_buf
->
388 off_rxadd_buff_size
));
390 adapter
->napi
= kcalloc(adapter
->req_rx_queues
,
391 sizeof(struct napi_struct
), GFP_KERNEL
);
393 goto alloc_napi_failed
;
394 for (i
= 0; i
< adapter
->req_rx_queues
; i
++) {
395 netif_napi_add(netdev
, &adapter
->napi
[i
], ibmvnic_poll
,
397 napi_enable(&adapter
->napi
[i
]);
400 kcalloc(rxadd_subcrqs
, sizeof(struct ibmvnic_rx_pool
), GFP_KERNEL
);
402 if (!adapter
->rx_pool
)
403 goto rx_pool_arr_alloc_failed
;
404 send_map_query(adapter
);
405 for (i
= 0; i
< rxadd_subcrqs
; i
++) {
406 init_rx_pool(adapter
, &adapter
->rx_pool
[i
],
407 IBMVNIC_BUFFS_PER_POOL
, i
,
408 be64_to_cpu(size_array
[i
]), 1);
409 if (alloc_rx_pool(adapter
, &adapter
->rx_pool
[i
])) {
410 dev_err(dev
, "Couldn't alloc rx pool\n");
411 goto rx_pool_alloc_failed
;
415 kcalloc(tx_subcrqs
, sizeof(struct ibmvnic_tx_pool
), GFP_KERNEL
);
417 if (!adapter
->tx_pool
)
418 goto tx_pool_arr_alloc_failed
;
419 for (i
= 0; i
< tx_subcrqs
; i
++) {
420 tx_pool
= &adapter
->tx_pool
[i
];
422 kcalloc(adapter
->max_tx_entries_per_subcrq
,
423 sizeof(struct ibmvnic_tx_buff
), GFP_KERNEL
);
424 if (!tx_pool
->tx_buff
)
425 goto tx_pool_alloc_failed
;
427 if (alloc_long_term_buff(adapter
, &tx_pool
->long_term_buff
,
428 adapter
->max_tx_entries_per_subcrq
*
430 goto tx_ltb_alloc_failed
;
433 kcalloc(adapter
->max_tx_entries_per_subcrq
,
434 sizeof(int), GFP_KERNEL
);
435 if (!tx_pool
->free_map
)
436 goto tx_fm_alloc_failed
;
438 for (j
= 0; j
< adapter
->max_tx_entries_per_subcrq
; j
++)
439 tx_pool
->free_map
[j
] = j
;
441 tx_pool
->consumer_index
= 0;
442 tx_pool
->producer_index
= 0;
444 adapter
->bounce_buffer_size
=
445 (netdev
->mtu
+ ETH_HLEN
- 1) / PAGE_SIZE
+ 1;
446 adapter
->bounce_buffer
= kmalloc(adapter
->bounce_buffer_size
,
448 if (!adapter
->bounce_buffer
)
449 goto bounce_alloc_failed
;
451 adapter
->bounce_buffer_dma
= dma_map_single(dev
, adapter
->bounce_buffer
,
452 adapter
->bounce_buffer_size
,
454 if (dma_mapping_error(dev
, adapter
->bounce_buffer_dma
)) {
455 dev_err(dev
, "Couldn't map tx bounce buffer\n");
456 goto bounce_map_failed
;
458 replenish_pools(adapter
);
460 /* We're ready to receive frames, enable the sub-crq interrupts and
461 * set the logical link state to up
463 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
464 enable_scrq_irq(adapter
, adapter
->rx_scrq
[i
]);
466 for (i
= 0; i
< adapter
->req_tx_queues
; i
++)
467 enable_scrq_irq(adapter
, adapter
->tx_scrq
[i
]);
469 memset(&crq
, 0, sizeof(crq
));
470 crq
.logical_link_state
.first
= IBMVNIC_CRQ_CMD
;
471 crq
.logical_link_state
.cmd
= LOGICAL_LINK_STATE
;
472 crq
.logical_link_state
.link_state
= IBMVNIC_LOGICAL_LNK_UP
;
473 ibmvnic_send_crq(adapter
, &crq
);
475 netif_tx_start_all_queues(netdev
);
480 kfree(adapter
->bounce_buffer
);
483 kfree(adapter
->tx_pool
[i
].free_map
);
485 free_long_term_buff(adapter
, &adapter
->tx_pool
[i
].long_term_buff
);
487 kfree(adapter
->tx_pool
[i
].tx_buff
);
488 tx_pool_alloc_failed
:
489 for (j
= 0; j
< i
; j
++) {
490 kfree(adapter
->tx_pool
[j
].tx_buff
);
491 free_long_term_buff(adapter
,
492 &adapter
->tx_pool
[j
].long_term_buff
);
493 kfree(adapter
->tx_pool
[j
].free_map
);
495 kfree(adapter
->tx_pool
);
496 adapter
->tx_pool
= NULL
;
497 tx_pool_arr_alloc_failed
:
499 rx_pool_alloc_failed
:
500 for (j
= 0; j
< i
; j
++) {
501 free_rx_pool(adapter
, &adapter
->rx_pool
[j
]);
502 free_long_term_buff(adapter
,
503 &adapter
->rx_pool
[j
].long_term_buff
);
505 kfree(adapter
->rx_pool
);
506 adapter
->rx_pool
= NULL
;
507 rx_pool_arr_alloc_failed
:
508 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
509 napi_disable(&adapter
->napi
[i
]);
514 static int ibmvnic_close(struct net_device
*netdev
)
516 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
517 struct device
*dev
= &adapter
->vdev
->dev
;
518 union ibmvnic_crq crq
;
521 adapter
->closing
= true;
523 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
524 napi_disable(&adapter
->napi
[i
]);
526 if (!adapter
->failover
)
527 netif_tx_stop_all_queues(netdev
);
529 if (adapter
->bounce_buffer
) {
530 if (!dma_mapping_error(dev
, adapter
->bounce_buffer_dma
)) {
531 dma_unmap_single(&adapter
->vdev
->dev
,
532 adapter
->bounce_buffer_dma
,
533 adapter
->bounce_buffer_size
,
535 adapter
->bounce_buffer_dma
= DMA_ERROR_CODE
;
537 kfree(adapter
->bounce_buffer
);
538 adapter
->bounce_buffer
= NULL
;
541 memset(&crq
, 0, sizeof(crq
));
542 crq
.logical_link_state
.first
= IBMVNIC_CRQ_CMD
;
543 crq
.logical_link_state
.cmd
= LOGICAL_LINK_STATE
;
544 crq
.logical_link_state
.link_state
= IBMVNIC_LOGICAL_LNK_DN
;
545 ibmvnic_send_crq(adapter
, &crq
);
547 for (i
= 0; i
< be32_to_cpu(adapter
->login_rsp_buf
->num_txsubm_subcrqs
);
549 kfree(adapter
->tx_pool
[i
].tx_buff
);
550 free_long_term_buff(adapter
,
551 &adapter
->tx_pool
[i
].long_term_buff
);
552 kfree(adapter
->tx_pool
[i
].free_map
);
554 kfree(adapter
->tx_pool
);
555 adapter
->tx_pool
= NULL
;
557 for (i
= 0; i
< be32_to_cpu(adapter
->login_rsp_buf
->num_rxadd_subcrqs
);
559 free_rx_pool(adapter
, &adapter
->rx_pool
[i
]);
560 free_long_term_buff(adapter
,
561 &adapter
->rx_pool
[i
].long_term_buff
);
563 kfree(adapter
->rx_pool
);
564 adapter
->rx_pool
= NULL
;
566 adapter
->closing
= false;
572 * build_hdr_data - creates L2/L3/L4 header data buffer
573 * @hdr_field - bitfield determining needed headers
574 * @skb - socket buffer
575 * @hdr_len - array of header lengths
576 * @tot_len - total length of data
578 * Reads hdr_field to determine which headers are needed by firmware.
579 * Builds a buffer containing these headers. Saves individual header
580 * lengths and total buffer length to be used to build descriptors.
582 static int build_hdr_data(u8 hdr_field
, struct sk_buff
*skb
,
583 int *hdr_len
, u8
*hdr_data
)
588 hdr_len
[0] = sizeof(struct ethhdr
);
590 if (skb
->protocol
== htons(ETH_P_IP
)) {
591 hdr_len
[1] = ip_hdr(skb
)->ihl
* 4;
592 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
593 hdr_len
[2] = tcp_hdrlen(skb
);
594 else if (ip_hdr(skb
)->protocol
== IPPROTO_UDP
)
595 hdr_len
[2] = sizeof(struct udphdr
);
596 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
597 hdr_len
[1] = sizeof(struct ipv6hdr
);
598 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
599 hdr_len
[2] = tcp_hdrlen(skb
);
600 else if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_UDP
)
601 hdr_len
[2] = sizeof(struct udphdr
);
604 memset(hdr_data
, 0, 120);
605 if ((hdr_field
>> 6) & 1) {
606 hdr
= skb_mac_header(skb
);
607 memcpy(hdr_data
, hdr
, hdr_len
[0]);
611 if ((hdr_field
>> 5) & 1) {
612 hdr
= skb_network_header(skb
);
613 memcpy(hdr_data
+ len
, hdr
, hdr_len
[1]);
617 if ((hdr_field
>> 4) & 1) {
618 hdr
= skb_transport_header(skb
);
619 memcpy(hdr_data
+ len
, hdr
, hdr_len
[2]);
626 * create_hdr_descs - create header and header extension descriptors
627 * @hdr_field - bitfield determining needed headers
628 * @data - buffer containing header data
629 * @len - length of data buffer
630 * @hdr_len - array of individual header lengths
631 * @scrq_arr - descriptor array
633 * Creates header and, if needed, header extension descriptors and
634 * places them in a descriptor array, scrq_arr
637 static void create_hdr_descs(u8 hdr_field
, u8
*hdr_data
, int len
, int *hdr_len
,
638 union sub_crq
*scrq_arr
)
640 union sub_crq hdr_desc
;
645 while (tmp_len
> 0) {
646 cur
= hdr_data
+ len
- tmp_len
;
648 memset(&hdr_desc
, 0, sizeof(hdr_desc
));
649 if (cur
!= hdr_data
) {
650 data
= hdr_desc
.hdr_ext
.data
;
651 tmp
= tmp_len
> 29 ? 29 : tmp_len
;
652 hdr_desc
.hdr_ext
.first
= IBMVNIC_CRQ_CMD
;
653 hdr_desc
.hdr_ext
.type
= IBMVNIC_HDR_EXT_DESC
;
654 hdr_desc
.hdr_ext
.len
= tmp
;
656 data
= hdr_desc
.hdr
.data
;
657 tmp
= tmp_len
> 24 ? 24 : tmp_len
;
658 hdr_desc
.hdr
.first
= IBMVNIC_CRQ_CMD
;
659 hdr_desc
.hdr
.type
= IBMVNIC_HDR_DESC
;
660 hdr_desc
.hdr
.len
= tmp
;
661 hdr_desc
.hdr
.l2_len
= (u8
)hdr_len
[0];
662 hdr_desc
.hdr
.l3_len
= cpu_to_be16((u16
)hdr_len
[1]);
663 hdr_desc
.hdr
.l4_len
= (u8
)hdr_len
[2];
664 hdr_desc
.hdr
.flag
= hdr_field
<< 1;
666 memcpy(data
, cur
, tmp
);
668 *scrq_arr
= hdr_desc
;
674 * build_hdr_descs_arr - build a header descriptor array
675 * @skb - socket buffer
676 * @num_entries - number of descriptors to be sent
677 * @subcrq - first TX descriptor
678 * @hdr_field - bit field determining which headers will be sent
680 * This function will build a TX descriptor array with applicable
681 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
684 static void build_hdr_descs_arr(struct ibmvnic_tx_buff
*txbuff
,
685 int *num_entries
, u8 hdr_field
)
687 int hdr_len
[3] = {0, 0, 0};
689 u8
*hdr_data
= txbuff
->hdr_data
;
691 tot_len
= build_hdr_data(hdr_field
, txbuff
->skb
, hdr_len
,
696 num_entries
+= len
% 29 ? len
/ 29 + 1 : len
/ 29;
697 create_hdr_descs(hdr_field
, hdr_data
, tot_len
, hdr_len
,
698 txbuff
->indir_arr
+ 1);
701 static int ibmvnic_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
703 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
704 int queue_num
= skb_get_queue_mapping(skb
);
705 u8
*hdrs
= (u8
*)&adapter
->tx_rx_desc_req
;
706 struct device
*dev
= &adapter
->vdev
->dev
;
707 struct ibmvnic_tx_buff
*tx_buff
= NULL
;
708 struct ibmvnic_tx_pool
*tx_pool
;
709 unsigned int tx_send_failed
= 0;
710 unsigned int tx_map_failed
= 0;
711 unsigned int tx_dropped
= 0;
712 unsigned int tx_packets
= 0;
713 unsigned int tx_bytes
= 0;
714 dma_addr_t data_dma_addr
;
715 struct netdev_queue
*txq
;
716 bool used_bounce
= false;
717 unsigned long lpar_rc
;
718 union sub_crq tx_crq
;
726 tx_pool
= &adapter
->tx_pool
[queue_num
];
727 txq
= netdev_get_tx_queue(netdev
, skb_get_queue_mapping(skb
));
728 handle_array
= (u64
*)((u8
*)(adapter
->login_rsp_buf
) +
729 be32_to_cpu(adapter
->login_rsp_buf
->
730 off_txsubm_subcrqs
));
731 if (adapter
->migrated
) {
734 ret
= NETDEV_TX_BUSY
;
738 index
= tx_pool
->free_map
[tx_pool
->consumer_index
];
739 offset
= index
* adapter
->req_mtu
;
740 dst
= tx_pool
->long_term_buff
.buff
+ offset
;
741 memset(dst
, 0, adapter
->req_mtu
);
742 skb_copy_from_linear_data(skb
, dst
, skb
->len
);
743 data_dma_addr
= tx_pool
->long_term_buff
.addr
+ offset
;
745 tx_pool
->consumer_index
=
746 (tx_pool
->consumer_index
+ 1) %
747 adapter
->max_tx_entries_per_subcrq
;
749 tx_buff
= &tx_pool
->tx_buff
[index
];
751 tx_buff
->data_dma
[0] = data_dma_addr
;
752 tx_buff
->data_len
[0] = skb
->len
;
753 tx_buff
->index
= index
;
754 tx_buff
->pool_index
= queue_num
;
755 tx_buff
->last_frag
= true;
756 tx_buff
->used_bounce
= used_bounce
;
758 memset(&tx_crq
, 0, sizeof(tx_crq
));
759 tx_crq
.v1
.first
= IBMVNIC_CRQ_CMD
;
760 tx_crq
.v1
.type
= IBMVNIC_TX_DESC
;
761 tx_crq
.v1
.n_crq_elem
= 1;
763 tx_crq
.v1
.flags1
= IBMVNIC_TX_COMP_NEEDED
;
764 tx_crq
.v1
.correlator
= cpu_to_be32(index
);
765 tx_crq
.v1
.dma_reg
= cpu_to_be16(tx_pool
->long_term_buff
.map_id
);
766 tx_crq
.v1
.sge_len
= cpu_to_be32(skb
->len
);
767 tx_crq
.v1
.ioba
= cpu_to_be64(data_dma_addr
);
769 if (adapter
->vlan_header_insertion
) {
770 tx_crq
.v1
.flags2
|= IBMVNIC_TX_VLAN_INSERT
;
771 tx_crq
.v1
.vlan_id
= cpu_to_be16(skb
->vlan_tci
);
774 if (skb
->protocol
== htons(ETH_P_IP
)) {
775 if (ip_hdr(skb
)->version
== 4)
776 tx_crq
.v1
.flags1
|= IBMVNIC_TX_PROT_IPV4
;
777 else if (ip_hdr(skb
)->version
== 6)
778 tx_crq
.v1
.flags1
|= IBMVNIC_TX_PROT_IPV6
;
780 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
781 tx_crq
.v1
.flags1
|= IBMVNIC_TX_PROT_TCP
;
782 else if (ip_hdr(skb
)->protocol
!= IPPROTO_TCP
)
783 tx_crq
.v1
.flags1
|= IBMVNIC_TX_PROT_UDP
;
786 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
787 tx_crq
.v1
.flags1
|= IBMVNIC_TX_CHKSUM_OFFLOAD
;
790 /* determine if l2/3/4 headers are sent to firmware */
791 if ((*hdrs
>> 7) & 1 &&
792 (skb
->protocol
== htons(ETH_P_IP
) ||
793 skb
->protocol
== htons(ETH_P_IPV6
))) {
794 build_hdr_descs_arr(tx_buff
, &num_entries
, *hdrs
);
795 tx_crq
.v1
.n_crq_elem
= num_entries
;
796 tx_buff
->indir_arr
[0] = tx_crq
;
797 tx_buff
->indir_dma
= dma_map_single(dev
, tx_buff
->indir_arr
,
798 sizeof(tx_buff
->indir_arr
),
800 if (dma_mapping_error(dev
, tx_buff
->indir_dma
)) {
801 if (!firmware_has_feature(FW_FEATURE_CMO
))
802 dev_err(dev
, "tx: unable to map descriptor array\n");
805 ret
= NETDEV_TX_BUSY
;
808 lpar_rc
= send_subcrq_indirect(adapter
, handle_array
[queue_num
],
809 (u64
)tx_buff
->indir_dma
,
812 lpar_rc
= send_subcrq(adapter
, handle_array
[queue_num
],
815 if (lpar_rc
!= H_SUCCESS
) {
816 dev_err(dev
, "tx failed with code %ld\n", lpar_rc
);
818 if (tx_pool
->consumer_index
== 0)
819 tx_pool
->consumer_index
=
820 adapter
->max_tx_entries_per_subcrq
- 1;
822 tx_pool
->consumer_index
--;
826 ret
= NETDEV_TX_BUSY
;
830 tx_bytes
+= skb
->len
;
831 txq
->trans_start
= jiffies
;
835 netdev
->stats
.tx_dropped
+= tx_dropped
;
836 netdev
->stats
.tx_bytes
+= tx_bytes
;
837 netdev
->stats
.tx_packets
+= tx_packets
;
838 adapter
->tx_send_failed
+= tx_send_failed
;
839 adapter
->tx_map_failed
+= tx_map_failed
;
844 static void ibmvnic_set_multi(struct net_device
*netdev
)
846 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
847 struct netdev_hw_addr
*ha
;
848 union ibmvnic_crq crq
;
850 memset(&crq
, 0, sizeof(crq
));
851 crq
.request_capability
.first
= IBMVNIC_CRQ_CMD
;
852 crq
.request_capability
.cmd
= REQUEST_CAPABILITY
;
854 if (netdev
->flags
& IFF_PROMISC
) {
855 if (!adapter
->promisc_supported
)
858 if (netdev
->flags
& IFF_ALLMULTI
) {
859 /* Accept all multicast */
860 memset(&crq
, 0, sizeof(crq
));
861 crq
.multicast_ctrl
.first
= IBMVNIC_CRQ_CMD
;
862 crq
.multicast_ctrl
.cmd
= MULTICAST_CTRL
;
863 crq
.multicast_ctrl
.flags
= IBMVNIC_ENABLE_ALL
;
864 ibmvnic_send_crq(adapter
, &crq
);
865 } else if (netdev_mc_empty(netdev
)) {
866 /* Reject all multicast */
867 memset(&crq
, 0, sizeof(crq
));
868 crq
.multicast_ctrl
.first
= IBMVNIC_CRQ_CMD
;
869 crq
.multicast_ctrl
.cmd
= MULTICAST_CTRL
;
870 crq
.multicast_ctrl
.flags
= IBMVNIC_DISABLE_ALL
;
871 ibmvnic_send_crq(adapter
, &crq
);
873 /* Accept one or more multicast(s) */
874 netdev_for_each_mc_addr(ha
, netdev
) {
875 memset(&crq
, 0, sizeof(crq
));
876 crq
.multicast_ctrl
.first
= IBMVNIC_CRQ_CMD
;
877 crq
.multicast_ctrl
.cmd
= MULTICAST_CTRL
;
878 crq
.multicast_ctrl
.flags
= IBMVNIC_ENABLE_MC
;
879 ether_addr_copy(&crq
.multicast_ctrl
.mac_addr
[0],
881 ibmvnic_send_crq(adapter
, &crq
);
887 static int ibmvnic_set_mac(struct net_device
*netdev
, void *p
)
889 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
890 struct sockaddr
*addr
= p
;
891 union ibmvnic_crq crq
;
893 if (!is_valid_ether_addr(addr
->sa_data
))
894 return -EADDRNOTAVAIL
;
896 memset(&crq
, 0, sizeof(crq
));
897 crq
.change_mac_addr
.first
= IBMVNIC_CRQ_CMD
;
898 crq
.change_mac_addr
.cmd
= CHANGE_MAC_ADDR
;
899 ether_addr_copy(&crq
.change_mac_addr
.mac_addr
[0], addr
->sa_data
);
900 ibmvnic_send_crq(adapter
, &crq
);
901 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
905 static int ibmvnic_change_mtu(struct net_device
*netdev
, int new_mtu
)
907 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
909 if (new_mtu
> adapter
->req_mtu
|| new_mtu
< adapter
->min_mtu
)
912 netdev
->mtu
= new_mtu
;
916 static void ibmvnic_tx_timeout(struct net_device
*dev
)
918 struct ibmvnic_adapter
*adapter
= netdev_priv(dev
);
921 /* Adapter timed out, resetting it */
922 release_sub_crqs(adapter
);
923 rc
= ibmvnic_reset_crq(adapter
);
925 dev_err(&adapter
->vdev
->dev
, "Adapter timeout, reset failed\n");
927 ibmvnic_send_crq_init(adapter
);
930 static void remove_buff_from_pool(struct ibmvnic_adapter
*adapter
,
931 struct ibmvnic_rx_buff
*rx_buff
)
933 struct ibmvnic_rx_pool
*pool
= &adapter
->rx_pool
[rx_buff
->pool_index
];
937 pool
->free_map
[pool
->next_alloc
] = (int)(rx_buff
- pool
->rx_buff
);
938 pool
->next_alloc
= (pool
->next_alloc
+ 1) % pool
->size
;
940 atomic_dec(&pool
->available
);
943 static int ibmvnic_poll(struct napi_struct
*napi
, int budget
)
945 struct net_device
*netdev
= napi
->dev
;
946 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
947 int scrq_num
= (int)(napi
- adapter
->napi
);
948 int frames_processed
= 0;
950 while (frames_processed
< budget
) {
952 struct ibmvnic_rx_buff
*rx_buff
;
958 if (!pending_scrq(adapter
, adapter
->rx_scrq
[scrq_num
]))
960 next
= ibmvnic_next_scrq(adapter
, adapter
->rx_scrq
[scrq_num
]);
962 (struct ibmvnic_rx_buff
*)be64_to_cpu(next
->
964 /* do error checking */
965 if (next
->rx_comp
.rc
) {
966 netdev_err(netdev
, "rx error %x\n", next
->rx_comp
.rc
);
968 next
->rx_comp
.first
= 0;
969 remove_buff_from_pool(adapter
, rx_buff
);
973 length
= be32_to_cpu(next
->rx_comp
.len
);
974 offset
= be16_to_cpu(next
->rx_comp
.off_frame_data
);
975 flags
= next
->rx_comp
.flags
;
977 skb_copy_to_linear_data(skb
, rx_buff
->data
+ offset
,
979 skb
->vlan_tci
= be16_to_cpu(next
->rx_comp
.vlan_tci
);
981 next
->rx_comp
.first
= 0;
982 remove_buff_from_pool(adapter
, rx_buff
);
984 skb_put(skb
, length
);
985 skb
->protocol
= eth_type_trans(skb
, netdev
);
987 if (flags
& IBMVNIC_IP_CHKSUM_GOOD
&&
988 flags
& IBMVNIC_TCP_UDP_CHKSUM_GOOD
) {
989 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
993 napi_gro_receive(napi
, skb
); /* send it up */
994 netdev
->stats
.rx_packets
++;
995 netdev
->stats
.rx_bytes
+= length
;
998 replenish_rx_pool(adapter
, &adapter
->rx_pool
[scrq_num
]);
1000 if (frames_processed
< budget
) {
1001 enable_scrq_irq(adapter
, adapter
->rx_scrq
[scrq_num
]);
1002 napi_complete(napi
);
1003 if (pending_scrq(adapter
, adapter
->rx_scrq
[scrq_num
]) &&
1004 napi_reschedule(napi
)) {
1005 disable_scrq_irq(adapter
, adapter
->rx_scrq
[scrq_num
]);
1009 return frames_processed
;
1012 #ifdef CONFIG_NET_POLL_CONTROLLER
1013 static void ibmvnic_netpoll_controller(struct net_device
*dev
)
1015 struct ibmvnic_adapter
*adapter
= netdev_priv(dev
);
1018 replenish_pools(netdev_priv(dev
));
1019 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
1020 ibmvnic_interrupt_rx(adapter
->rx_scrq
[i
]->irq
,
1021 adapter
->rx_scrq
[i
]);
1025 static const struct net_device_ops ibmvnic_netdev_ops
= {
1026 .ndo_open
= ibmvnic_open
,
1027 .ndo_stop
= ibmvnic_close
,
1028 .ndo_start_xmit
= ibmvnic_xmit
,
1029 .ndo_set_rx_mode
= ibmvnic_set_multi
,
1030 .ndo_set_mac_address
= ibmvnic_set_mac
,
1031 .ndo_validate_addr
= eth_validate_addr
,
1032 .ndo_change_mtu
= ibmvnic_change_mtu
,
1033 .ndo_tx_timeout
= ibmvnic_tx_timeout
,
1034 #ifdef CONFIG_NET_POLL_CONTROLLER
1035 .ndo_poll_controller
= ibmvnic_netpoll_controller
,
1039 /* ethtool functions */
1041 static int ibmvnic_get_settings(struct net_device
*netdev
,
1042 struct ethtool_cmd
*cmd
)
1044 cmd
->supported
= (SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg
|
1046 cmd
->advertising
= (ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg
|
1048 ethtool_cmd_speed_set(cmd
, SPEED_1000
);
1049 cmd
->duplex
= DUPLEX_FULL
;
1050 cmd
->port
= PORT_FIBRE
;
1051 cmd
->phy_address
= 0;
1052 cmd
->transceiver
= XCVR_INTERNAL
;
1053 cmd
->autoneg
= AUTONEG_ENABLE
;
1059 static void ibmvnic_get_drvinfo(struct net_device
*dev
,
1060 struct ethtool_drvinfo
*info
)
1062 strlcpy(info
->driver
, ibmvnic_driver_name
, sizeof(info
->driver
));
1063 strlcpy(info
->version
, IBMVNIC_DRIVER_VERSION
, sizeof(info
->version
));
1066 static u32
ibmvnic_get_msglevel(struct net_device
*netdev
)
1068 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
1070 return adapter
->msg_enable
;
1073 static void ibmvnic_set_msglevel(struct net_device
*netdev
, u32 data
)
1075 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
1077 adapter
->msg_enable
= data
;
1080 static u32
ibmvnic_get_link(struct net_device
*netdev
)
1082 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
1084 /* Don't need to send a query because we request a logical link up at
1085 * init and then we wait for link state indications
1087 return adapter
->logical_link_state
;
1090 static void ibmvnic_get_ringparam(struct net_device
*netdev
,
1091 struct ethtool_ringparam
*ring
)
1093 ring
->rx_max_pending
= 0;
1094 ring
->tx_max_pending
= 0;
1095 ring
->rx_mini_max_pending
= 0;
1096 ring
->rx_jumbo_max_pending
= 0;
1097 ring
->rx_pending
= 0;
1098 ring
->tx_pending
= 0;
1099 ring
->rx_mini_pending
= 0;
1100 ring
->rx_jumbo_pending
= 0;
1103 static void ibmvnic_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1107 if (stringset
!= ETH_SS_STATS
)
1110 for (i
= 0; i
< ARRAY_SIZE(ibmvnic_stats
); i
++, data
+= ETH_GSTRING_LEN
)
1111 memcpy(data
, ibmvnic_stats
[i
].name
, ETH_GSTRING_LEN
);
1114 static int ibmvnic_get_sset_count(struct net_device
*dev
, int sset
)
1118 return ARRAY_SIZE(ibmvnic_stats
);
1124 static void ibmvnic_get_ethtool_stats(struct net_device
*dev
,
1125 struct ethtool_stats
*stats
, u64
*data
)
1127 struct ibmvnic_adapter
*adapter
= netdev_priv(dev
);
1128 union ibmvnic_crq crq
;
1131 memset(&crq
, 0, sizeof(crq
));
1132 crq
.request_statistics
.first
= IBMVNIC_CRQ_CMD
;
1133 crq
.request_statistics
.cmd
= REQUEST_STATISTICS
;
1134 crq
.request_statistics
.ioba
= cpu_to_be32(adapter
->stats_token
);
1135 crq
.request_statistics
.len
=
1136 cpu_to_be32(sizeof(struct ibmvnic_statistics
));
1138 /* Wait for data to be written */
1139 init_completion(&adapter
->stats_done
);
1140 ibmvnic_send_crq(adapter
, &crq
);
1141 wait_for_completion(&adapter
->stats_done
);
1143 for (i
= 0; i
< ARRAY_SIZE(ibmvnic_stats
); i
++)
1144 data
[i
] = IBMVNIC_GET_STAT(adapter
, ibmvnic_stats
[i
].offset
);
1147 static const struct ethtool_ops ibmvnic_ethtool_ops
= {
1148 .get_settings
= ibmvnic_get_settings
,
1149 .get_drvinfo
= ibmvnic_get_drvinfo
,
1150 .get_msglevel
= ibmvnic_get_msglevel
,
1151 .set_msglevel
= ibmvnic_set_msglevel
,
1152 .get_link
= ibmvnic_get_link
,
1153 .get_ringparam
= ibmvnic_get_ringparam
,
1154 .get_strings
= ibmvnic_get_strings
,
1155 .get_sset_count
= ibmvnic_get_sset_count
,
1156 .get_ethtool_stats
= ibmvnic_get_ethtool_stats
,
1159 /* Routines for managing CRQs/sCRQs */
1161 static void release_sub_crq_queue(struct ibmvnic_adapter
*adapter
,
1162 struct ibmvnic_sub_crq_queue
*scrq
)
1164 struct device
*dev
= &adapter
->vdev
->dev
;
1167 netdev_dbg(adapter
->netdev
, "Releasing sub-CRQ\n");
1169 /* Close the sub-crqs */
1171 rc
= plpar_hcall_norets(H_FREE_SUB_CRQ
,
1172 adapter
->vdev
->unit_address
,
1174 } while (rc
== H_BUSY
|| H_IS_LONG_BUSY(rc
));
1176 dma_unmap_single(dev
, scrq
->msg_token
, 4 * PAGE_SIZE
,
1178 free_pages((unsigned long)scrq
->msgs
, 2);
1182 static struct ibmvnic_sub_crq_queue
*init_sub_crq_queue(struct ibmvnic_adapter
1185 struct device
*dev
= &adapter
->vdev
->dev
;
1186 struct ibmvnic_sub_crq_queue
*scrq
;
1189 scrq
= kmalloc(sizeof(*scrq
), GFP_ATOMIC
);
1193 scrq
->msgs
= (union sub_crq
*)__get_free_pages(GFP_ATOMIC
, 2);
1194 memset(scrq
->msgs
, 0, 4 * PAGE_SIZE
);
1196 dev_warn(dev
, "Couldn't allocate crq queue messages page\n");
1197 goto zero_page_failed
;
1200 scrq
->msg_token
= dma_map_single(dev
, scrq
->msgs
, 4 * PAGE_SIZE
,
1202 if (dma_mapping_error(dev
, scrq
->msg_token
)) {
1203 dev_warn(dev
, "Couldn't map crq queue messages page\n");
1207 rc
= h_reg_sub_crq(adapter
->vdev
->unit_address
, scrq
->msg_token
,
1208 4 * PAGE_SIZE
, &scrq
->crq_num
, &scrq
->hw_irq
);
1210 if (rc
== H_RESOURCE
)
1211 rc
= ibmvnic_reset_crq(adapter
);
1213 if (rc
== H_CLOSED
) {
1214 dev_warn(dev
, "Partner adapter not ready, waiting.\n");
1216 dev_warn(dev
, "Error %d registering sub-crq\n", rc
);
1220 scrq
->adapter
= adapter
;
1221 scrq
->size
= 4 * PAGE_SIZE
/ sizeof(*scrq
->msgs
);
1223 scrq
->rx_skb_top
= NULL
;
1224 spin_lock_init(&scrq
->lock
);
1226 netdev_dbg(adapter
->netdev
,
1227 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1228 scrq
->crq_num
, scrq
->hw_irq
, scrq
->irq
);
1233 dma_unmap_single(dev
, scrq
->msg_token
, 4 * PAGE_SIZE
,
1236 free_pages((unsigned long)scrq
->msgs
, 2);
1243 static void release_sub_crqs(struct ibmvnic_adapter
*adapter
)
1247 if (adapter
->tx_scrq
) {
1248 for (i
= 0; i
< adapter
->req_tx_queues
; i
++)
1249 if (adapter
->tx_scrq
[i
]) {
1250 free_irq(adapter
->tx_scrq
[i
]->irq
,
1251 adapter
->tx_scrq
[i
]);
1252 irq_dispose_mapping(adapter
->tx_scrq
[i
]->irq
);
1253 release_sub_crq_queue(adapter
,
1254 adapter
->tx_scrq
[i
]);
1256 adapter
->tx_scrq
= NULL
;
1259 if (adapter
->rx_scrq
) {
1260 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
1261 if (adapter
->rx_scrq
[i
]) {
1262 free_irq(adapter
->rx_scrq
[i
]->irq
,
1263 adapter
->rx_scrq
[i
]);
1264 irq_dispose_mapping(adapter
->rx_scrq
[i
]->irq
);
1265 release_sub_crq_queue(adapter
,
1266 adapter
->rx_scrq
[i
]);
1268 adapter
->rx_scrq
= NULL
;
1271 adapter
->requested_caps
= 0;
1274 static void release_sub_crqs_no_irqs(struct ibmvnic_adapter
*adapter
)
1278 if (adapter
->tx_scrq
) {
1279 for (i
= 0; i
< adapter
->req_tx_queues
; i
++)
1280 if (adapter
->tx_scrq
[i
])
1281 release_sub_crq_queue(adapter
,
1282 adapter
->tx_scrq
[i
]);
1283 adapter
->tx_scrq
= NULL
;
1286 if (adapter
->rx_scrq
) {
1287 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
1288 if (adapter
->rx_scrq
[i
])
1289 release_sub_crq_queue(adapter
,
1290 adapter
->rx_scrq
[i
]);
1291 adapter
->rx_scrq
= NULL
;
1294 adapter
->requested_caps
= 0;
1297 static int disable_scrq_irq(struct ibmvnic_adapter
*adapter
,
1298 struct ibmvnic_sub_crq_queue
*scrq
)
1300 struct device
*dev
= &adapter
->vdev
->dev
;
1303 rc
= plpar_hcall_norets(H_VIOCTL
, adapter
->vdev
->unit_address
,
1304 H_DISABLE_VIO_INTERRUPT
, scrq
->hw_irq
, 0, 0);
1306 dev_err(dev
, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1311 static int enable_scrq_irq(struct ibmvnic_adapter
*adapter
,
1312 struct ibmvnic_sub_crq_queue
*scrq
)
1314 struct device
*dev
= &adapter
->vdev
->dev
;
1317 if (scrq
->hw_irq
> 0x100000000ULL
) {
1318 dev_err(dev
, "bad hw_irq = %lx\n", scrq
->hw_irq
);
1322 rc
= plpar_hcall_norets(H_VIOCTL
, adapter
->vdev
->unit_address
,
1323 H_ENABLE_VIO_INTERRUPT
, scrq
->hw_irq
, 0, 0);
1325 dev_err(dev
, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1330 static int ibmvnic_complete_tx(struct ibmvnic_adapter
*adapter
,
1331 struct ibmvnic_sub_crq_queue
*scrq
)
1333 struct device
*dev
= &adapter
->vdev
->dev
;
1334 struct ibmvnic_tx_buff
*txbuff
;
1335 union sub_crq
*next
;
1341 while (pending_scrq(adapter
, scrq
)) {
1342 unsigned int pool
= scrq
->pool_index
;
1344 next
= ibmvnic_next_scrq(adapter
, scrq
);
1345 for (i
= 0; i
< next
->tx_comp
.num_comps
; i
++) {
1346 if (next
->tx_comp
.rcs
[i
]) {
1347 dev_err(dev
, "tx error %x\n",
1348 next
->tx_comp
.rcs
[i
]);
1351 index
= be32_to_cpu(next
->tx_comp
.correlators
[i
]);
1352 txbuff
= &adapter
->tx_pool
[pool
].tx_buff
[index
];
1354 for (j
= 0; j
< IBMVNIC_MAX_FRAGS_PER_CRQ
; j
++) {
1355 if (!txbuff
->data_dma
[j
])
1358 txbuff
->data_dma
[j
] = 0;
1359 txbuff
->used_bounce
= false;
1361 /* if sub_crq was sent indirectly */
1362 first
= txbuff
->indir_arr
[0].generic
.first
;
1363 if (first
== IBMVNIC_CRQ_CMD
) {
1364 dma_unmap_single(dev
, txbuff
->indir_dma
,
1365 sizeof(txbuff
->indir_arr
),
1369 if (txbuff
->last_frag
)
1370 dev_kfree_skb_any(txbuff
->skb
);
1372 adapter
->tx_pool
[pool
].free_map
[adapter
->tx_pool
[pool
].
1373 producer_index
] = index
;
1374 adapter
->tx_pool
[pool
].producer_index
=
1375 (adapter
->tx_pool
[pool
].producer_index
+ 1) %
1376 adapter
->max_tx_entries_per_subcrq
;
1378 /* remove tx_comp scrq*/
1379 next
->tx_comp
.first
= 0;
1382 enable_scrq_irq(adapter
, scrq
);
1384 if (pending_scrq(adapter
, scrq
)) {
1385 disable_scrq_irq(adapter
, scrq
);
1392 static irqreturn_t
ibmvnic_interrupt_tx(int irq
, void *instance
)
1394 struct ibmvnic_sub_crq_queue
*scrq
= instance
;
1395 struct ibmvnic_adapter
*adapter
= scrq
->adapter
;
1397 disable_scrq_irq(adapter
, scrq
);
1398 ibmvnic_complete_tx(adapter
, scrq
);
1403 static irqreturn_t
ibmvnic_interrupt_rx(int irq
, void *instance
)
1405 struct ibmvnic_sub_crq_queue
*scrq
= instance
;
1406 struct ibmvnic_adapter
*adapter
= scrq
->adapter
;
1408 if (napi_schedule_prep(&adapter
->napi
[scrq
->scrq_num
])) {
1409 disable_scrq_irq(adapter
, scrq
);
1410 __napi_schedule(&adapter
->napi
[scrq
->scrq_num
]);
1416 static int init_sub_crq_irqs(struct ibmvnic_adapter
*adapter
)
1418 struct device
*dev
= &adapter
->vdev
->dev
;
1419 struct ibmvnic_sub_crq_queue
*scrq
;
1423 for (i
= 0; i
< adapter
->req_tx_queues
; i
++) {
1424 scrq
= adapter
->tx_scrq
[i
];
1425 scrq
->irq
= irq_create_mapping(NULL
, scrq
->hw_irq
);
1429 dev_err(dev
, "Error mapping irq\n");
1430 goto req_tx_irq_failed
;
1433 rc
= request_irq(scrq
->irq
, ibmvnic_interrupt_tx
,
1434 0, "ibmvnic_tx", scrq
);
1437 dev_err(dev
, "Couldn't register tx irq 0x%x. rc=%d\n",
1439 irq_dispose_mapping(scrq
->irq
);
1440 goto req_rx_irq_failed
;
1444 for (i
= 0; i
< adapter
->req_rx_queues
; i
++) {
1445 scrq
= adapter
->rx_scrq
[i
];
1446 scrq
->irq
= irq_create_mapping(NULL
, scrq
->hw_irq
);
1449 dev_err(dev
, "Error mapping irq\n");
1450 goto req_rx_irq_failed
;
1452 rc
= request_irq(scrq
->irq
, ibmvnic_interrupt_rx
,
1453 0, "ibmvnic_rx", scrq
);
1455 dev_err(dev
, "Couldn't register rx irq 0x%x. rc=%d\n",
1457 irq_dispose_mapping(scrq
->irq
);
1458 goto req_rx_irq_failed
;
1464 for (j
= 0; j
< i
; j
++) {
1465 free_irq(adapter
->rx_scrq
[j
]->irq
, adapter
->rx_scrq
[j
]);
1466 irq_dispose_mapping(adapter
->rx_scrq
[j
]->irq
);
1468 i
= adapter
->req_tx_queues
;
1470 for (j
= 0; j
< i
; j
++) {
1471 free_irq(adapter
->tx_scrq
[j
]->irq
, adapter
->tx_scrq
[j
]);
1472 irq_dispose_mapping(adapter
->rx_scrq
[j
]->irq
);
1474 release_sub_crqs_no_irqs(adapter
);
1478 static void init_sub_crqs(struct ibmvnic_adapter
*adapter
, int retry
)
1480 struct device
*dev
= &adapter
->vdev
->dev
;
1481 struct ibmvnic_sub_crq_queue
**allqueues
;
1482 int registered_queues
= 0;
1483 union ibmvnic_crq crq
;
1489 /* Sub-CRQ entries are 32 byte long */
1490 int entries_page
= 4 * PAGE_SIZE
/ (sizeof(u64
) * 4);
1492 if (adapter
->min_tx_entries_per_subcrq
> entries_page
||
1493 adapter
->min_rx_add_entries_per_subcrq
> entries_page
) {
1494 dev_err(dev
, "Fatal, invalid entries per sub-crq\n");
1495 goto allqueues_failed
;
1498 /* Get the minimum between the queried max and the entries
1499 * that fit in our PAGE_SIZE
1501 adapter
->req_tx_entries_per_subcrq
=
1502 adapter
->max_tx_entries_per_subcrq
> entries_page
?
1503 entries_page
: adapter
->max_tx_entries_per_subcrq
;
1504 adapter
->req_rx_add_entries_per_subcrq
=
1505 adapter
->max_rx_add_entries_per_subcrq
> entries_page
?
1506 entries_page
: adapter
->max_rx_add_entries_per_subcrq
;
1508 adapter
->req_tx_queues
= adapter
->opt_tx_comp_sub_queues
;
1509 adapter
->req_rx_queues
= adapter
->opt_rx_comp_queues
;
1510 adapter
->req_rx_add_queues
= adapter
->max_rx_add_queues
;
1512 adapter
->req_mtu
= adapter
->max_mtu
;
1515 total_queues
= adapter
->req_tx_queues
+ adapter
->req_rx_queues
;
1517 allqueues
= kcalloc(total_queues
, sizeof(*allqueues
), GFP_ATOMIC
);
1519 goto allqueues_failed
;
1521 for (i
= 0; i
< total_queues
; i
++) {
1522 allqueues
[i
] = init_sub_crq_queue(adapter
);
1523 if (!allqueues
[i
]) {
1524 dev_warn(dev
, "Couldn't allocate all sub-crqs\n");
1527 registered_queues
++;
1530 /* Make sure we were able to register the minimum number of queues */
1531 if (registered_queues
<
1532 adapter
->min_tx_queues
+ adapter
->min_rx_queues
) {
1533 dev_err(dev
, "Fatal: Couldn't init min number of sub-crqs\n");
1537 /* Distribute the failed allocated queues*/
1538 for (i
= 0; i
< total_queues
- registered_queues
+ more
; i
++) {
1539 netdev_dbg(adapter
->netdev
, "Reducing number of queues\n");
1542 if (adapter
->req_rx_queues
> adapter
->min_rx_queues
)
1543 adapter
->req_rx_queues
--;
1548 if (adapter
->req_tx_queues
> adapter
->min_tx_queues
)
1549 adapter
->req_tx_queues
--;
1556 adapter
->tx_scrq
= kcalloc(adapter
->req_tx_queues
,
1557 sizeof(*adapter
->tx_scrq
), GFP_ATOMIC
);
1558 if (!adapter
->tx_scrq
)
1561 for (i
= 0; i
< adapter
->req_tx_queues
; i
++) {
1562 adapter
->tx_scrq
[i
] = allqueues
[i
];
1563 adapter
->tx_scrq
[i
]->pool_index
= i
;
1566 adapter
->rx_scrq
= kcalloc(adapter
->req_rx_queues
,
1567 sizeof(*adapter
->rx_scrq
), GFP_ATOMIC
);
1568 if (!adapter
->rx_scrq
)
1571 for (i
= 0; i
< adapter
->req_rx_queues
; i
++) {
1572 adapter
->rx_scrq
[i
] = allqueues
[i
+ adapter
->req_tx_queues
];
1573 adapter
->rx_scrq
[i
]->scrq_num
= i
;
1576 memset(&crq
, 0, sizeof(crq
));
1577 crq
.request_capability
.first
= IBMVNIC_CRQ_CMD
;
1578 crq
.request_capability
.cmd
= REQUEST_CAPABILITY
;
1580 crq
.request_capability
.capability
= cpu_to_be16(REQ_TX_QUEUES
);
1581 crq
.request_capability
.number
= cpu_to_be64(adapter
->req_tx_queues
);
1582 ibmvnic_send_crq(adapter
, &crq
);
1584 crq
.request_capability
.capability
= cpu_to_be16(REQ_RX_QUEUES
);
1585 crq
.request_capability
.number
= cpu_to_be64(adapter
->req_rx_queues
);
1586 ibmvnic_send_crq(adapter
, &crq
);
1588 crq
.request_capability
.capability
= cpu_to_be16(REQ_RX_ADD_QUEUES
);
1589 crq
.request_capability
.number
= cpu_to_be64(adapter
->req_rx_add_queues
);
1590 ibmvnic_send_crq(adapter
, &crq
);
1592 crq
.request_capability
.capability
=
1593 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ
);
1594 crq
.request_capability
.number
=
1595 cpu_to_be64(adapter
->req_tx_entries_per_subcrq
);
1596 ibmvnic_send_crq(adapter
, &crq
);
1598 crq
.request_capability
.capability
=
1599 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ
);
1600 crq
.request_capability
.number
=
1601 cpu_to_be64(adapter
->req_rx_add_entries_per_subcrq
);
1602 ibmvnic_send_crq(adapter
, &crq
);
1604 crq
.request_capability
.capability
= cpu_to_be16(REQ_MTU
);
1605 crq
.request_capability
.number
= cpu_to_be64(adapter
->req_mtu
);
1606 ibmvnic_send_crq(adapter
, &crq
);
1608 if (adapter
->netdev
->flags
& IFF_PROMISC
) {
1609 if (adapter
->promisc_supported
) {
1610 crq
.request_capability
.capability
=
1611 cpu_to_be16(PROMISC_REQUESTED
);
1612 crq
.request_capability
.number
= cpu_to_be64(1);
1613 ibmvnic_send_crq(adapter
, &crq
);
1616 crq
.request_capability
.capability
=
1617 cpu_to_be16(PROMISC_REQUESTED
);
1618 crq
.request_capability
.number
= cpu_to_be64(0);
1619 ibmvnic_send_crq(adapter
, &crq
);
1627 kfree(adapter
->tx_scrq
);
1628 adapter
->tx_scrq
= NULL
;
1630 for (i
= 0; i
< registered_queues
; i
++)
1631 release_sub_crq_queue(adapter
, allqueues
[i
]);
1634 ibmvnic_remove(adapter
->vdev
);
1637 static int pending_scrq(struct ibmvnic_adapter
*adapter
,
1638 struct ibmvnic_sub_crq_queue
*scrq
)
1640 union sub_crq
*entry
= &scrq
->msgs
[scrq
->cur
];
1642 if (entry
->generic
.first
& IBMVNIC_CRQ_CMD_RSP
|| adapter
->closing
)
1648 static union sub_crq
*ibmvnic_next_scrq(struct ibmvnic_adapter
*adapter
,
1649 struct ibmvnic_sub_crq_queue
*scrq
)
1651 union sub_crq
*entry
;
1652 unsigned long flags
;
1654 spin_lock_irqsave(&scrq
->lock
, flags
);
1655 entry
= &scrq
->msgs
[scrq
->cur
];
1656 if (entry
->generic
.first
& IBMVNIC_CRQ_CMD_RSP
) {
1657 if (++scrq
->cur
== scrq
->size
)
1662 spin_unlock_irqrestore(&scrq
->lock
, flags
);
1667 static union ibmvnic_crq
*ibmvnic_next_crq(struct ibmvnic_adapter
*adapter
)
1669 struct ibmvnic_crq_queue
*queue
= &adapter
->crq
;
1670 union ibmvnic_crq
*crq
;
1672 crq
= &queue
->msgs
[queue
->cur
];
1673 if (crq
->generic
.first
& IBMVNIC_CRQ_CMD_RSP
) {
1674 if (++queue
->cur
== queue
->size
)
1683 static int send_subcrq(struct ibmvnic_adapter
*adapter
, u64 remote_handle
,
1684 union sub_crq
*sub_crq
)
1686 unsigned int ua
= adapter
->vdev
->unit_address
;
1687 struct device
*dev
= &adapter
->vdev
->dev
;
1688 u64
*u64_crq
= (u64
*)sub_crq
;
1691 netdev_dbg(adapter
->netdev
,
1692 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1693 (unsigned long int)cpu_to_be64(remote_handle
),
1694 (unsigned long int)cpu_to_be64(u64_crq
[0]),
1695 (unsigned long int)cpu_to_be64(u64_crq
[1]),
1696 (unsigned long int)cpu_to_be64(u64_crq
[2]),
1697 (unsigned long int)cpu_to_be64(u64_crq
[3]));
1699 /* Make sure the hypervisor sees the complete request */
1702 rc
= plpar_hcall_norets(H_SEND_SUB_CRQ
, ua
,
1703 cpu_to_be64(remote_handle
),
1704 cpu_to_be64(u64_crq
[0]),
1705 cpu_to_be64(u64_crq
[1]),
1706 cpu_to_be64(u64_crq
[2]),
1707 cpu_to_be64(u64_crq
[3]));
1711 dev_warn(dev
, "CRQ Queue closed\n");
1712 dev_err(dev
, "Send error (rc=%d)\n", rc
);
1718 static int send_subcrq_indirect(struct ibmvnic_adapter
*adapter
,
1719 u64 remote_handle
, u64 ioba
, u64 num_entries
)
1721 unsigned int ua
= adapter
->vdev
->unit_address
;
1722 struct device
*dev
= &adapter
->vdev
->dev
;
1725 /* Make sure the hypervisor sees the complete request */
1727 rc
= plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT
, ua
,
1728 cpu_to_be64(remote_handle
),
1733 dev_warn(dev
, "CRQ Queue closed\n");
1734 dev_err(dev
, "Send (indirect) error (rc=%d)\n", rc
);
1740 static int ibmvnic_send_crq(struct ibmvnic_adapter
*adapter
,
1741 union ibmvnic_crq
*crq
)
1743 unsigned int ua
= adapter
->vdev
->unit_address
;
1744 struct device
*dev
= &adapter
->vdev
->dev
;
1745 u64
*u64_crq
= (u64
*)crq
;
1748 netdev_dbg(adapter
->netdev
, "Sending CRQ: %016lx %016lx\n",
1749 (unsigned long int)cpu_to_be64(u64_crq
[0]),
1750 (unsigned long int)cpu_to_be64(u64_crq
[1]));
1752 /* Make sure the hypervisor sees the complete request */
1755 rc
= plpar_hcall_norets(H_SEND_CRQ
, ua
,
1756 cpu_to_be64(u64_crq
[0]),
1757 cpu_to_be64(u64_crq
[1]));
1761 dev_warn(dev
, "CRQ Queue closed\n");
1762 dev_warn(dev
, "Send error (rc=%d)\n", rc
);
1768 static int ibmvnic_send_crq_init(struct ibmvnic_adapter
*adapter
)
1770 union ibmvnic_crq crq
;
1772 memset(&crq
, 0, sizeof(crq
));
1773 crq
.generic
.first
= IBMVNIC_CRQ_INIT_CMD
;
1774 crq
.generic
.cmd
= IBMVNIC_CRQ_INIT
;
1775 netdev_dbg(adapter
->netdev
, "Sending CRQ init\n");
1777 return ibmvnic_send_crq(adapter
, &crq
);
1780 static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter
*adapter
)
1782 union ibmvnic_crq crq
;
1784 memset(&crq
, 0, sizeof(crq
));
1785 crq
.generic
.first
= IBMVNIC_CRQ_INIT_CMD
;
1786 crq
.generic
.cmd
= IBMVNIC_CRQ_INIT_COMPLETE
;
1787 netdev_dbg(adapter
->netdev
, "Sending CRQ init complete\n");
1789 return ibmvnic_send_crq(adapter
, &crq
);
1792 static int send_version_xchg(struct ibmvnic_adapter
*adapter
)
1794 union ibmvnic_crq crq
;
1796 memset(&crq
, 0, sizeof(crq
));
1797 crq
.version_exchange
.first
= IBMVNIC_CRQ_CMD
;
1798 crq
.version_exchange
.cmd
= VERSION_EXCHANGE
;
1799 crq
.version_exchange
.version
= cpu_to_be16(ibmvnic_version
);
1801 return ibmvnic_send_crq(adapter
, &crq
);
1804 static void send_login(struct ibmvnic_adapter
*adapter
)
1806 struct ibmvnic_login_rsp_buffer
*login_rsp_buffer
;
1807 struct ibmvnic_login_buffer
*login_buffer
;
1808 struct ibmvnic_inflight_cmd
*inflight_cmd
;
1809 struct device
*dev
= &adapter
->vdev
->dev
;
1810 dma_addr_t rsp_buffer_token
;
1811 dma_addr_t buffer_token
;
1812 size_t rsp_buffer_size
;
1813 union ibmvnic_crq crq
;
1814 unsigned long flags
;
1821 sizeof(struct ibmvnic_login_buffer
) +
1822 sizeof(u64
) * (adapter
->req_tx_queues
+ adapter
->req_rx_queues
);
1824 login_buffer
= kmalloc(buffer_size
, GFP_ATOMIC
);
1826 goto buf_alloc_failed
;
1828 buffer_token
= dma_map_single(dev
, login_buffer
, buffer_size
,
1830 if (dma_mapping_error(dev
, buffer_token
)) {
1831 dev_err(dev
, "Couldn't map login buffer\n");
1832 goto buf_map_failed
;
1835 rsp_buffer_size
= sizeof(struct ibmvnic_login_rsp_buffer
) +
1836 sizeof(u64
) * adapter
->req_tx_queues
+
1837 sizeof(u64
) * adapter
->req_rx_queues
+
1838 sizeof(u64
) * adapter
->req_rx_queues
+
1839 sizeof(u8
) * IBMVNIC_TX_DESC_VERSIONS
;
1841 login_rsp_buffer
= kmalloc(rsp_buffer_size
, GFP_ATOMIC
);
1842 if (!login_rsp_buffer
)
1843 goto buf_rsp_alloc_failed
;
1845 rsp_buffer_token
= dma_map_single(dev
, login_rsp_buffer
,
1846 rsp_buffer_size
, DMA_FROM_DEVICE
);
1847 if (dma_mapping_error(dev
, rsp_buffer_token
)) {
1848 dev_err(dev
, "Couldn't map login rsp buffer\n");
1849 goto buf_rsp_map_failed
;
1851 inflight_cmd
= kmalloc(sizeof(*inflight_cmd
), GFP_ATOMIC
);
1852 if (!inflight_cmd
) {
1853 dev_err(dev
, "Couldn't allocate inflight_cmd\n");
1854 goto inflight_alloc_failed
;
1856 adapter
->login_buf
= login_buffer
;
1857 adapter
->login_buf_token
= buffer_token
;
1858 adapter
->login_buf_sz
= buffer_size
;
1859 adapter
->login_rsp_buf
= login_rsp_buffer
;
1860 adapter
->login_rsp_buf_token
= rsp_buffer_token
;
1861 adapter
->login_rsp_buf_sz
= rsp_buffer_size
;
1863 login_buffer
->len
= cpu_to_be32(buffer_size
);
1864 login_buffer
->version
= cpu_to_be32(INITIAL_VERSION_LB
);
1865 login_buffer
->num_txcomp_subcrqs
= cpu_to_be32(adapter
->req_tx_queues
);
1866 login_buffer
->off_txcomp_subcrqs
=
1867 cpu_to_be32(sizeof(struct ibmvnic_login_buffer
));
1868 login_buffer
->num_rxcomp_subcrqs
= cpu_to_be32(adapter
->req_rx_queues
);
1869 login_buffer
->off_rxcomp_subcrqs
=
1870 cpu_to_be32(sizeof(struct ibmvnic_login_buffer
) +
1871 sizeof(u64
) * adapter
->req_tx_queues
);
1872 login_buffer
->login_rsp_ioba
= cpu_to_be32(rsp_buffer_token
);
1873 login_buffer
->login_rsp_len
= cpu_to_be32(rsp_buffer_size
);
1875 tx_list_p
= (__be64
*)((char *)login_buffer
+
1876 sizeof(struct ibmvnic_login_buffer
));
1877 rx_list_p
= (__be64
*)((char *)login_buffer
+
1878 sizeof(struct ibmvnic_login_buffer
) +
1879 sizeof(u64
) * adapter
->req_tx_queues
);
1881 for (i
= 0; i
< adapter
->req_tx_queues
; i
++) {
1882 if (adapter
->tx_scrq
[i
]) {
1883 tx_list_p
[i
] = cpu_to_be64(adapter
->tx_scrq
[i
]->
1888 for (i
= 0; i
< adapter
->req_rx_queues
; i
++) {
1889 if (adapter
->rx_scrq
[i
]) {
1890 rx_list_p
[i
] = cpu_to_be64(adapter
->rx_scrq
[i
]->
1895 netdev_dbg(adapter
->netdev
, "Login Buffer:\n");
1896 for (i
= 0; i
< (adapter
->login_buf_sz
- 1) / 8 + 1; i
++) {
1897 netdev_dbg(adapter
->netdev
, "%016lx\n",
1898 ((unsigned long int *)(adapter
->login_buf
))[i
]);
1901 memset(&crq
, 0, sizeof(crq
));
1902 crq
.login
.first
= IBMVNIC_CRQ_CMD
;
1903 crq
.login
.cmd
= LOGIN
;
1904 crq
.login
.ioba
= cpu_to_be32(buffer_token
);
1905 crq
.login
.len
= cpu_to_be32(buffer_size
);
1907 memcpy(&inflight_cmd
->crq
, &crq
, sizeof(crq
));
1909 spin_lock_irqsave(&adapter
->inflight_lock
, flags
);
1910 list_add_tail(&inflight_cmd
->list
, &adapter
->inflight
);
1911 spin_unlock_irqrestore(&adapter
->inflight_lock
, flags
);
1913 ibmvnic_send_crq(adapter
, &crq
);
1917 inflight_alloc_failed
:
1918 dma_unmap_single(dev
, rsp_buffer_token
, rsp_buffer_size
,
1921 kfree(login_rsp_buffer
);
1922 buf_rsp_alloc_failed
:
1923 dma_unmap_single(dev
, buffer_token
, buffer_size
, DMA_TO_DEVICE
);
1925 kfree(login_buffer
);
1930 static void send_request_map(struct ibmvnic_adapter
*adapter
, dma_addr_t addr
,
1933 union ibmvnic_crq crq
;
1935 memset(&crq
, 0, sizeof(crq
));
1936 crq
.request_map
.first
= IBMVNIC_CRQ_CMD
;
1937 crq
.request_map
.cmd
= REQUEST_MAP
;
1938 crq
.request_map
.map_id
= map_id
;
1939 crq
.request_map
.ioba
= cpu_to_be32(addr
);
1940 crq
.request_map
.len
= cpu_to_be32(len
);
1941 ibmvnic_send_crq(adapter
, &crq
);
1944 static void send_request_unmap(struct ibmvnic_adapter
*adapter
, u8 map_id
)
1946 union ibmvnic_crq crq
;
1948 memset(&crq
, 0, sizeof(crq
));
1949 crq
.request_unmap
.first
= IBMVNIC_CRQ_CMD
;
1950 crq
.request_unmap
.cmd
= REQUEST_UNMAP
;
1951 crq
.request_unmap
.map_id
= map_id
;
1952 ibmvnic_send_crq(adapter
, &crq
);
1955 static void send_map_query(struct ibmvnic_adapter
*adapter
)
1957 union ibmvnic_crq crq
;
1959 memset(&crq
, 0, sizeof(crq
));
1960 crq
.query_map
.first
= IBMVNIC_CRQ_CMD
;
1961 crq
.query_map
.cmd
= QUERY_MAP
;
1962 ibmvnic_send_crq(adapter
, &crq
);
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}
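
/* The QUERY_IP_OFFLOAD response is handled by unmapping the buffer the
 * server just filled in, logging the advertised offload capabilities, and
 * translating them into both netdev feature flags and a CONTROL_IP_OFFLOAD
 * request that tells the server which offloads the driver will actually use.
 */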
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev, &adapter->ip_offload_ctrl,
			       sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
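
/* Firmware error reporting is a two step exchange: handle_error_indication
 * (below) reacts to an ERROR_INDICATION by allocating a detail buffer and
 * sending REQUEST_ERROR_INFO, while handle_error_info_rsp looks that buffer
 * up by error_id on adapter->errors and dumps its contents before freeing it.
 */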
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_size = len;

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_FROM_DEVICE);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
			"FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
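
/* Responses to REQUEST_CAPABILITY carry the value the server actually
 * granted.  On PARTIALSUCCESS the sub-CRQs are released and re-requested with
 * the server-suggested value; once all requested capabilities have been
 * acknowledged the driver moves on to the IP offload query.
 */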
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs_no_irqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
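
/* A non-zero return code in the login response means the server could not
 * satisfy the requested queue counts, so the renegotiate flag is set and the
 * waiting initialization path retries with fewer resources; a successful
 * login is sanity-checked against the request before RAS setup starts.
 */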
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_queries) == 0)
		init_sub_crqs(adapter, 0);
}
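
/* The remaining handlers implement the firmware RAS (reliability,
 * availability and serviceability) controls that are exposed through debugfs:
 * CONTROL_RAS responses update the cached per-component trace and error
 * levels that the files below report.
 */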
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	dma_addr_t trace_tok;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;

	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len =
		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		    *ppos;

	copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	*ppos += len;
	return len;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_read,
};
static ssize_t paused_read(struct file *file, char __user *user_buf,
			   size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= paused_read,
	.write		= paused_write,
};
static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= tracing_read,
	.write		= tracing_write,
};
static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= error_level_read,
	.write		= error_level_write,
};
static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};
static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};
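
/* Each RAS component gets its own debugfs directory (created below) with
 * description, trace_buf_size, trace_level, error_level, tracing, paused and
 * trace entries backed by the file operations above.  For example, assuming
 * debugfs is mounted in the usual place, tracing for a component could be
 * toggled with something like:
 *
 *	echo 1 > /sys/kernel/debug/ibmvnic_<unit-address>/ras_comps/<name>/tracing
 *
 * (path shown for illustration only; the directory name comes from
 * ibmvnic_probe further below).
 */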
static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}
static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int)
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	ibmvnic_free_inflight(adapter);
	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev,
				   "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}
static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}
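
/* The debugfs "dump" file drives the firmware dump commands: reading it sends
 * REQUEST_DUMP_SIZE, handle_dump_size_rsp allocates and maps a buffer and
 * sends REQUEST_DUMP, and once fw_done completes the buffer is written out
 * through seq_write.  Assuming debugfs is mounted in the usual place it can
 * be read with e.g. "cat /sys/kernel/debug/ibmvnic_<unit-address>/dump".
 */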
/* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;

	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token,
			 adapter->dump_data_size, DMA_FROM_DEVICE);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Passive init timeout\n");
				goto task_failed;
			}
		}
	} while (adapter->renegotiate);
	rc = init_sub_crq_irqs(adapter);

	if (rc)
		goto task_failed;

	netdev->real_num_tx_queues = adapter->req_tx_queues;
	netdev->mtu = adapter->req_mtu;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");
	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[17]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO,
					  adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout))
				return 0;
		}
	} while (adapter->renegotiate);

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
		goto free_debugfs;
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;
	netdev->mtu = adapter->req_mtu;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_sub_crqs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_sub_crqs:
	release_sub_crqs(adapter);
free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	dma_unmap_single(&dev->dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);