/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify */
/*  it under the terms of the GNU General Public License as published by */
/*  the Free Software Foundation; either version 2 of the License, or    */
/*  (at your option) any later version.                                  */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,      */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/*  GNU General Public License for more details.                         */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License    */
/*  along with this program.                                             */
/*                                                                        */
/*  This module contains the implementation of a virtual Ethernet device */
/*  for use with IBM i/p Series LPAR Linux.  It utilizes the logical LAN */
/*  option of the RS/6000 Platform Architecture to interface with virtual */
/*  Ethernet NICs that are presented to the partition by the hypervisor. */
/*                                                                        */
/*  Messages are passed between the VNIC driver and the VNIC server using */
/*  Command/Response Queues (CRQs) and sub CRQs (sCRQs).  CRQs are used  */
/*  to issue and receive commands that initiate communication with the   */
/*  server on driver initialization.  Sub CRQs (sCRQs) are similar to    */
/*  CRQs, but are used by the driver to notify the server that a packet  */
/*  is ready for transmission or that a buffer has been added to receive */
/*  a packet.  Subsequently, sCRQs are used by the server to notify the  */
/*  driver that a packet transmission has been completed or that a       */
/*  packet has been received and placed in a waiting buffer.             */
/*                                                                        */
/*  In lieu of a more conventional "on-the-fly" DMA mapping strategy in  */
/*  which skbs are DMA mapped and immediately unmapped when the transmit */
/*  or receive has been completed, the VNIC driver is required to use    */
/*  "long term mapping".  This entails that large, contiguous DMA-mapped */
/*  buffers are allocated on driver initialization and these buffers are */
/*  then continuously reused to pass skbs to and from the VNIC server.   */
/*                                                                        */
/**************************************************************************/

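/* Illustrative sketch (not part of the driver): with long term mapping a
 * packet buffer is never mapped individually.  Each pool owns one large
 * DMA-coherent region, and buffer 'index' is simply an offset into it, so
 * the CPU address and the DMA address the VNIC server sees move in lockstep:
 *
 *	offset   = index * pool->buff_size;
 *	dst      = pool->long_term_buff.buff + offset;   // CPU view
 *	dma_addr = pool->long_term_buff.addr + offset;   // device view
 *
 * replenish_rx_pool() below uses exactly this arithmetic; the transmit path
 * does the same with slots sized by the requested MTU.
 */
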
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;

static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

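/* Example (illustrative only): how the two macros above combine.
 * IBMVNIC_STAT_OFF(rx_packets) is the byte offset of stats.rx_packets within
 * struct ibmvnic_adapter, and IBMVNIC_GET_STAT() reads a u64 at that offset:
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * ibmvnic_get_ethtool_stats() below walks ibmvnic_stats[] doing exactly this.
 */
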
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

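/* Illustrative call (mirrors init_sub_crq_queue() later in this file): the
 * hypervisor hands back the new sub-CRQ number and its interrupt source in
 * the two out-parameters.  The local names here are just for illustration:
 *
 *	unsigned long crq_num, hw_irq;
 *	long rc = h_reg_sub_crq(adapter->vdev->unit_address, msg_token,
 *				4 * PAGE_SIZE, &crq_num, &hw_irq);
 */
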
/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);

	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	send_request_unmap(adapter, ltb->map_id);
}

static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);

	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

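/* Illustrative example (not driver code): why the byte shift above matters
 * on a little-endian host.  Assume buff_size = 0x2000:
 *
 *	without shift: cpu_to_be32(0x2000)      -> bytes 00 00 20 00
 *	with shift:    cpu_to_be32(0x2000 << 8) -> bytes 00 20 00 00
 *
 * Since the sCRQ length field is defined to be 24 bits, the shift keeps all
 * significant bits of the buffer size inside the bytes the server reads.
 */
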
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));
	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_TO_DEVICE);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total buffer length to be used to build
 * descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len - 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

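/* Worked example (illustrative): a TCP/IPv4 frame with 14 + 20 + 20 = 54
 * bytes of headers.  The first header descriptor carries up to 24 bytes,
 * leaving 30 bytes for extension descriptors of up to 29 bytes each:
 *
 *	len = 54 - 24;                                 // 30
 *	extra = len % 29 ? len / 29 + 1 : len / 29;    // 2 extension descs
 *
 * so the indirect descriptor array grows by two entries beyond the base
 * TX descriptor.
 */
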
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct ibmvnic_rx_buff *rx_buff;
		struct sk_buff *skb;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
	ibmvnic_send_crq(adapter, &crq);

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings		= ibmvnic_get_settings,
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
};

/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (scrq->irq == NO_IRQ) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (scrq->irq == NO_IRQ) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}

static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Choosing the maximum number of queues supported by firmware*/
		adapter->req_tx_queues = adapter->max_tx_queues;
		adapter->req_rx_queues = adapter->max_rx_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}

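/* Worked example (illustrative, assuming a 4 KiB PAGE_SIZE): each sub-CRQ
 * gets four pages of 32-byte entries, so
 *
 *	entries_page = 4 * 4096 / (sizeof(u64) * 4) = 16384 / 32 = 512
 *
 * and req_tx_entries_per_subcrq / req_rx_add_entries_per_subcrq above are
 * clamped to 512 when the server advertises a larger maximum.
 */
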
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

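/* Worked example (illustrative): for req_tx_queues = 2 and req_rx_queues = 2
 * the login buffer built above is laid out as
 *
 *	offset 0:                       struct ibmvnic_login_buffer (header)
 *	off_txcomp_subcrqs = hdr size:  2 x u64 tx sub-CRQ numbers (16 bytes)
 *	off_rxcomp_subcrqs = +16:       2 x u64 rx sub-CRQ numbers (16 bytes)
 *
 * so buffer_size = sizeof(struct ibmvnic_login_buffer) + 8 * (2 + 2), and
 * tx_list_p / rx_list_p point at the two queue-number arrays.
 */
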
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     __be32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}
2076 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter
*adapter
)
2078 struct device
*dev
= &adapter
->vdev
->dev
;
2079 struct ibmvnic_query_ip_offload_buffer
*buf
= &adapter
->ip_offload_buf
;
2080 union ibmvnic_crq crq
;
2083 dma_unmap_single(dev
, adapter
->ip_offload_tok
,
2084 sizeof(adapter
->ip_offload_buf
), DMA_FROM_DEVICE
);
2086 netdev_dbg(adapter
->netdev
, "Query IP Offload Buffer:\n");
2087 for (i
= 0; i
< (sizeof(adapter
->ip_offload_buf
) - 1) / 8 + 1; i
++)
2088 netdev_dbg(adapter
->netdev
, "%016lx\n",
2089 ((unsigned long int *)(buf
))[i
]);
2091 netdev_dbg(adapter
->netdev
, "ipv4_chksum = %d\n", buf
->ipv4_chksum
);
2092 netdev_dbg(adapter
->netdev
, "ipv6_chksum = %d\n", buf
->ipv6_chksum
);
2093 netdev_dbg(adapter
->netdev
, "tcp_ipv4_chksum = %d\n",
2094 buf
->tcp_ipv4_chksum
);
2095 netdev_dbg(adapter
->netdev
, "tcp_ipv6_chksum = %d\n",
2096 buf
->tcp_ipv6_chksum
);
2097 netdev_dbg(adapter
->netdev
, "udp_ipv4_chksum = %d\n",
2098 buf
->udp_ipv4_chksum
);
2099 netdev_dbg(adapter
->netdev
, "udp_ipv6_chksum = %d\n",
2100 buf
->udp_ipv6_chksum
);
2101 netdev_dbg(adapter
->netdev
, "large_tx_ipv4 = %d\n",
2102 buf
->large_tx_ipv4
);
2103 netdev_dbg(adapter
->netdev
, "large_tx_ipv6 = %d\n",
2104 buf
->large_tx_ipv6
);
2105 netdev_dbg(adapter
->netdev
, "large_rx_ipv4 = %d\n",
2106 buf
->large_rx_ipv4
);
2107 netdev_dbg(adapter
->netdev
, "large_rx_ipv6 = %d\n",
2108 buf
->large_rx_ipv6
);
2109 netdev_dbg(adapter
->netdev
, "max_ipv4_hdr_sz = %d\n",
2110 buf
->max_ipv4_header_size
);
2111 netdev_dbg(adapter
->netdev
, "max_ipv6_hdr_sz = %d\n",
2112 buf
->max_ipv6_header_size
);
2113 netdev_dbg(adapter
->netdev
, "max_tcp_hdr_size = %d\n",
2114 buf
->max_tcp_header_size
);
2115 netdev_dbg(adapter
->netdev
, "max_udp_hdr_size = %d\n",
2116 buf
->max_udp_header_size
);
2117 netdev_dbg(adapter
->netdev
, "max_large_tx_size = %d\n",
2118 buf
->max_large_tx_size
);
2119 netdev_dbg(adapter
->netdev
, "max_large_rx_size = %d\n",
2120 buf
->max_large_rx_size
);
2121 netdev_dbg(adapter
->netdev
, "ipv6_ext_hdr = %d\n",
2122 buf
->ipv6_extension_header
);
2123 netdev_dbg(adapter
->netdev
, "tcp_pseudosum_req = %d\n",
2124 buf
->tcp_pseudosum_req
);
2125 netdev_dbg(adapter
->netdev
, "num_ipv6_ext_hd = %d\n",
2126 buf
->num_ipv6_ext_headers
);
2127 netdev_dbg(adapter
->netdev
, "off_ipv6_ext_hd = %d\n",
2128 buf
->off_ipv6_ext_headers
);
2130 adapter
->ip_offload_ctrl_tok
=
2131 dma_map_single(dev
, &adapter
->ip_offload_ctrl
,
2132 sizeof(adapter
->ip_offload_ctrl
), DMA_TO_DEVICE
);
2134 if (dma_mapping_error(dev
, adapter
->ip_offload_ctrl_tok
)) {
2135 dev_err(dev
, "Couldn't map ip offload control buffer\n");
2139 adapter
->ip_offload_ctrl
.version
= cpu_to_be32(INITIAL_VERSION_IOB
);
2140 adapter
->ip_offload_ctrl
.tcp_ipv4_chksum
= buf
->tcp_ipv4_chksum
;
2141 adapter
->ip_offload_ctrl
.udp_ipv4_chksum
= buf
->udp_ipv4_chksum
;
2142 adapter
->ip_offload_ctrl
.tcp_ipv6_chksum
= buf
->tcp_ipv6_chksum
;
2143 adapter
->ip_offload_ctrl
.udp_ipv6_chksum
= buf
->udp_ipv6_chksum
;
2145 /* large_tx/rx disabled for now, additional features needed */
2146 adapter
->ip_offload_ctrl
.large_tx_ipv4
= 0;
2147 adapter
->ip_offload_ctrl
.large_tx_ipv6
= 0;
2148 adapter
->ip_offload_ctrl
.large_rx_ipv4
= 0;
2149 adapter
->ip_offload_ctrl
.large_rx_ipv6
= 0;
2151 adapter
->netdev
->features
= NETIF_F_GSO
;
2153 if (buf
->tcp_ipv4_chksum
|| buf
->udp_ipv4_chksum
)
2154 adapter
->netdev
->features
|= NETIF_F_IP_CSUM
;
2156 if (buf
->tcp_ipv6_chksum
|| buf
->udp_ipv6_chksum
)
2157 adapter
->netdev
->features
|= NETIF_F_IPV6_CSUM
;
2159 if ((adapter
->netdev
->features
&
2160 (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
)))
2161 adapter
->netdev
->features
|= NETIF_F_RXCSUM
;
2163 memset(&crq
, 0, sizeof(crq
));
2164 crq
.control_ip_offload
.first
= IBMVNIC_CRQ_CMD
;
2165 crq
.control_ip_offload
.cmd
= CONTROL_IP_OFFLOAD
;
2166 crq
.control_ip_offload
.len
=
2167 cpu_to_be32(sizeof(adapter
->ip_offload_ctrl
));
2168 crq
.control_ip_offload
.ioba
= cpu_to_be32(adapter
->ip_offload_ctrl_tok
);
2169 ibmvnic_send_crq(adapter
, &crq
);
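/* Error reporting is split across two handlers.  handle_error_indication()
 * (below) reacts to an unsolicited ERROR_INDICATION CRQ by allocating and
 * DMA-mapping an ibmvnic_error_buff, queueing it on adapter->errors and
 * sending REQUEST_ERROR_INFO so the server can fill in the details.  The
 * REQUEST_ERROR_RSP that comes back is handled here: the matching buffer is
 * looked up by error_id, its contents are dumped to the log and it is freed.
 */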
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
2218 static void handle_dump_size_rsp(union ibmvnic_crq
*crq
,
2219 struct ibmvnic_adapter
*adapter
)
2221 int len
= be32_to_cpu(crq
->request_dump_size_rsp
.len
);
2222 struct ibmvnic_inflight_cmd
*inflight_cmd
;
2223 struct device
*dev
= &adapter
->vdev
->dev
;
2224 union ibmvnic_crq newcrq
;
2225 unsigned long flags
;
2227 /* allocate and map buffer */
2228 adapter
->dump_data
= kmalloc(len
, GFP_KERNEL
);
2229 if (!adapter
->dump_data
) {
2230 complete(&adapter
->fw_done
);
2234 adapter
->dump_data_token
= dma_map_single(dev
, adapter
->dump_data
, len
,
2237 if (dma_mapping_error(dev
, adapter
->dump_data_token
)) {
2238 if (!firmware_has_feature(FW_FEATURE_CMO
))
2239 dev_err(dev
, "Couldn't map dump data\n");
2240 kfree(adapter
->dump_data
);
2241 complete(&adapter
->fw_done
);
2245 inflight_cmd
= kmalloc(sizeof(*inflight_cmd
), GFP_ATOMIC
);
2246 if (!inflight_cmd
) {
2247 dma_unmap_single(dev
, adapter
->dump_data_token
, len
,
2249 kfree(adapter
->dump_data
);
2250 complete(&adapter
->fw_done
);
2254 memset(&newcrq
, 0, sizeof(newcrq
));
2255 newcrq
.request_dump
.first
= IBMVNIC_CRQ_CMD
;
2256 newcrq
.request_dump
.cmd
= REQUEST_DUMP
;
2257 newcrq
.request_dump
.ioba
= cpu_to_be32(adapter
->dump_data_token
);
2258 newcrq
.request_dump
.len
= cpu_to_be32(adapter
->dump_data_size
);
2260 memcpy(&inflight_cmd
->crq
, &newcrq
, sizeof(newcrq
));
2262 spin_lock_irqsave(&adapter
->inflight_lock
, flags
);
2263 list_add_tail(&inflight_cmd
->list
, &adapter
->inflight
);
2264 spin_unlock_irqrestore(&adapter
->inflight_lock
, flags
);
2266 ibmvnic_send_crq(adapter
, &newcrq
);
2269 static void handle_error_indication(union ibmvnic_crq
*crq
,
2270 struct ibmvnic_adapter
*adapter
)
2272 int detail_len
= be32_to_cpu(crq
->error_indication
.detail_error_sz
);
2273 struct ibmvnic_inflight_cmd
*inflight_cmd
;
2274 struct device
*dev
= &adapter
->vdev
->dev
;
2275 struct ibmvnic_error_buff
*error_buff
;
2276 union ibmvnic_crq new_crq
;
2277 unsigned long flags
;
2279 dev_err(dev
, "Firmware reports %serror id %x, cause %d\n",
2280 crq
->error_indication
.
2281 flags
& IBMVNIC_FATAL_ERROR
? "FATAL " : "",
2282 crq
->error_indication
.error_id
,
2283 crq
->error_indication
.error_cause
);
2285 error_buff
= kmalloc(sizeof(*error_buff
), GFP_ATOMIC
);
2289 error_buff
->buff
= kmalloc(detail_len
, GFP_ATOMIC
);
2290 if (!error_buff
->buff
) {
2295 error_buff
->dma
= dma_map_single(dev
, error_buff
->buff
, detail_len
,
2297 if (dma_mapping_error(dev
, error_buff
->dma
)) {
2298 if (!firmware_has_feature(FW_FEATURE_CMO
))
2299 dev_err(dev
, "Couldn't map error buffer\n");
2300 kfree(error_buff
->buff
);
2305 inflight_cmd
= kmalloc(sizeof(*inflight_cmd
), GFP_ATOMIC
);
2306 if (!inflight_cmd
) {
2307 dma_unmap_single(dev
, error_buff
->dma
, detail_len
,
2309 kfree(error_buff
->buff
);
2314 error_buff
->len
= detail_len
;
2315 error_buff
->error_id
= crq
->error_indication
.error_id
;
2317 spin_lock_irqsave(&adapter
->error_list_lock
, flags
);
2318 list_add_tail(&error_buff
->list
, &adapter
->errors
);
2319 spin_unlock_irqrestore(&adapter
->error_list_lock
, flags
);
2321 memset(&new_crq
, 0, sizeof(new_crq
));
2322 new_crq
.request_error_info
.first
= IBMVNIC_CRQ_CMD
;
2323 new_crq
.request_error_info
.cmd
= REQUEST_ERROR_INFO
;
2324 new_crq
.request_error_info
.ioba
= cpu_to_be32(error_buff
->dma
);
2325 new_crq
.request_error_info
.len
= cpu_to_be32(detail_len
);
2326 new_crq
.request_error_info
.error_id
= crq
->error_indication
.error_id
;
2328 memcpy(&inflight_cmd
->crq
, &crq
, sizeof(crq
));
2330 spin_lock_irqsave(&adapter
->inflight_lock
, flags
);
2331 list_add_tail(&inflight_cmd
->list
, &adapter
->inflight
);
2332 spin_unlock_irqrestore(&adapter
->inflight_lock
, flags
);
2334 ibmvnic_send_crq(adapter
, &new_crq
);
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
2353 static void handle_request_cap_rsp(union ibmvnic_crq
*crq
,
2354 struct ibmvnic_adapter
*adapter
)
2356 struct device
*dev
= &adapter
->vdev
->dev
;
2360 switch (be16_to_cpu(crq
->request_capability_rsp
.capability
)) {
2362 req_value
= &adapter
->req_tx_queues
;
2366 req_value
= &adapter
->req_rx_queues
;
2369 case REQ_RX_ADD_QUEUES
:
2370 req_value
= &adapter
->req_rx_add_queues
;
2373 case REQ_TX_ENTRIES_PER_SUBCRQ
:
2374 req_value
= &adapter
->req_tx_entries_per_subcrq
;
2375 name
= "tx_entries_per_subcrq";
2377 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ
:
2378 req_value
= &adapter
->req_rx_add_entries_per_subcrq
;
2379 name
= "rx_add_entries_per_subcrq";
2382 req_value
= &adapter
->req_mtu
;
2385 case PROMISC_REQUESTED
:
2386 req_value
= &adapter
->promisc
;
2390 dev_err(dev
, "Got invalid cap request rsp %d\n",
2391 crq
->request_capability
.capability
);
2395 switch (crq
->request_capability_rsp
.rc
.code
) {
2398 case PARTIALSUCCESS
:
2399 dev_info(dev
, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2401 (long int)be32_to_cpu(crq
->request_capability_rsp
.
2403 release_sub_crqs_no_irqs(adapter
);
2404 *req_value
= be32_to_cpu(crq
->request_capability_rsp
.number
);
2405 init_sub_crqs(adapter
, 1);
2408 dev_err(dev
, "Error %d in request cap rsp\n",
2409 crq
->request_capability_rsp
.rc
.code
);
2413 /* Done receiving requested capabilities, query IP offload support */
2414 if (++adapter
->requested_caps
== 7) {
2415 union ibmvnic_crq newcrq
;
2416 int buf_sz
= sizeof(struct ibmvnic_query_ip_offload_buffer
);
2417 struct ibmvnic_query_ip_offload_buffer
*ip_offload_buf
=
2418 &adapter
->ip_offload_buf
;
2420 adapter
->ip_offload_tok
= dma_map_single(dev
, ip_offload_buf
,
2424 if (dma_mapping_error(dev
, adapter
->ip_offload_tok
)) {
2425 if (!firmware_has_feature(FW_FEATURE_CMO
))
2426 dev_err(dev
, "Couldn't map offload buffer\n");
2430 memset(&newcrq
, 0, sizeof(newcrq
));
2431 newcrq
.query_ip_offload
.first
= IBMVNIC_CRQ_CMD
;
2432 newcrq
.query_ip_offload
.cmd
= QUERY_IP_OFFLOAD
;
2433 newcrq
.query_ip_offload
.len
= cpu_to_be32(buf_sz
);
2434 newcrq
.query_ip_offload
.ioba
=
2435 cpu_to_be32(adapter
->ip_offload_tok
);
2437 ibmvnic_send_crq(adapter
, &newcrq
);
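/* Once all requested capabilities have been acknowledged (requested_caps
 * reaches 7 above), the driver queries TCP/IP offload support.  The
 * CONTROL_IP_OFFLOAD response path in ibmvnic_handle_crq() then calls
 * send_login(), and the server's LOGIN response is handled below.
 */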
2441 static int handle_login_rsp(union ibmvnic_crq
*login_rsp_crq
,
2442 struct ibmvnic_adapter
*adapter
)
2444 struct device
*dev
= &adapter
->vdev
->dev
;
2445 struct ibmvnic_login_rsp_buffer
*login_rsp
= adapter
->login_rsp_buf
;
2446 struct ibmvnic_login_buffer
*login
= adapter
->login_buf
;
2447 union ibmvnic_crq crq
;
2450 dma_unmap_single(dev
, adapter
->login_buf_token
, adapter
->login_buf_sz
,
2452 dma_unmap_single(dev
, adapter
->login_rsp_buf_token
,
2453 adapter
->login_rsp_buf_sz
, DMA_BIDIRECTIONAL
);
2455 /* If the number of queues requested can't be allocated by the
2456 * server, the login response will return with code 1. We will need
2457 * to resend the login buffer with fewer queues requested.
2459 if (login_rsp_crq
->generic
.rc
.code
) {
2460 adapter
->renegotiate
= true;
2461 complete(&adapter
->init_done
);
2465 netdev_dbg(adapter
->netdev
, "Login Response Buffer:\n");
2466 for (i
= 0; i
< (adapter
->login_rsp_buf_sz
- 1) / 8 + 1; i
++) {
2467 netdev_dbg(adapter
->netdev
, "%016lx\n",
2468 ((unsigned long int *)(adapter
->login_rsp_buf
))[i
]);
2472 if (login
->num_txcomp_subcrqs
!= login_rsp
->num_txsubm_subcrqs
||
2473 (be32_to_cpu(login
->num_rxcomp_subcrqs
) *
2474 adapter
->req_rx_add_queues
!=
2475 be32_to_cpu(login_rsp
->num_rxadd_subcrqs
))) {
2476 dev_err(dev
, "FATAL: Inconsistent login and login rsp\n");
2477 ibmvnic_remove(adapter
->vdev
);
2480 complete(&adapter
->init_done
);
2482 memset(&crq
, 0, sizeof(crq
));
2483 crq
.request_ras_comp_num
.first
= IBMVNIC_CRQ_CMD
;
2484 crq
.request_ras_comp_num
.cmd
= REQUEST_RAS_COMP_NUM
;
2485 ibmvnic_send_crq(adapter
, &crq
);
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
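/* handle_query_cap_rsp() stores each capability value reported by the
 * server (minimums, maximums and optimal settings) in the adapter structure
 * and decrements running_cap_queries; when the last outstanding query
 * completes it starts sub-CRQ initialization.
 */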
2548 static void handle_query_cap_rsp(union ibmvnic_crq
*crq
,
2549 struct ibmvnic_adapter
*adapter
)
2551 struct net_device
*netdev
= adapter
->netdev
;
2552 struct device
*dev
= &adapter
->vdev
->dev
;
2555 atomic_dec(&adapter
->running_cap_queries
);
2556 netdev_dbg(netdev
, "Outstanding queries: %d\n",
2557 atomic_read(&adapter
->running_cap_queries
));
2558 rc
= crq
->query_capability
.rc
.code
;
2560 dev_err(dev
, "Error %ld in QUERY_CAP_RSP\n", rc
);
2564 switch (be16_to_cpu(crq
->query_capability
.capability
)) {
2566 adapter
->min_tx_queues
=
2567 be64_to_cpu(crq
->query_capability
.number
);
2568 netdev_dbg(netdev
, "min_tx_queues = %lld\n",
2569 adapter
->min_tx_queues
);
2572 adapter
->min_rx_queues
=
2573 be64_to_cpu(crq
->query_capability
.number
);
2574 netdev_dbg(netdev
, "min_rx_queues = %lld\n",
2575 adapter
->min_rx_queues
);
2577 case MIN_RX_ADD_QUEUES
:
2578 adapter
->min_rx_add_queues
=
2579 be64_to_cpu(crq
->query_capability
.number
);
2580 netdev_dbg(netdev
, "min_rx_add_queues = %lld\n",
2581 adapter
->min_rx_add_queues
);
2584 adapter
->max_tx_queues
=
2585 be64_to_cpu(crq
->query_capability
.number
);
2586 netdev_dbg(netdev
, "max_tx_queues = %lld\n",
2587 adapter
->max_tx_queues
);
2590 adapter
->max_rx_queues
=
2591 be64_to_cpu(crq
->query_capability
.number
);
2592 netdev_dbg(netdev
, "max_rx_queues = %lld\n",
2593 adapter
->max_rx_queues
);
2595 case MAX_RX_ADD_QUEUES
:
2596 adapter
->max_rx_add_queues
=
2597 be64_to_cpu(crq
->query_capability
.number
);
2598 netdev_dbg(netdev
, "max_rx_add_queues = %lld\n",
2599 adapter
->max_rx_add_queues
);
2601 case MIN_TX_ENTRIES_PER_SUBCRQ
:
2602 adapter
->min_tx_entries_per_subcrq
=
2603 be64_to_cpu(crq
->query_capability
.number
);
2604 netdev_dbg(netdev
, "min_tx_entries_per_subcrq = %lld\n",
2605 adapter
->min_tx_entries_per_subcrq
);
2607 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ
:
2608 adapter
->min_rx_add_entries_per_subcrq
=
2609 be64_to_cpu(crq
->query_capability
.number
);
2610 netdev_dbg(netdev
, "min_rx_add_entrs_per_subcrq = %lld\n",
2611 adapter
->min_rx_add_entries_per_subcrq
);
2613 case MAX_TX_ENTRIES_PER_SUBCRQ
:
2614 adapter
->max_tx_entries_per_subcrq
=
2615 be64_to_cpu(crq
->query_capability
.number
);
2616 netdev_dbg(netdev
, "max_tx_entries_per_subcrq = %lld\n",
2617 adapter
->max_tx_entries_per_subcrq
);
2619 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ
:
2620 adapter
->max_rx_add_entries_per_subcrq
=
2621 be64_to_cpu(crq
->query_capability
.number
);
2622 netdev_dbg(netdev
, "max_rx_add_entrs_per_subcrq = %lld\n",
2623 adapter
->max_rx_add_entries_per_subcrq
);
2625 case TCP_IP_OFFLOAD
:
2626 adapter
->tcp_ip_offload
=
2627 be64_to_cpu(crq
->query_capability
.number
);
2628 netdev_dbg(netdev
, "tcp_ip_offload = %lld\n",
2629 adapter
->tcp_ip_offload
);
2631 case PROMISC_SUPPORTED
:
2632 adapter
->promisc_supported
=
2633 be64_to_cpu(crq
->query_capability
.number
);
2634 netdev_dbg(netdev
, "promisc_supported = %lld\n",
2635 adapter
->promisc_supported
);
2638 adapter
->min_mtu
= be64_to_cpu(crq
->query_capability
.number
);
2639 netdev_dbg(netdev
, "min_mtu = %lld\n", adapter
->min_mtu
);
2642 adapter
->max_mtu
= be64_to_cpu(crq
->query_capability
.number
);
2643 netdev_dbg(netdev
, "max_mtu = %lld\n", adapter
->max_mtu
);
2645 case MAX_MULTICAST_FILTERS
:
2646 adapter
->max_multicast_filters
=
2647 be64_to_cpu(crq
->query_capability
.number
);
2648 netdev_dbg(netdev
, "max_multicast_filters = %lld\n",
2649 adapter
->max_multicast_filters
);
2651 case VLAN_HEADER_INSERTION
:
2652 adapter
->vlan_header_insertion
=
2653 be64_to_cpu(crq
->query_capability
.number
);
2654 if (adapter
->vlan_header_insertion
)
2655 netdev
->features
|= NETIF_F_HW_VLAN_STAG_TX
;
2656 netdev_dbg(netdev
, "vlan_header_insertion = %lld\n",
2657 adapter
->vlan_header_insertion
);
2659 case MAX_TX_SG_ENTRIES
:
2660 adapter
->max_tx_sg_entries
=
2661 be64_to_cpu(crq
->query_capability
.number
);
2662 netdev_dbg(netdev
, "max_tx_sg_entries = %lld\n",
2663 adapter
->max_tx_sg_entries
);
2665 case RX_SG_SUPPORTED
:
2666 adapter
->rx_sg_supported
=
2667 be64_to_cpu(crq
->query_capability
.number
);
2668 netdev_dbg(netdev
, "rx_sg_supported = %lld\n",
2669 adapter
->rx_sg_supported
);
2671 case OPT_TX_COMP_SUB_QUEUES
:
2672 adapter
->opt_tx_comp_sub_queues
=
2673 be64_to_cpu(crq
->query_capability
.number
);
2674 netdev_dbg(netdev
, "opt_tx_comp_sub_queues = %lld\n",
2675 adapter
->opt_tx_comp_sub_queues
);
2677 case OPT_RX_COMP_QUEUES
:
2678 adapter
->opt_rx_comp_queues
=
2679 be64_to_cpu(crq
->query_capability
.number
);
2680 netdev_dbg(netdev
, "opt_rx_comp_queues = %lld\n",
2681 adapter
->opt_rx_comp_queues
);
2683 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q
:
2684 adapter
->opt_rx_bufadd_q_per_rx_comp_q
=
2685 be64_to_cpu(crq
->query_capability
.number
);
2686 netdev_dbg(netdev
, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2687 adapter
->opt_rx_bufadd_q_per_rx_comp_q
);
2689 case OPT_TX_ENTRIES_PER_SUBCRQ
:
2690 adapter
->opt_tx_entries_per_subcrq
=
2691 be64_to_cpu(crq
->query_capability
.number
);
2692 netdev_dbg(netdev
, "opt_tx_entries_per_subcrq = %lld\n",
2693 adapter
->opt_tx_entries_per_subcrq
);
2695 case OPT_RXBA_ENTRIES_PER_SUBCRQ
:
2696 adapter
->opt_rxba_entries_per_subcrq
=
2697 be64_to_cpu(crq
->query_capability
.number
);
2698 netdev_dbg(netdev
, "opt_rxba_entries_per_subcrq = %lld\n",
2699 adapter
->opt_rxba_entries_per_subcrq
);
2701 case TX_RX_DESC_REQ
:
2702 adapter
->tx_rx_desc_req
= crq
->query_capability
.number
;
2703 netdev_dbg(netdev
, "tx_rx_desc_req = %llx\n",
2704 adapter
->tx_rx_desc_req
);
2708 netdev_err(netdev
, "Got invalid cap rsp %d\n",
2709 crq
->query_capability
.capability
);
2713 if (atomic_read(&adapter
->running_cap_queries
) == 0)
2714 init_sub_crqs(adapter
, 0);
2715 /* We're done querying the capabilities, initialize sub-crqs */
2718 static void handle_control_ras_rsp(union ibmvnic_crq
*crq
,
2719 struct ibmvnic_adapter
*adapter
)
2721 u8 correlator
= crq
->control_ras_rsp
.correlator
;
2722 struct device
*dev
= &adapter
->vdev
->dev
;
2726 if (crq
->control_ras_rsp
.rc
.code
) {
2727 dev_warn(dev
, "Control ras failed rc=%d\n",
2728 crq
->control_ras_rsp
.rc
.code
);
2732 for (i
= 0; i
< adapter
->ras_comp_num
; i
++) {
2733 if (adapter
->ras_comps
[i
].correlator
== correlator
) {
2740 dev_warn(dev
, "Correlator not found on control_ras_rsp\n");
2744 switch (crq
->control_ras_rsp
.op
) {
2745 case IBMVNIC_TRACE_LEVEL
:
2746 adapter
->ras_comps
[i
].trace_level
= crq
->control_ras
.level
;
2748 case IBMVNIC_ERROR_LEVEL
:
2749 adapter
->ras_comps
[i
].error_check_level
=
2750 crq
->control_ras
.level
;
2752 case IBMVNIC_TRACE_PAUSE
:
2753 adapter
->ras_comp_int
[i
].paused
= 1;
2755 case IBMVNIC_TRACE_RESUME
:
2756 adapter
->ras_comp_int
[i
].paused
= 0;
2758 case IBMVNIC_TRACE_ON
:
2759 adapter
->ras_comps
[i
].trace_on
= 1;
2761 case IBMVNIC_TRACE_OFF
:
2762 adapter
->ras_comps
[i
].trace_on
= 0;
2764 case IBMVNIC_CHG_TRACE_BUFF_SZ
:
2765 /* trace_buff_sz is 3 bytes, stuff it into an int */
2766 ((u8
*)(&adapter
->ras_comps
[i
].trace_buff_size
))[0] = 0;
2767 ((u8
*)(&adapter
->ras_comps
[i
].trace_buff_size
))[1] =
2768 crq
->control_ras_rsp
.trace_buff_sz
[0];
2769 ((u8
*)(&adapter
->ras_comps
[i
].trace_buff_size
))[2] =
2770 crq
->control_ras_rsp
.trace_buff_sz
[1];
2771 ((u8
*)(&adapter
->ras_comps
[i
].trace_buff_size
))[3] =
2772 crq
->control_ras_rsp
.trace_buff_sz
[2];
2775 dev_err(dev
, "invalid op %d on control_ras_rsp",
2776 crq
->control_ras_rsp
.op
);
static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}
2786 static ssize_t
trace_read(struct file
*file
, char __user
*user_buf
, size_t len
,
2789 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2790 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2791 struct device
*dev
= &adapter
->vdev
->dev
;
2792 struct ibmvnic_fw_trace_entry
*trace
;
2793 int num
= ras_comp_int
->num
;
2794 union ibmvnic_crq crq
;
2795 dma_addr_t trace_tok
;
2797 if (*ppos
>= be32_to_cpu(adapter
->ras_comps
[num
].trace_buff_size
))
2801 dma_alloc_coherent(dev
,
2802 be32_to_cpu(adapter
->ras_comps
[num
].
2803 trace_buff_size
), &trace_tok
,
2806 dev_err(dev
, "Couldn't alloc trace buffer\n");
2810 memset(&crq
, 0, sizeof(crq
));
2811 crq
.collect_fw_trace
.first
= IBMVNIC_CRQ_CMD
;
2812 crq
.collect_fw_trace
.cmd
= COLLECT_FW_TRACE
;
2813 crq
.collect_fw_trace
.correlator
= adapter
->ras_comps
[num
].correlator
;
2814 crq
.collect_fw_trace
.ioba
= cpu_to_be32(trace_tok
);
2815 crq
.collect_fw_trace
.len
= adapter
->ras_comps
[num
].trace_buff_size
;
2816 ibmvnic_send_crq(adapter
, &crq
);
2818 init_completion(&adapter
->fw_done
);
2819 wait_for_completion(&adapter
->fw_done
);
2821 if (*ppos
+ len
> be32_to_cpu(adapter
->ras_comps
[num
].trace_buff_size
))
2823 be32_to_cpu(adapter
->ras_comps
[num
].trace_buff_size
) -
2826 copy_to_user(user_buf
, &((u8
*)trace
)[*ppos
], len
);
2828 dma_free_coherent(dev
,
2829 be32_to_cpu(adapter
->ras_comps
[num
].trace_buff_size
),
2835 static const struct file_operations trace_ops
= {
2836 .owner
= THIS_MODULE
,
2837 .open
= ibmvnic_fw_comp_open
,
2841 static ssize_t
paused_read(struct file
*file
, char __user
*user_buf
, size_t len
,
2844 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2845 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2846 int num
= ras_comp_int
->num
;
2847 char buff
[5]; /* 1 or 0 plus \n and \0 */
2850 size
= sprintf(buff
, "%d\n", adapter
->ras_comp_int
[num
].paused
);
2855 copy_to_user(user_buf
, buff
, size
);
2860 static ssize_t
paused_write(struct file
*file
, const char __user
*user_buf
,
2861 size_t len
, loff_t
*ppos
)
2863 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2864 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2865 int num
= ras_comp_int
->num
;
2866 union ibmvnic_crq crq
;
2868 char buff
[9]; /* decimal max int plus \n and \0 */
2870 copy_from_user(buff
, user_buf
, sizeof(buff
));
2871 val
= kstrtoul(buff
, 10, NULL
);
2873 adapter
->ras_comp_int
[num
].paused
= val
? 1 : 0;
2875 memset(&crq
, 0, sizeof(crq
));
2876 crq
.control_ras
.first
= IBMVNIC_CRQ_CMD
;
2877 crq
.control_ras
.cmd
= CONTROL_RAS
;
2878 crq
.control_ras
.correlator
= adapter
->ras_comps
[num
].correlator
;
2879 crq
.control_ras
.op
= val
? IBMVNIC_TRACE_PAUSE
: IBMVNIC_TRACE_RESUME
;
2880 ibmvnic_send_crq(adapter
, &crq
);
2885 static const struct file_operations paused_ops
= {
2886 .owner
= THIS_MODULE
,
2887 .open
= ibmvnic_fw_comp_open
,
2888 .read
= paused_read
,
2889 .write
= paused_write
,
2892 static ssize_t
tracing_read(struct file
*file
, char __user
*user_buf
,
2893 size_t len
, loff_t
*ppos
)
2895 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2896 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2897 int num
= ras_comp_int
->num
;
2898 char buff
[5]; /* 1 or 0 plus \n and \0 */
2901 size
= sprintf(buff
, "%d\n", adapter
->ras_comps
[num
].trace_on
);
2906 copy_to_user(user_buf
, buff
, size
);
2911 static ssize_t
tracing_write(struct file
*file
, const char __user
*user_buf
,
2912 size_t len
, loff_t
*ppos
)
2914 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2915 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2916 int num
= ras_comp_int
->num
;
2917 union ibmvnic_crq crq
;
2919 char buff
[9]; /* decimal max int plus \n and \0 */
2921 copy_from_user(buff
, user_buf
, sizeof(buff
));
2922 val
= kstrtoul(buff
, 10, NULL
);
2924 memset(&crq
, 0, sizeof(crq
));
2925 crq
.control_ras
.first
= IBMVNIC_CRQ_CMD
;
2926 crq
.control_ras
.cmd
= CONTROL_RAS
;
2927 crq
.control_ras
.correlator
= adapter
->ras_comps
[num
].correlator
;
2928 crq
.control_ras
.op
= val
? IBMVNIC_TRACE_ON
: IBMVNIC_TRACE_OFF
;
2933 static const struct file_operations tracing_ops
= {
2934 .owner
= THIS_MODULE
,
2935 .open
= ibmvnic_fw_comp_open
,
2936 .read
= tracing_read
,
2937 .write
= tracing_write
,
2940 static ssize_t
error_level_read(struct file
*file
, char __user
*user_buf
,
2941 size_t len
, loff_t
*ppos
)
2943 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2944 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2945 int num
= ras_comp_int
->num
;
2946 char buff
[5]; /* decimal max char plus \n and \0 */
2949 size
= sprintf(buff
, "%d\n", adapter
->ras_comps
[num
].error_check_level
);
2954 copy_to_user(user_buf
, buff
, size
);
2959 static ssize_t
error_level_write(struct file
*file
, const char __user
*user_buf
,
2960 size_t len
, loff_t
*ppos
)
2962 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2963 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2964 int num
= ras_comp_int
->num
;
2965 union ibmvnic_crq crq
;
2967 char buff
[9]; /* decimal max int plus \n and \0 */
2969 copy_from_user(buff
, user_buf
, sizeof(buff
));
2970 val
= kstrtoul(buff
, 10, NULL
);
2975 memset(&crq
, 0, sizeof(crq
));
2976 crq
.control_ras
.first
= IBMVNIC_CRQ_CMD
;
2977 crq
.control_ras
.cmd
= CONTROL_RAS
;
2978 crq
.control_ras
.correlator
= adapter
->ras_comps
[num
].correlator
;
2979 crq
.control_ras
.op
= IBMVNIC_ERROR_LEVEL
;
2980 crq
.control_ras
.level
= val
;
2981 ibmvnic_send_crq(adapter
, &crq
);
2986 static const struct file_operations error_level_ops
= {
2987 .owner
= THIS_MODULE
,
2988 .open
= ibmvnic_fw_comp_open
,
2989 .read
= error_level_read
,
2990 .write
= error_level_write
,
2993 static ssize_t
trace_level_read(struct file
*file
, char __user
*user_buf
,
2994 size_t len
, loff_t
*ppos
)
2996 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
2997 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
2998 int num
= ras_comp_int
->num
;
2999 char buff
[5]; /* decimal max char plus \n and \0 */
3002 size
= sprintf(buff
, "%d\n", adapter
->ras_comps
[num
].trace_level
);
3006 copy_to_user(user_buf
, buff
, size
);
3011 static ssize_t
trace_level_write(struct file
*file
, const char __user
*user_buf
,
3012 size_t len
, loff_t
*ppos
)
3014 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
3015 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
3016 union ibmvnic_crq crq
;
3018 char buff
[9]; /* decimal max int plus \n and \0 */
3020 copy_from_user(buff
, user_buf
, sizeof(buff
));
3021 val
= kstrtoul(buff
, 10, NULL
);
3025 memset(&crq
, 0, sizeof(crq
));
3026 crq
.control_ras
.first
= IBMVNIC_CRQ_CMD
;
3027 crq
.control_ras
.cmd
= CONTROL_RAS
;
3028 crq
.control_ras
.correlator
=
3029 adapter
->ras_comps
[ras_comp_int
->num
].correlator
;
3030 crq
.control_ras
.op
= IBMVNIC_TRACE_LEVEL
;
3031 crq
.control_ras
.level
= val
;
3032 ibmvnic_send_crq(adapter
, &crq
);
3037 static const struct file_operations trace_level_ops
= {
3038 .owner
= THIS_MODULE
,
3039 .open
= ibmvnic_fw_comp_open
,
3040 .read
= trace_level_read
,
3041 .write
= trace_level_write
,
3044 static ssize_t
trace_buff_size_read(struct file
*file
, char __user
*user_buf
,
3045 size_t len
, loff_t
*ppos
)
3047 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
3048 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
3049 int num
= ras_comp_int
->num
;
3050 char buff
[9]; /* decimal max int plus \n and \0 */
3053 size
= sprintf(buff
, "%d\n", adapter
->ras_comps
[num
].trace_buff_size
);
3057 copy_to_user(user_buf
, buff
, size
);
3062 static ssize_t
trace_buff_size_write(struct file
*file
,
3063 const char __user
*user_buf
, size_t len
,
3066 struct ibmvnic_fw_comp_internal
*ras_comp_int
= file
->private_data
;
3067 struct ibmvnic_adapter
*adapter
= ras_comp_int
->adapter
;
3068 union ibmvnic_crq crq
;
3070 char buff
[9]; /* decimal max int plus \n and \0 */
3072 copy_from_user(buff
, user_buf
, sizeof(buff
));
3073 val
= kstrtoul(buff
, 10, NULL
);
3075 memset(&crq
, 0, sizeof(crq
));
3076 crq
.control_ras
.first
= IBMVNIC_CRQ_CMD
;
3077 crq
.control_ras
.cmd
= CONTROL_RAS
;
3078 crq
.control_ras
.correlator
=
3079 adapter
->ras_comps
[ras_comp_int
->num
].correlator
;
3080 crq
.control_ras
.op
= IBMVNIC_CHG_TRACE_BUFF_SZ
;
3081 /* trace_buff_sz is 3 bytes, stuff an int into it */
3082 crq
.control_ras
.trace_buff_sz
[0] = ((u8
*)(&val
))[5];
3083 crq
.control_ras
.trace_buff_sz
[1] = ((u8
*)(&val
))[6];
3084 crq
.control_ras
.trace_buff_sz
[2] = ((u8
*)(&val
))[7];
3085 ibmvnic_send_crq(adapter
, &crq
);
3090 static const struct file_operations trace_size_ops
= {
3091 .owner
= THIS_MODULE
,
3092 .open
= ibmvnic_fw_comp_open
,
3093 .read
= trace_buff_size_read
,
3094 .write
= trace_buff_size_write
,
3097 static void handle_request_ras_comps_rsp(union ibmvnic_crq
*crq
,
3098 struct ibmvnic_adapter
*adapter
)
3100 struct device
*dev
= &adapter
->vdev
->dev
;
3101 struct dentry
*dir_ent
;
3105 debugfs_remove_recursive(adapter
->ras_comps_ent
);
3107 adapter
->ras_comps_ent
= debugfs_create_dir("ras_comps",
3108 adapter
->debugfs_dir
);
3109 if (!adapter
->ras_comps_ent
|| IS_ERR(adapter
->ras_comps_ent
)) {
3110 dev_info(dev
, "debugfs create ras_comps dir failed\n");
3114 for (i
= 0; i
< adapter
->ras_comp_num
; i
++) {
3115 dir_ent
= debugfs_create_dir(adapter
->ras_comps
[i
].name
,
3116 adapter
->ras_comps_ent
);
3117 if (!dir_ent
|| IS_ERR(dir_ent
)) {
3118 dev_info(dev
, "debugfs create %s dir failed\n",
3119 adapter
->ras_comps
[i
].name
);
3123 adapter
->ras_comp_int
[i
].adapter
= adapter
;
3124 adapter
->ras_comp_int
[i
].num
= i
;
3125 adapter
->ras_comp_int
[i
].desc_blob
.data
=
3126 &adapter
->ras_comps
[i
].description
;
3127 adapter
->ras_comp_int
[i
].desc_blob
.size
=
3128 sizeof(adapter
->ras_comps
[i
].description
);
3130 /* Don't need to remember the dentry's because the debugfs dir
3131 * gets removed recursively
3133 ent
= debugfs_create_blob("description", S_IRUGO
, dir_ent
,
3134 &adapter
->ras_comp_int
[i
].desc_blob
);
3135 ent
= debugfs_create_file("trace_buf_size", S_IRUGO
| S_IWUSR
,
3136 dir_ent
, &adapter
->ras_comp_int
[i
],
3138 ent
= debugfs_create_file("trace_level",
3140 (adapter
->ras_comps
[i
].trace_level
!=
3141 0xFF ? S_IWUSR
: 0),
3142 dir_ent
, &adapter
->ras_comp_int
[i
],
3144 ent
= debugfs_create_file("error_level",
3147 ras_comps
[i
].error_check_level
!=
3148 0xFF ? S_IWUSR
: 0),
3149 dir_ent
, &adapter
->ras_comp_int
[i
],
3151 ent
= debugfs_create_file("tracing", S_IRUGO
| S_IWUSR
,
3152 dir_ent
, &adapter
->ras_comp_int
[i
],
3154 ent
= debugfs_create_file("paused", S_IRUGO
| S_IWUSR
,
3155 dir_ent
, &adapter
->ras_comp_int
[i
],
3157 ent
= debugfs_create_file("trace", S_IRUGO
, dir_ent
,
3158 &adapter
->ras_comp_int
[i
],
3163 static void handle_request_ras_comp_num_rsp(union ibmvnic_crq
*crq
,
3164 struct ibmvnic_adapter
*adapter
)
3166 int len
= adapter
->ras_comp_num
* sizeof(struct ibmvnic_fw_component
);
3167 struct device
*dev
= &adapter
->vdev
->dev
;
3168 union ibmvnic_crq newcrq
;
3170 adapter
->ras_comps
= dma_alloc_coherent(dev
, len
,
3171 &adapter
->ras_comps_tok
,
3173 if (!adapter
->ras_comps
) {
3174 if (!firmware_has_feature(FW_FEATURE_CMO
))
3175 dev_err(dev
, "Couldn't alloc fw comps buffer\n");
3179 adapter
->ras_comp_int
= kmalloc(adapter
->ras_comp_num
*
3180 sizeof(struct ibmvnic_fw_comp_internal
),
3182 if (!adapter
->ras_comp_int
)
3183 dma_free_coherent(dev
, len
, adapter
->ras_comps
,
3184 adapter
->ras_comps_tok
);
3186 memset(&newcrq
, 0, sizeof(newcrq
));
3187 newcrq
.request_ras_comps
.first
= IBMVNIC_CRQ_CMD
;
3188 newcrq
.request_ras_comps
.cmd
= REQUEST_RAS_COMPS
;
3189 newcrq
.request_ras_comps
.ioba
= cpu_to_be32(adapter
->ras_comps_tok
);
3190 newcrq
.request_ras_comps
.len
= cpu_to_be32(len
);
3191 ibmvnic_send_crq(adapter
, &newcrq
);
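/* ibmvnic_free_inflight() below reclaims resources for commands that leave
 * a buffer registered with firmware (LOGIN, REQUEST_DUMP and
 * REQUEST_ERROR_INFO).  Such commands are tracked on adapter->inflight when
 * they are issued; if the CRQ transport drops before a response arrives,
 * the XPORT event path in ibmvnic_handle_crq() uses this helper to unmap
 * and free the outstanding buffers.
 */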
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
3239 static void ibmvnic_handle_crq(union ibmvnic_crq
*crq
,
3240 struct ibmvnic_adapter
*adapter
)
3242 struct ibmvnic_generic_crq
*gen_crq
= &crq
->generic
;
3243 struct net_device
*netdev
= adapter
->netdev
;
3244 struct device
*dev
= &adapter
->vdev
->dev
;
3247 netdev_dbg(netdev
, "Handling CRQ: %016lx %016lx\n",
3248 ((unsigned long int *)crq
)[0],
3249 ((unsigned long int *)crq
)[1]);
3250 switch (gen_crq
->first
) {
3251 case IBMVNIC_CRQ_INIT_RSP
:
3252 switch (gen_crq
->cmd
) {
3253 case IBMVNIC_CRQ_INIT
:
3254 dev_info(dev
, "Partner initialized\n");
3255 /* Send back a response */
3256 rc
= ibmvnic_send_crq_init_complete(adapter
);
3258 schedule_work(&adapter
->vnic_crq_init
);
3260 dev_err(dev
, "Can't send initrsp rc=%ld\n", rc
);
3262 case IBMVNIC_CRQ_INIT_COMPLETE
:
3263 dev_info(dev
, "Partner initialization complete\n");
3264 send_version_xchg(adapter
);
3267 dev_err(dev
, "Unknown crq cmd: %d\n", gen_crq
->cmd
);
3270 case IBMVNIC_CRQ_XPORT_EVENT
:
3271 if (gen_crq
->cmd
== IBMVNIC_PARTITION_MIGRATED
) {
3272 dev_info(dev
, "Re-enabling adapter\n");
3273 adapter
->migrated
= true;
3274 ibmvnic_free_inflight(adapter
);
3275 release_sub_crqs(adapter
);
3276 rc
= ibmvnic_reenable_crq_queue(adapter
);
3278 dev_err(dev
, "Error after enable rc=%ld\n", rc
);
3279 adapter
->migrated
= false;
3280 rc
= ibmvnic_send_crq_init(adapter
);
3282 dev_err(dev
, "Error sending init rc=%ld\n", rc
);
3284 /* The adapter lost the connection */
3285 dev_err(dev
, "Virtual Adapter failed (rc=%d)\n",
3287 ibmvnic_free_inflight(adapter
);
3288 release_sub_crqs(adapter
);
3291 case IBMVNIC_CRQ_CMD_RSP
:
3294 dev_err(dev
, "Got an invalid msg type 0x%02x\n",
3299 switch (gen_crq
->cmd
) {
3300 case VERSION_EXCHANGE_RSP
:
3301 rc
= crq
->version_exchange_rsp
.rc
.code
;
3303 dev_err(dev
, "Error %ld in VERSION_EXCHG_RSP\n", rc
);
3306 dev_info(dev
, "Partner protocol version is %d\n",
3307 crq
->version_exchange_rsp
.version
);
3308 if (be16_to_cpu(crq
->version_exchange_rsp
.version
) <
3311 be16_to_cpu(crq
->version_exchange_rsp
.version
);
3312 send_cap_queries(adapter
);
3314 case QUERY_CAPABILITY_RSP
:
3315 handle_query_cap_rsp(crq
, adapter
);
3318 handle_query_map_rsp(crq
, adapter
);
3320 case REQUEST_MAP_RSP
:
3321 handle_request_map_rsp(crq
, adapter
);
3323 case REQUEST_UNMAP_RSP
:
3324 handle_request_unmap_rsp(crq
, adapter
);
3326 case REQUEST_CAPABILITY_RSP
:
3327 handle_request_cap_rsp(crq
, adapter
);
3330 netdev_dbg(netdev
, "Got Login Response\n");
3331 handle_login_rsp(crq
, adapter
);
3333 case LOGICAL_LINK_STATE_RSP
:
3334 netdev_dbg(netdev
, "Got Logical Link State Response\n");
3335 adapter
->logical_link_state
=
3336 crq
->logical_link_state_rsp
.link_state
;
3338 case LINK_STATE_INDICATION
:
3339 netdev_dbg(netdev
, "Got Logical Link State Indication\n");
3340 adapter
->phys_link_state
=
3341 crq
->link_state_indication
.phys_link_state
;
3342 adapter
->logical_link_state
=
3343 crq
->link_state_indication
.logical_link_state
;
3345 case CHANGE_MAC_ADDR_RSP
:
3346 netdev_dbg(netdev
, "Got MAC address change Response\n");
3347 handle_change_mac_rsp(crq
, adapter
);
3349 case ERROR_INDICATION
:
3350 netdev_dbg(netdev
, "Got Error Indication\n");
3351 handle_error_indication(crq
, adapter
);
3353 case REQUEST_ERROR_RSP
:
3354 netdev_dbg(netdev
, "Got Error Detail Response\n");
3355 handle_error_info_rsp(crq
, adapter
);
3357 case REQUEST_STATISTICS_RSP
:
3358 netdev_dbg(netdev
, "Got Statistics Response\n");
3359 complete(&adapter
->stats_done
);
3361 case REQUEST_DUMP_SIZE_RSP
:
3362 netdev_dbg(netdev
, "Got Request Dump Size Response\n");
3363 handle_dump_size_rsp(crq
, adapter
);
3365 case REQUEST_DUMP_RSP
:
3366 netdev_dbg(netdev
, "Got Request Dump Response\n");
3367 complete(&adapter
->fw_done
);
3369 case QUERY_IP_OFFLOAD_RSP
:
3370 netdev_dbg(netdev
, "Got Query IP offload Response\n");
3371 handle_query_ip_offload_rsp(adapter
);
3373 case MULTICAST_CTRL_RSP
:
3374 netdev_dbg(netdev
, "Got multicast control Response\n");
3376 case CONTROL_IP_OFFLOAD_RSP
:
3377 netdev_dbg(netdev
, "Got Control IP offload Response\n");
3378 dma_unmap_single(dev
, adapter
->ip_offload_ctrl_tok
,
3379 sizeof(adapter
->ip_offload_ctrl
),
3381 /* We're done with the queries, perform the login */
3382 send_login(adapter
);
3384 case REQUEST_RAS_COMP_NUM_RSP
:
3385 netdev_dbg(netdev
, "Got Request RAS Comp Num Response\n");
3386 if (crq
->request_ras_comp_num_rsp
.rc
.code
== 10) {
3387 netdev_dbg(netdev
, "Request RAS Comp Num not supported\n");
3390 adapter
->ras_comp_num
=
3391 be32_to_cpu(crq
->request_ras_comp_num_rsp
.num_components
);
3392 handle_request_ras_comp_num_rsp(crq
, adapter
);
3394 case REQUEST_RAS_COMPS_RSP
:
3395 netdev_dbg(netdev
, "Got Request RAS Comps Response\n");
3396 handle_request_ras_comps_rsp(crq
, adapter
);
3398 case CONTROL_RAS_RSP
:
3399 netdev_dbg(netdev
, "Got Control RAS Response\n");
3400 handle_control_ras_rsp(crq
, adapter
);
3402 case COLLECT_FW_TRACE_RSP
:
3403 netdev_dbg(netdev
, "Got Collect firmware trace Response\n");
3404 complete(&adapter
->fw_done
);
3407 netdev_err(netdev
, "Got an invalid cmd type 0x%02x\n",
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}
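/* The interrupt handler drains the CRQ with interrupts disabled, then
 * re-enables them and checks the queue once more; a message that slipped in
 * between the last dequeue and the re-enable is handled immediately and the
 * drain is repeated, so no CRQ event is lost across the enable window.
 */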
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}
3504 static int ibmvnic_init_crq_queue(struct ibmvnic_adapter
*adapter
)
3506 struct ibmvnic_crq_queue
*crq
= &adapter
->crq
;
3507 struct device
*dev
= &adapter
->vdev
->dev
;
3508 struct vio_dev
*vdev
= adapter
->vdev
;
3509 int rc
, retrc
= -ENOMEM
;
3511 crq
->msgs
= (union ibmvnic_crq
*)get_zeroed_page(GFP_KERNEL
);
3512 /* Should we allocate more than one page? */
3517 crq
->size
= PAGE_SIZE
/ sizeof(*crq
->msgs
);
3518 crq
->msg_token
= dma_map_single(dev
, crq
->msgs
, PAGE_SIZE
,
3520 if (dma_mapping_error(dev
, crq
->msg_token
))
3523 rc
= plpar_hcall_norets(H_REG_CRQ
, vdev
->unit_address
,
3524 crq
->msg_token
, PAGE_SIZE
);
3526 if (rc
== H_RESOURCE
)
3527 /* maybe kexecing and resource is busy. try a reset */
3528 rc
= ibmvnic_reset_crq(adapter
);
3531 if (rc
== H_CLOSED
) {
3532 dev_warn(dev
, "Partner adapter not ready\n");
3534 dev_warn(dev
, "Error %d opening adapter\n", rc
);
3535 goto reg_crq_failed
;
3540 netdev_dbg(adapter
->netdev
, "registering irq 0x%x\n", vdev
->irq
);
3541 rc
= request_irq(vdev
->irq
, ibmvnic_interrupt
, 0, IBMVNIC_NAME
,
3544 dev_err(dev
, "Couldn't register irq 0x%x. rc=%d\n",
3546 goto req_irq_failed
;
3549 rc
= vio_enable_interrupts(vdev
);
3551 dev_err(dev
, "Error %d enabling interrupts\n", rc
);
3552 goto req_irq_failed
;
3556 spin_lock_init(&crq
->lock
);
3562 rc
= plpar_hcall_norets(H_FREE_CRQ
, vdev
->unit_address
);
3563 } while (rc
== H_BUSY
|| H_IS_LONG_BUSY(rc
));
3565 dma_unmap_single(dev
, crq
->msg_token
, PAGE_SIZE
, DMA_BIDIRECTIONAL
);
3567 free_page((unsigned long)crq
->msgs
);
/* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
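/* Reading the debugfs "dump" file drives a three-step exchange: the show
 * routine above sends REQUEST_DUMP_SIZE and sleeps on fw_done;
 * handle_dump_size_rsp() allocates and maps a buffer of the reported size
 * and issues REQUEST_DUMP; the REQUEST_DUMP_RSP handler completes fw_done,
 * after which the collected dump is written to the seq_file and freed.
 */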
3610 static void handle_crq_init_rsp(struct work_struct
*work
)
3612 struct ibmvnic_adapter
*adapter
= container_of(work
,
3613 struct ibmvnic_adapter
,
3615 struct device
*dev
= &adapter
->vdev
->dev
;
3616 struct net_device
*netdev
= adapter
->netdev
;
3617 unsigned long timeout
= msecs_to_jiffies(30000);
3620 send_version_xchg(adapter
);
3621 reinit_completion(&adapter
->init_done
);
3622 if (!wait_for_completion_timeout(&adapter
->init_done
, timeout
)) {
3623 dev_err(dev
, "Passive init timeout\n");
3628 if (adapter
->renegotiate
) {
3629 adapter
->renegotiate
= false;
3630 release_sub_crqs_no_irqs(adapter
);
3631 send_cap_queries(adapter
);
3633 reinit_completion(&adapter
->init_done
);
3634 if (!wait_for_completion_timeout(&adapter
->init_done
,
3636 dev_err(dev
, "Passive init timeout\n");
3640 } while (adapter
->renegotiate
);
3641 rc
= init_sub_crq_irqs(adapter
);
3646 netdev
->real_num_tx_queues
= adapter
->req_tx_queues
;
3648 rc
= register_netdev(netdev
);
3651 "failed to register netdev rc=%d\n", rc
);
3652 goto register_failed
;
3654 dev_info(dev
, "ibmvnic registered\n");
3659 release_sub_crqs(adapter
);
3661 dev_err(dev
, "Passive initialization was not successful\n");
3664 static int ibmvnic_probe(struct vio_dev
*dev
, const struct vio_device_id
*id
)
3666 unsigned long timeout
= msecs_to_jiffies(30000);
3667 struct ibmvnic_adapter
*adapter
;
3668 struct net_device
*netdev
;
3669 unsigned char *mac_addr_p
;
3671 char buf
[16]; /* debugfs name buf */
3674 dev_dbg(&dev
->dev
, "entering ibmvnic_probe for UA 0x%x\n",
3677 mac_addr_p
= (unsigned char *)vio_get_attribute(dev
,
3678 VETH_MAC_ADDR
, NULL
);
3681 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3682 __FILE__
, __LINE__
);
3686 netdev
= alloc_etherdev_mq(sizeof(struct ibmvnic_adapter
),
3687 IBMVNIC_MAX_TX_QUEUES
);
3691 adapter
= netdev_priv(netdev
);
3692 dev_set_drvdata(&dev
->dev
, netdev
);
3693 adapter
->vdev
= dev
;
3694 adapter
->netdev
= netdev
;
3696 ether_addr_copy(adapter
->mac_addr
, mac_addr_p
);
3697 ether_addr_copy(netdev
->dev_addr
, adapter
->mac_addr
);
3698 netdev
->irq
= dev
->irq
;
3699 netdev
->netdev_ops
= &ibmvnic_netdev_ops
;
3700 netdev
->ethtool_ops
= &ibmvnic_ethtool_ops
;
3701 SET_NETDEV_DEV(netdev
, &dev
->dev
);
3703 INIT_WORK(&adapter
->vnic_crq_init
, handle_crq_init_rsp
);
3705 spin_lock_init(&adapter
->stats_lock
);
3707 rc
= ibmvnic_init_crq_queue(adapter
);
3709 dev_err(&dev
->dev
, "Couldn't initialize crq. rc=%d\n", rc
);
3713 INIT_LIST_HEAD(&adapter
->errors
);
3714 INIT_LIST_HEAD(&adapter
->inflight
);
3715 spin_lock_init(&adapter
->error_list_lock
);
3716 spin_lock_init(&adapter
->inflight_lock
);
3718 adapter
->stats_token
= dma_map_single(&dev
->dev
, &adapter
->stats
,
3719 sizeof(struct ibmvnic_statistics
),
3721 if (dma_mapping_error(&dev
->dev
, adapter
->stats_token
)) {
3722 if (!firmware_has_feature(FW_FEATURE_CMO
))
3723 dev_err(&dev
->dev
, "Couldn't map stats buffer\n");
3727 snprintf(buf
, sizeof(buf
), "ibmvnic_%x", dev
->unit_address
);
3728 ent
= debugfs_create_dir(buf
, NULL
);
3729 if (!ent
|| IS_ERR(ent
)) {
3730 dev_info(&dev
->dev
, "debugfs create directory failed\n");
3731 adapter
->debugfs_dir
= NULL
;
3733 adapter
->debugfs_dir
= ent
;
3734 ent
= debugfs_create_file("dump", S_IRUGO
, adapter
->debugfs_dir
,
3735 netdev
, &ibmvnic_dump_ops
);
3736 if (!ent
|| IS_ERR(ent
)) {
3738 "debugfs create dump file failed\n");
3739 adapter
->debugfs_dump
= NULL
;
3741 adapter
->debugfs_dump
= ent
;
3744 ibmvnic_send_crq_init(adapter
);
3746 init_completion(&adapter
->init_done
);
3747 if (!wait_for_completion_timeout(&adapter
->init_done
, timeout
))
3751 if (adapter
->renegotiate
) {
3752 adapter
->renegotiate
= false;
3753 release_sub_crqs_no_irqs(adapter
);
3754 send_cap_queries(adapter
);
3756 reinit_completion(&adapter
->init_done
);
3757 if (!wait_for_completion_timeout(&adapter
->init_done
,
3761 } while (adapter
->renegotiate
);
3763 rc
= init_sub_crq_irqs(adapter
);
3765 dev_err(&dev
->dev
, "failed to initialize sub crq irqs\n");
3769 netdev
->real_num_tx_queues
= adapter
->req_tx_queues
;
3771 rc
= register_netdev(netdev
);
3773 dev_err(&dev
->dev
, "failed to register netdev rc=%d\n", rc
);
3776 dev_info(&dev
->dev
, "ibmvnic registered\n");
3781 release_sub_crqs(adapter
);
3783 if (adapter
->debugfs_dir
&& !IS_ERR(adapter
->debugfs_dir
))
3784 debugfs_remove_recursive(adapter
->debugfs_dir
);
3786 ibmvnic_release_crq_queue(adapter
);
3788 free_netdev(netdev
);
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
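/* ibmvnic_get_desired_dma() above gives the VIO layer an estimate of the
 * adapter's IOMMU footprint (CRQ page, bounce buffer, statistics buffer,
 * sub-CRQ message queues and the rx buffer pools) so that enough DMA
 * entitlement is reserved for the device.
 */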
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);