/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify */
/*  it under the terms of the GNU General Public License as published by */
/*  the Free Software Foundation; either version 2 of the License, or    */
/*  (at your option) any later version.                                  */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,      */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/*  GNU General Public License for more details.                         */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License    */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to CRQs,*/
/* but are used by the driver to notify the server that a packet is      */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, continuous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;

static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
121 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets
)},
122 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes
)},
123 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets
)},
124 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes
)},
125 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets
)},
126 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets
)},
127 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets
)},
128 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets
)},
129 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets
)},
130 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets
)},
131 {"align_errors", IBMVNIC_STAT_OFF(align_errors
)},
132 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors
)},
133 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames
)},
134 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames
)},
135 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors
)},
136 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx
)},
137 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions
)},
138 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions
)},
139 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors
)},
140 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense
)},
141 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames
)},
142 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors
)},
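/* Thin wrapper around the H_REG_SUB_CRQ hcall: registers a sub-CRQ message
 * area with the hypervisor and returns the assigned queue number and
 * interrupt source through @number and @irq.
 */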
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}

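/* Allocate a coherent DMA buffer for long-term mapping and register it with
 * the VNIC server via a REQUEST_MAP CRQ; the caller sleeps on fw_done until
 * the map response arrives.
 */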
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	send_request_unmap(adapter, ltb->map_id);
}

static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);
	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}

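/* Post new rx buffers to the VNIC server: for every free slot in the pool,
 * attach an skb, point the slot at its offset within the long-term mapped
 * buffer and hand the descriptor to the server with an rx_add sub-CRQ.
 */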
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * overwritten.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}

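/* ndo_open: allocate NAPI contexts, rx/tx pools and the tx bounce buffer,
 * prime the rx pools, enable sub-CRQ interrupts and ask the server to bring
 * the logical link up.
 */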
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}

	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}

	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_start_queue(netdev);
	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	netif_stop_queue(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_TO_DEVICE);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}

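/* ndo_start_xmit: copy the linear skb data into the next free slot of the
 * queue's long-term mapped tx buffer, build a v1 tx descriptor and hand it
 * to the server with a tx sub-CRQ; completions are reported asynchronously
 * through ibmvnic_complete_tx().
 */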
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
	    adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;

	lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);

	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
			    adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

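/* NAPI poll: drain completed rx sub-CRQ entries for this queue, copy each
 * frame out of the long-term mapped buffer into its skb, recycle the pool
 * slot and hand the skb to the stack via GRO.
 */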
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct ibmvnic_rx_buff *rx_buff;
		struct sk_buff *skb;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_pools(adapter);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}

	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(ibmvnic_stats);
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

= {
985 .get_settings
= ibmvnic_get_settings
,
986 .get_drvinfo
= ibmvnic_get_drvinfo
,
987 .get_msglevel
= ibmvnic_get_msglevel
,
988 .set_msglevel
= ibmvnic_set_msglevel
,
989 .get_link
= ibmvnic_get_link
,
990 .get_ringparam
= ibmvnic_get_ringparam
,
991 .get_strings
= ibmvnic_get_strings
,
992 .get_sset_count
= ibmvnic_get_sset_count
,
993 .get_ethtool_stats
= ibmvnic_get_ethtool_stats
,
/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

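/* Allocate and register one sub-CRQ: a four-page message area is DMA mapped
 * and handed to the hypervisor with H_REG_SUB_CRQ, and the returned hw_irq
 * is mapped to a Linux interrupt for the queue.
 */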
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
	if (scrq->irq == NO_IRQ) {
		dev_err(dev, "Error mapping irq\n");
		goto map_irq_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

map_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

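/* Negotiate and set up the sub-CRQs: size the per-queue entry counts to what
 * fits in the four-page message area, register as many queues as the server
 * allows, wire up their interrupts, and then send REQUEST_CAPABILITY CRQs
 * for the values actually chosen.
 */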
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i, j;
	int rc;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Choosing the maximum number of queues supported by firmware*/
		adapter->req_tx_queues = adapter->min_tx_queues;
		adapter->req_rx_queues = adapter->min_rx_queues;
		adapter->req_rx_add_queues = adapter->min_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				adapter->tx_scrq[i]->irq, rc);
			goto req_tx_irq_failed;
		}
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				adapter->rx_scrq[i]->irq, rc);
			goto req_rx_irq_failed;
		}
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

req_rx_irq_failed:
	for (j = 0; j < i; j++)
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++)
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
	kfree(adapter->rx_scrq);
	adapter->rx_scrq = NULL;
rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}

static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

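/* Build the login request: the buffer carries the count and queue numbers of
 * the tx/rx sub-CRQs we registered, plus the DMA address of a response
 * buffer for the server to fill in, and is tracked on the in-flight list
 * until the LOGIN response arrives.
 */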
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size =
	    sizeof(struct ibmvnic_login_rsp_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues +
			   adapter->req_rx_queues *
			   adapter->req_rx_add_queues +
			   adapter->req_rx_add_queues) +
	    sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     __be32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}

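/* Process the QUERY_IP_OFFLOAD response: log the advertised offloads, mirror
 * the checksum capabilities into netdev->features and ip_offload_ctrl, and
 * push the chosen settings back with a CONTROL_IP_OFFLOAD CRQ.
 */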
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry(error_buff, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_FROM_DEVICE);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_size = len;

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.
		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		crq->error_indication.error_id,
		crq->error_indication.error_cause);

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}
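
/* Illustrative note, not from the original source: the error buffer queued on
 * adapter->errors and the REQUEST_ERROR_INFO command queued on
 * adapter->inflight are the two pieces ibmvnic_free_inflight() later uses to
 * unmap and free everything if the partner disappears before the
 * REQUEST_ERROR_RSP ever arrives.
 */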
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be32_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
		complete(&adapter->init_done);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_queries) == 0)
		complete(&adapter->init_done);
		/* We're done querying the capabilities, initialize sub-crqs */
}
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}
static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	dma_addr_t trace_tok;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len =
		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		    *ppos;

	copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	*ppos += len;
	return len;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_read,
};
static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
			   loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	val = kstrtoul(buff, 10, NULL);

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= paused_read,
	.write		= paused_write,
};
static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	val = kstrtoul(buff, 10, NULL);

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= tracing_read,
	.write		= tracing_write,
};
static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	val = kstrtoul(buff, 10, NULL);

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= error_level_read,
	.write		= error_level_write,
};
static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	val = kstrtoul(buff, 10, NULL);

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};
static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	val = kstrtoul(buff, 10, NULL);

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};
static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}
static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int)
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry(error_buff, &adapter->errors,
					    list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				send_version_xchg(adapter);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
			rc = ibmvnic_reenable_crq_queue(adapter);
			if (rc)
				dev_err(dev, "Error after enable rc=%ld\n", rc);
			adapter->migrated = false;
			rc = ibmvnic_send_crq_init(adapter);
			if (rc)
				dev_err(dev, "Error sending init rc=%ld\n", rc);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev,
				   "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}
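
/* Illustrative note, not from the original source: the handler drains the CRQ,
 * re-enables the VIO interrupt, and then peeks at the queue one more time; a
 * message that landed between the last dequeue and vio_enable_interrupts()
 * would otherwise sit unprocessed until the next interrupt fires.
 */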
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}
/* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[16]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}
	ibmvnic_send_crq_init(adapter);

	init_completion(&adapter->init_done);
	wait_for_completion(&adapter->init_done);

	/* needed to pull init_sub_crqs outside of an interrupt context
	 * because it creates IRQ mappings for the subCRQ queues, causing
	 * a kernel warning
	 */
	init_sub_crqs(adapter, 0);

	reinit_completion(&adapter->init_done);
	wait_for_completion(&adapter->init_done);

	/* if init_sub_crqs is partially successful, retry */
	while (!adapter->tx_scrq || !adapter->rx_scrq) {
		init_sub_crqs(adapter, 1);

		reinit_completion(&adapter->init_done);
		wait_for_completion(&adapter->init_done);
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_debugfs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
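
/* Illustrative note, not from the original source: the entitlement computed
 * above is simply the sum of the long-term DMA mappings the driver expects to
 * hold: one page for the CRQ, the bounce buffer, the statistics block, four
 * pages per sub-CRQ, and each receive pool rounded up to the IOMMU page size.
 */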
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);