/* SPDX-License-Identifier: GPL-2.0-or-later */
/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/**************************************************************************/
#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT	16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ	4096
#define IBMVNIC_MAX_IND_DESCS	16
#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
#define IBMVNIC_TSO_POOL_MASK	0x80000000
/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx
 * pool has a set of buffers. The size of each buffer is determined by
 * the MTU.
 *
 * Each Rx/Tx pool is also associated with a DMA region that is shared
 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
 * region is also referred to as a Long Term Buffer or LTB.
 *
 * The size of the DMA region required for an Rx/Tx pool depends on the
 * number and size (MTU) of the buffers in the pool. At the max levels
 * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
 * some padding.
 *
 * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
 * kernel (about 16MB currently). To support, say, 4K jumbo frames, we
 * therefore use a set of LTBs (struct ltb_set) per pool.
 *
 * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
 *			  (must be <= IBMVNIC_ONE_LTB_MAX)
 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
 *
 * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
 * have a fixed size of IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS = 4MB.
 *
 * The Rx and Tx pools can have up to 4096 buffers. The max size of these
 * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN),
 * so we set IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
 *
 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
 * the allocation of the LTB can fail when the system is low on memory. If
 * it is too small, we would need several mappings for each of the Rx/
 * Tx/TSO pools, but there is a limit of 255 mappings per VNIC in the
 * hypervisor.
 *
 * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
 * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
 * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
 * plus 16 for the TSO pools, for a total of 176 LTB mappings per VNIC.
 */
#define IBMVNIC_ONE_LTB_MAX	((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE	min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE	(38 << 20)
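
/* Illustrative only, not driver code: a minimal sketch of how the number
 * of LTBs in an ltb_set follows from the sizes above. DIV_ROUND_UP() is
 * the kernel helper from <linux/math.h>; "nltbs" is a hypothetical local:
 *
 *	unsigned int nltbs = DIV_ROUND_UP(IBMVNIC_LTB_SET_SIZE,
 *					  IBMVNIC_ONE_LTB_SIZE);
 *
 * With 38MB sets and 8MB LTBs this gives 5, matching the 5 LTBs per
 * Rx/Tx pool described above (32 * 5 + 16 TSO = 176 mappings per VNIC).
 */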

#define IBMVNIC_BUFFER_HLEN	500
#define IBMVNIC_RESET_DELAY	100

struct ibmvnic_login_buffer {
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;
	__be32 login_rsp_ioba;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);

struct ibmvnic_login_rsp_buffer {
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload_buffer {
#define INITIAL_VERSION_IOB 1
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
} __packed __aligned(8);

struct ibmvnic_control_ip_offload_buffer {
#define INITIAL_VERSION_IOB 1
	/* ... */
} __packed __aligned(8);

struct ibmvnic_fw_component {
	__be32 trace_buff_size;
	u8 parent_correlator;
	u8 error_check_level;
} __packed __aligned(8);

struct ibmvnic_fw_trace_entry {
	__be64 pmc_registers;
	__be64 trace_data[5];
} __packed __aligned(8);

struct ibmvnic_statistics {
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
} __packed __aligned(8);

#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	/* ... */
};

#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	/* ... */
};

struct ibmvnic_acl_buffer {
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 offset_vlan_ids;
} __packed __aligned(8);

/* descriptors have been changed, how should this be defined? 1? 4? */
#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
struct ibmvnic_tx_comp_desc {
	__be32 correlators[5];
} __packed __aligned(8);

/* Some flags that were included in the v0 descriptor, which is gone.
 * Only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM, and only in
 * an offload_flags variable that doesn't seem to be used anywhere;
 * can probably be removed?
 */
#define IBMVNIC_TCP_CHKSUM	0x20
#define IBMVNIC_UDP_CHKSUM	0x08

struct ibmvnic_tx_desc {
#define IBMVNIC_TX_DESC			0x10
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
#define IBMVNIC_TX_VLAN_INSERT		0x80
} __packed __aligned(8);

struct ibmvnic_hdr_desc {
#define IBMVNIC_HDR_DESC	0x11
} __packed __aligned(8);

struct ibmvnic_hdr_ext_desc {
#define IBMVNIC_HDR_EXT_DESC	0x12
} __packed __aligned(8);

struct ibmvnic_sge_desc {
#define IBMVNIC_SGE_DESC	0x30
} __packed __aligned(8);

struct ibmvnic_rx_comp_desc {
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME		0x20
#define IBMVNIC_EXACT_MC		0x10
#define IBMVNIC_VLAN_STRIPPED		0x08
	__be16 off_frame_data;
} __packed __aligned(8);

struct ibmvnic_generic_scrq {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_rx_buff_add_desc {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
} __packed __aligned(4);

struct ibmvnic_generic_crq {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_version_exchange {
#define IBMVNIC_INITIAL_VERSION 1
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_capability {
	__be16 capability; /* one of ibmvnic_capabilities */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_login {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_phys_parms {
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC			0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX		0x08
#define IBMVNIC_FULL_DUPLEX		0x04
#define IBMVNIC_HALF_DUPLEX		0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
#define IBMVNIC_LOGICAL_LNK_ACTIVE	0x80
#define IBMVNIC_AUTONEG			0x80000000
#define IBMVNIC_10MBPS			0x40000000
#define IBMVNIC_100MBPS			0x20000000
#define IBMVNIC_1GBPS			0x10000000
#define IBMVNIC_10GBPS			0x08000000
#define IBMVNIC_40GBPS			0x04000000
#define IBMVNIC_100GBPS			0x02000000
#define IBMVNIC_25GBPS			0x01000000
#define IBMVNIC_50GBPS			0x00800000
#define IBMVNIC_200GBPS			0x00400000
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_logical_link_state {
#define IBMVNIC_LOGICAL_LNK_DN		0x00
#define IBMVNIC_LOGICAL_LNK_UP		0x01
#define IBMVNIC_LOGICAL_LNK_QUERY	0xff
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_control_ip_offload {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_statistics {
#define IBMVNIC_PHYSICAL_PORT	0x80
} __packed __aligned(8);

struct ibmvnic_error_indication {
#define IBMVNIC_FATAL_ERROR	0x80
	__be32 detail_error_sz;
} __packed __aligned(8);

struct ibmvnic_link_state_indication {
	u8 logical_link_state;
} __packed __aligned(8);

struct ibmvnic_change_mac_addr {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_multicast_ctrl {
#define IBMVNIC_ENABLE_MC	0x80
#define IBMVNIC_DISABLE_MC	0x40
#define IBMVNIC_ENABLE_ALL	0x20
#define IBMVNIC_DISABLE_ALL	0x10
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd_size {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_get_vpd_size_rsp {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_get_vpd_rsp {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_acl_change_indication {
#define IBMVNIC_MAC_ACL		0
#define IBMVNIC_VLAN_ACL	1
} __packed __aligned(8);

struct ibmvnic_acl_query {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_tune {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_request_map {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_request_map_rsp {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_unmap {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_request_unmap_rsp {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_map {
	/* ... */
} __packed __aligned(8);

struct ibmvnic_query_map_rsp {
	struct ibmvnic_rc rc;
} __packed __aligned(8);

union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};

enum ibmvnic_rc_codes {
	/* ... */
	UNSUPPORTEDOPTION = 10,
};

enum ibmvnic_capabilities {
	MIN_RX_ADD_QUEUES = 3,
	MAX_RX_ADD_QUEUES = 6,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
};

enum ibmvnic_error_cause {
	/* ... */
};

enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE_RSP = 0x95,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL_RSP = 0xA0,
};

enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD			= 0x80,
	IBMVNIC_CRQ_CMD_RSP		= 0x80,
	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
};

enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT		= 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE	= 0x02,
	IBMVNIC_PARTITION_MIGRATED	= 0x06,
	IBMVNIC_DEVICE_FAILOVER		= 0x08,
};

struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	dma_addr_t msg_token;
	/* Used for serialization of msgs, cur */
	/* ... */
};

union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	/* ... */
};

struct ibmvnic_sub_crq_queue {
	dma_addr_t msg_token;
	unsigned long crq_num;
	unsigned long hw_irq;
	unsigned int pool_index;
	/* Used for serialization of msgs, cur */
	/* ... */
	struct sk_buff *rx_skb_top;
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	cpumask_var_t affinity_mask;
} ____cacheline_aligned;

struct ibmvnic_long_term_buff {
	/* ... */
};

struct ibmvnic_ltb_set {
	struct ibmvnic_long_term_buff *ltbs;
};

struct ibmvnic_tx_buff {
	/* ... */
};

struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	/* ... */
	struct ibmvnic_ltb_set ltb_set;
	/* ... */
} ____cacheline_aligned;

struct ibmvnic_rx_buff {
	/* ... */
};

struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size; /* # of buffers in the pool */
	/* ... */
	struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;

enum vnic_state {
	VNIC_PROBING = 1,
	/* ... */
};

enum ibmvnic_reset_reason {
	VNIC_RESET_FAILOVER = 1,
	/* ... */
	VNIC_RESET_NON_FATAL,
	/* ... */
	VNIC_RESET_CHANGE_PARAM,
	VNIC_RESET_PASSIVE_INIT,
};

struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

struct ibmvnic_tunables {
	/* ... */
};

struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;

	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;

	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int logical_link_state;

	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;
	struct completion probe_done;
	struct completion init_done;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
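
	/* Illustrative only, not driver code: a sketch of the
	 * command/response pattern these two fields suggest, with fw_lock
	 * serializing commands and fw_done signalling the response (the
	 * CRQ send step is elided and assumed):
	 *
	 *	mutex_lock(&adapter->fw_lock);
	 *	reinit_completion(&adapter->fw_done);
	 *	...send a CRQ command...
	 *	wait_for_completion(&adapter->fw_done);
	 *	mutex_unlock(&adapter->fw_lock);
	 */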

	struct completion reset_done;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* partner capabilities */
	u64 min_rx_add_queues;
	u64 max_rx_add_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
#define MAX_MAP_ID 255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t rwi_lock;
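
	/* Illustrative only, not driver code: a caller honoring the
	 * documented ordering takes state_lock before rwi_lock and
	 * releases in reverse ("flags" is a hypothetical local):
	 *
	 *	spin_lock_irqsave(&adapter->state_lock, flags);
	 *	spin_lock(&adapter->rwi_lock);
	 *	...inspect adapter->state, queue onto adapter->rwi_list...
	 *	spin_unlock(&adapter->rwi_lock);
	 *	spin_unlock_irqrestore(&adapter->state_lock, flags);
	 */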
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool from_passive_init;

	/* protected by rcu */
	bool tx_queues_active;
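
	/* Illustrative only, an assumption about the RCU usage implied by
	 * the comment above: readers sample tx_queues_active inside an RCU
	 * read-side section, and a writer clearing it would follow with
	 * synchronize_rcu() before draining the queues:
	 *
	 *	rcu_read_lock();
	 *	if (adapter->tx_queues_active)
	 *		...queue the skb...
	 *	rcu_read_unlock();
	 */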
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};