/*
 * vxge-config.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
 * PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 */

FILE_LICENCE(GPL2_ONLY);
#include <gpxe/list.h>
#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 4096
#endif
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define VXGE_HW_MAC_MAX_WIRE_PORTS	2
#define VXGE_HW_MAC_MAX_AGGR_PORTS	2
#define VXGE_HW_MAC_MAX_PORTS		3

#define VXGE_HW_MIN_MTU			68
#define VXGE_HW_MAX_MTU			9600
#define VXGE_HW_DEFAULT_MTU		1500
#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif
/*
 * debug filtering masks
 */
#define VXGE_NONE	0x00
#define VXGE_INFO	0x01
#define VXGE_INTR	0x02
#define VXGE_XMIT	0x04
#define VXGE_POLL	0x08
#define VXGE_ERR	0x10
#define VXGE_TRACE	0x20
#define VXGE_ALL	(VXGE_INFO|VXGE_INTR|VXGE_XMIT \
			|VXGE_POLL|VXGE_ERR|VXGE_TRACE)
#define NULL_VPID			0xFFFFFFFF

#define VXGE_HW_EVENT_BASE		0
#define VXGE_LL_EVENT_BASE		100

#define VXGE_HW_BASE_INF		100
#define VXGE_HW_BASE_ERR		200
#define VXGE_HW_BASE_BADCFG		300
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS	1000
#define VXGE_HW_MAX_PAYLOAD_SIZE_512	2
enum vxge_hw_status {
	VXGE_HW_OK				= 0,
	VXGE_HW_FAIL				= 1,
	VXGE_HW_PENDING				= 2,
	VXGE_HW_COMPLETIONS_REMAIN		= 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS		= VXGE_HW_BASE_INF + 2,
	VXGE_HW_INF_SW_LRO_BEGIN		= VXGE_HW_BASE_INF + 3,
	VXGE_HW_INF_SW_LRO_CONT			= VXGE_HW_BASE_INF + 4,
	VXGE_HW_INF_SW_LRO_UNCAPABLE		= VXGE_HW_BASE_INF + 5,
	VXGE_HW_INF_SW_LRO_FLUSH_SESSION	= VXGE_HW_BASE_INF + 6,
	VXGE_HW_INF_SW_LRO_FLUSH_BOTH		= VXGE_HW_BASE_INF + 7,

	VXGE_HW_ERR_INVALID_HANDLE		= VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY		= VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE		= VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN		= VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ			= VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL		= VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE		= VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX		= VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE		= VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET		= VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE		= VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT		= VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO		= VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE		= VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE		= VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE		= VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION	= VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT		= VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO			= VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH			= VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL			= VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE			= VXGE_HW_BASE_ERR + 22,
	VXGE_HW_ERR_INVALID_MIN_BANDWIDTH	= VXGE_HW_BASE_ERR + 25,
	VXGE_HW_ERR_INVALID_MAX_BANDWIDTH	= VXGE_HW_BASE_ERR + 26,
	VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH	= VXGE_HW_BASE_ERR + 27,
	VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT	= VXGE_HW_BASE_ERR + 28,
	VXGE_HW_ERR_RESET_IN_PROGRESS		= VXGE_HW_BASE_ERR + 29,
	VXGE_HW_ERR_OUT_OF_SPACE		= VXGE_HW_BASE_ERR + 30,
	VXGE_HW_ERR_INVALID_FUNC_MODE		= VXGE_HW_BASE_ERR + 31,
	VXGE_HW_ERR_INVALID_DP_MODE		= VXGE_HW_BASE_ERR + 32,
	VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR	= VXGE_HW_BASE_ERR + 33,
	VXGE_HW_ERR_INVALID_L2_SWITCH_STATE	= VXGE_HW_BASE_ERR + 34,
	VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE	= VXGE_HW_BASE_ERR + 35,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS	= VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS		= VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU		= VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG	= VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH	= VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT	= VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_INTR_MODE		= VXGE_HW_BASE_BADCFG + 7,
	VXGE_HW_BADCFG_RTS_MAC_EN		= VXGE_HW_BASE_BADCFG + 8,
	VXGE_HW_BADCFG_VPATH_AGGR_ACK		= VXGE_HW_BASE_BADCFG + 9,
	VXGE_HW_BADCFG_VPATH_PRIORITY		= VXGE_HW_BASE_BADCFG + 10,

	VXGE_HW_EOF_TRACE_BUF			= -1
};
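/*
 * Illustrative sketch (not in the original driver): the base offsets
 * above band the return codes into informational, error and
 * bad-configuration groups, so a status can be classified by range.
 * The helper name is hypothetical.
 */
static inline int vxge_hw_status_example_is_err(enum vxge_hw_status status)
{
	/* error codes occupy [VXGE_HW_BASE_ERR, VXGE_HW_BASE_BADCFG) */
	return (status >= VXGE_HW_BASE_ERR && status < VXGE_HW_BASE_BADCFG);
}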
/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};
/* forward declaration */
struct __vxge_hw_virtualpath;
/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 *
 * One buffer mode RxD for ring structure
 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)
#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)
#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)
#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)
#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)
#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)			vxge_vBIT(val, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED
#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)
#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)	vxge_bVALn(ctrl0, 17, 1)
#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)
#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)		vxge_bVALn(ctrl0, 19, 1)
#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)
#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)	vxge_bVALn(ctrl0, 24, 1)
#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)	vxge_bVALn(ctrl0, 25, 2)
#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)	vxge_bVALn(ctrl0, 27, 5)
#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)
#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val)		vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK		vxge_vBIT(0x3FFF, 2, 14)
#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)	vxge_bVALn(ctrl1, 16, 32)
#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};
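/*
 * Illustrative sketch (not in the original driver): decoding a receive
 * completion with the accessors above. The descriptor belongs to the
 * driver again once the adapter has cleared the ownership bit; the
 * transfer code then reports the frame status (0x0 is the good-frame
 * case). The function name is hypothetical.
 */
static inline int vxge_hw_ring_rxd_1_example_tcode(
		struct vxge_hw_ring_rxd_1 *rxdp)
{
	u64 ctrl0 = rxdp->control_0;

	if (ctrl0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
		return -1;	/* still owned by the adapter */

	return (int)VXGE_HW_RING_RXD_T_CODE_GET(ctrl0);
}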
/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 *
 * Transmit descriptor (TxD). A fifo descriptor contains a configured
 * number (list) of TxDs. For more details please refer to Titan User
 * Guide, Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER	vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)	vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val)		vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED		VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)	vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST	VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST	VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN			vxge_mBIT(30)
#define VXGE_HW_FIFO_TXD_LSO_MSS(val)		vxge_vBIT(val, 34, 14)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)	vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN		vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN		vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN		vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE		vxge_mBIT(15)
#define VXGE_HW_FIFO_TXD_VLAN_TAG(val)		vxge_vBIT(val, 16, 16)
#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT		vxge_mBIT(43)
#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)	vxge_vBIT(val, 34, 6)
#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST	vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ		vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};
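/*
 * Illustrative sketch (not in the original driver): filling a
 * single-buffer TxD with the field builders above. gPXE posts one TxD
 * per list, so the descriptor carries both the FIRST and LAST gather
 * codes (assumption: OR-ing the two codes yields the combined
 * first-and-last encoding). Ownership passes to the adapter when the
 * doorbell below is written. "dma_addr" and "len" are assumed inputs;
 * the function name is hypothetical.
 */
static inline void vxge_hw_fifo_txd_example_fill(
		struct vxge_hw_fifo_txd *txdp, u64 dma_addr, unsigned int len)
{
	txdp->buffer_pointer = dma_addr;
	txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
			  VXGE_HW_FIFO_TXD_BUFFER_SIZE(len);
	txdp->control_1 = 0;
}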
/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */
#define VXGE_HW_FW_STRLEN	32
struct vxge_hw_device_date {
	u32 day;
	u32 month;
	u32 year;
	char date[VXGE_HW_FW_STRLEN];
};
struct vxge_hw_device_version {
	u32 major;
	u32 minor;
	u32 build;
	char version[VXGE_HW_FW_STRLEN];
};
u64
__vxge_hw_vpath_pci_func_mode_get(
	u32 vp_id,
	struct vxge_hw_vpath_reg __iomem *vpath_reg);
/**
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr: The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64		control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0)			vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val)				vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW			0

#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)	vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val)		vxge_vBIT(val, 32, 8)

#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)		vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val)		vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE	0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ	0x1

	u64		txdl_ptr;
};
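/*
 * Illustrative sketch (not in the original driver): composing a
 * non-offload doorbell for a single-TxD list, per the bit layout
 * documented above. No-snoop flags are left clear here. "txdl_dma" is
 * an assumed input; the function name is hypothetical.
 */
static inline void __vxge_hw_nodbw_example_fill(
		struct __vxge_hw_non_offload_db_wrapper *wrapper,
		u64 txdl_dma)
{
	wrapper->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
			     VXGE_HW_NODBW_LAST_TXD_NUMBER(0);
	wrapper->txdl_ptr = txdl_dma;
}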
/**
 * struct __vxge_hw_fifo - Fifo.
 * @vp_id: Virtual path id
 * @tx_intr_num: Interrupt Number associated with the TX
 * @txdl: Start pointer of the txdl list of this fifo.
 *        gPXE does not support tx fragmentation, so we need
 *        only one txd in a list
 * @depth: total number of lists in this fifo
 * @hw_offset: txd index from where adapter owns the txd list
 * @sw_offset: txd index from where driver owns the txd list
 * @stats: Statistics of this fifo
 */
struct __vxge_hw_fifo {
	struct vxge_hw_vpath_reg		*vp_reg;
	struct __vxge_hw_non_offload_db_wrapper	*nofl_db;
	u32					vp_id;
	u32					tx_intr_num;

	struct vxge_hw_fifo_txd			*txdl;
#define VXGE_HW_FIFO_TXD_DEPTH 128
	u16					depth;
	u16					hw_offset;
	u16					sw_offset;

	struct __vxge_hw_virtualpath		*vpathh;
};
/* Structure that represents one RxD block: a single 4KB page holding
 * 128 RxD slots, of which the first 127 are usable Rx descriptors and
 * the last slot is reused to link to the next block.
 */
struct __vxge_hw_ring_block {
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1	127
	struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];

	u64 reserved_0;
#define END_OF_BLOCK	0xFEFFFFFFFFFFFFFFULL
	/* 0xFEFFFFFFFFFFFFFF to mark last RxD in this block */
	u64 reserved_1;
	/* Logical ptr to next */
	u64 reserved_2_pNext_RxD_block;
	/* Buff0_ptr. In a 32 bit arch the upper 32 bits should be 0 */
	u64 pNext_RxD_Blk_physical;
};
/**
 * struct __vxge_hw_ring - Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 *       CPU cache performance.
 */
struct __vxge_hw_ring {
	struct vxge_hw_vpath_reg	*vp_reg;
	struct vxge_hw_common_reg	*common_reg;
	u32				vp_id;
#define VXGE_HW_RING_RXD_QWORDS_MODE_1	4
	u32				doorbell_cnt;
	u32				total_db_cnt;
#define VXGE_HW_RING_RXD_QWORD_LIMIT	16
	u64				rxd_qword_limit;

	struct __vxge_hw_ring_block	*rxdl;
#define VXGE_HW_RING_BUF_PER_BLOCK	9
	u16				buf_per_block;
	u16				rxd_offset;

#define VXGE_HW_RING_RX_POLL_WEIGHT	8
	u16				rx_poll_weight;

	struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
	struct __vxge_hw_virtualpath	*vpathh;
};
/**
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32					vp_id;

	u32					vp_open;
#define VXGE_HW_VP_NOT_OPEN	0
#define VXGE_HW_VP_OPEN		1

	struct __vxge_hw_device			*hldev;
	struct vxge_hw_vpath_reg		*vp_reg;
	struct vxge_hw_vpmgmt_reg		*vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper	*nofl_db;

	struct __vxge_hw_ring			ringh;
	struct __vxge_hw_fifo			fifoh;
};
#define VXGE_HW_INFO_LEN	64
#define VXGE_HW_PMD_INFO_LEN	16
#define VXGE_MAX_PRINT_BUF_SIZE	128
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware date
 * @flash_version: Flash version
 * @flash_date: Flash date
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first mac address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32 host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION	0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION	1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0		2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION	3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG	4
#define VXGE_HW_SR_VH_FUNCTION0			5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION		6
#define VXGE_HW_VH_NORMAL_FUNCTION		7

	u64 function_mode;
#define VXGE_HW_FUNCTION_MODE_MIN		0
#define VXGE_HW_FUNCTION_MODE_MAX		10

#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION	0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION	1
#define VXGE_HW_FUNCTION_MODE_SRIOV		2
#define VXGE_HW_FUNCTION_MODE_MRIOV		3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8		4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17	5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8		6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4		7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2	8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4	9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4		10

	u32 func_id;
	u64 vpath_mask;

	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date    fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date    flash_date;

	u8 serial_number[VXGE_HW_INFO_LEN];
	u8 part_number[VXGE_HW_INFO_LEN];
	u8 product_desc[VXGE_HW_INFO_LEN];

	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
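/*
 * Illustrative sketch (not in the original driver): finding the first
 * vpath granted to this function by walking @vpath_mask, which is how
 * callers of vxge_hw_device_hw_info_get() typically pick their MAC
 * address. The helper name is hypothetical; vxge_mBIT() is the
 * MSB-first bit macro used throughout this file.
 */
static inline int vxge_hw_hw_info_example_first_vpath(
		struct vxge_hw_device_hw_info *hw_info)
{
	int i;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (hw_info->vpath_mask & vxge_mBIT(i))
			return i;	/* first vpath allocated to us */
	}
	return -1;	/* no vpath allocated */
}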
/**
 * struct __vxge_hw_device - HAL device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
	u32				magic;
#define VXGE_HW_DEVICE_MAGIC		0x12345678
#define VXGE_HW_DEVICE_DEAD		0xDEADDEAD

	void __iomem			*bar0;
	struct pci_device		*pdev;
	struct net_device		*ndev;
	struct vxgedev			*vdev;

	enum vxge_hw_device_link_state	link_state;

	u32				host_type;
	u32				func_id;
	u32				access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH	0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM	0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM	0x4
	struct vxge_hw_legacy_reg	*legacy_reg;
	struct vxge_hw_toc_reg		*toc_reg;
	struct vxge_hw_common_reg	*common_reg;
	struct vxge_hw_mrpcim_reg	*mrpcim_reg;
	struct vxge_hw_srpcim_reg	*srpcim_reg[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg	*vpmgmt_reg[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg	*vpath_reg[VXGE_HW_TITAN_VPATH_REG_SPACES];

	struct __vxge_hw_virtualpath	virtual_path;
	u64				vpath_assignments;

	u64				tim_int_mask0[4];
	u32				tim_int_mask1[4];

	struct vxge_hw_device_hw_info	hw_info;
};
#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
	} else {					\
		m1[0] = 0x80000000;			\
		m1[1] = 0x40000000;			\
	}						\
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
	} else {					\
		m1[0] = 0;				\
		m1[1] = 0;				\
	}						\
}
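/*
 * Illustrative usage (hypothetical values): enable the TIM interrupt
 * masks for virtual path 0 of a device "hldev" during setup, and clear
 * them again on teardown:
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
 *					hldev->tim_int_mask1, 0);
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
 *					hldev->tim_int_mask1, 0);
 */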
/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE		= 0,
	VXGE_HW_TXDL_STATE_AVAIL	= 1,
	VXGE_HW_TXDL_STATE_POSTED	= 2,
	VXGE_HW_TXDL_STATE_FREED	= 3
};
/* fifo and ring circular buffer offset tracking apis */
static inline void __vxge_hw_desc_offset_up(u16 upper_limit,
		u16 *offset)
{
	if (++(*offset) >= upper_limit)
		*offset = 0;
}

/* rxd offset handling apis */
static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
{
	__vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1,
			offset);
}

/* txd offset handling apis */
static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
{
	__vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
}
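/*
 * Illustrative usage (hypothetical): after reclaiming a completed TxD,
 * advance the fifo's software index; the helpers above wrap it back to
 * 0 at the configured depth, giving circular-buffer semantics:
 *
 *	vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
 */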
/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdp: Descriptor pointer.
 * @iob: I/O buffer holding the single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should be already mapped to the device.
 * @size: Size of the receive buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
		struct io_buffer *iob, u32 size)
{
	rxdp->host_control = (intptr_t)(iob);
	rxdp->buffer0_ptr = virt_to_bus(iob->data);
	rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
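/*
 * Illustrative usage (hypothetical): refilling one ring slot with a
 * fresh I/O buffer. alloc_iob() and iob_tailroom() are gPXE helpers
 * from <gpxe/iobuf.h>; error handling is trimmed to a sketch.
 *
 *	struct io_buffer *iob = alloc_iob(VXGE_HW_DEFAULT_MTU);
 *
 *	if (iob)
 *		vxge_hw_ring_rxd_1b_set(rxdp, iob, iob_tailroom(iob));
 */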
enum vxge_hw_status vxge_hw_device_hw_info_get(
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info);
/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
		struct __vxge_hw_device *devh)
{
	return devh->link_state;
}
void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	void *bar0,
	struct pci_device *pdev,
	u8 titan1);

enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);

enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
enum vxge_hw_status
vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);

void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
			struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_device_register_poll(
	void __iomem *reg,
	u64 mask, u32 max_millis);
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;

	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}

static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
		u64 mask, u32 max_millis)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	wmb();
	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
	wmb();

	status = __vxge_hw_device_register_poll(addr, mask, max_millis);
	return status;
}
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0);

enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);

void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_vpath_pci_read(
	struct __vxge_hw_virtualpath *vpath,
	u32 phy_func_0, u32 offset, u32 *val);

enum vxge_hw_status
__vxge_hw_vpath_addr_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN]);
u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/*
 * vxge_debug
 * @mask: mask for the debug
 * @fmt: printf like format string
 */
static const u16 debug_filter = VXGE_ERR;
#define vxge_debug(mask, fmt...)	do {	\
		if (debug_filter & mask)	\
			DBG(fmt);		\
	} while (0);

#define vxge_trace()	vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__);
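/*
 * Illustrative usage (hypothetical): trace entry into a function and
 * report an error path; output appears only when the corresponding bit
 * is set in debug_filter above.
 *
 *	vxge_trace();
 *	vxge_debug(VXGE_ERR, "%s: device reset failed\n", __func__);
 */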
enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);

enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
		u64 vp_id, u32 action,
		u32 offset, u64 data0, u64 data1);

void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);