/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/crash_dump.h>
#include <linux/thermal.h>
57 #include "t4_chip_type.h"
58 #include "cxgb4_uld.h"
61 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
62 extern struct list_head adapter_list
;
63 extern struct mutex uld_mutex
;

/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
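
/* Worked reading of the arithmetic above (added commentary, not new driver
 * behaviour): SGL entries pack two buffers into three 8-byte flits, so
 * MAX_SKB_FRAGS buffers need (3 * MAX_SKB_FRAGS) / 2 flits, plus one extra
 * slot when the count is odd.  DIV_ROUND_UP(..., 8) converts flits into
 * 8-flit Tx descriptors, and the leading "1 +" covers the descriptor that
 * holds the work request and CPL headers.  With a typical MAX_SKB_FRAGS of
 * 17 this evaluates to 1 + DIV_ROUND_UP(26, 8) = 5 descriptors.
 */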

#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))
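
/* A hedged usage sketch (the call site is illustrative, not lifted from the
 * driver): composing a firmware device-parameter mnemonic and querying it
 * through the mailbox API declared later in this header.
 *
 *	u32 param = FW_PARAM_DEV(CCLK);
 *	u32 val;
 *	int ret;
 *
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 *
 * On success, val holds the queried parameter (here the core clock).
 */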

enum {
	MAX_NPORTS	= 4,	/* max # of ports */
	SERNUM_LEN	= 24,	/* Serial # length */
	EC_LEN		= 16,	/* E/C length */
	ID_LEN		= 16,	/* ID length */
	PN_LEN		= 16,	/* Part Number length */
	MACADDR_LEN	= 12,	/* MAC Address length */
};

enum {
	T4_REGMAP_SIZE = (160 * 1024),
	T5_REGMAP_SIZE = (332 * 1024),
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_APERTURE_T5 = 131072,
	MEMWIN2_BASE_T5  = 0x60000,
};

enum cc_pause {
	PAUSE_RX	= 1 << 0,
	PAUSE_TX	= 1 << 1,
	PAUSE_AUTONEG	= 1 << 2
};

enum cc_fec {
	FEC_AUTO	= 1 << 0,	/* IEEE 802.3 "automatic" */
	FEC_RS		= 1 << 1,	/* Reed-Solomon */
	FEC_BASER_RS	= 1 << 2	/* BaseR/Reed-Solomon */
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 frames_1024_1518;
};

struct tp_tcp_stats {
	u32 tcp_out_rsts;
	u64 tcp_in_segs;
	u64 tcp_out_segs;
	u64 tcp_retrans_segs;
};

struct tp_usm_stats {
	u32 frames;
	u32 drops;
	u64 octets;
};

struct tp_fcoe_stats {
	u32 frames_ddp;
	u32 frames_drop;
	u64 octets_ddp;
};

struct tp_err_stats {
	u32 tnl_cong_drops[4];
	u32 ofld_chan_drops[4];
	u32 ofld_vlan_drops[4];
};

struct tp_cpl_stats {
	u32 req[4];
	u32 rsp[4];
};

struct tp_rdma_stats {
	u32 rqe_dfr_pkt;
	u32 rqe_dfr_mod;
};

struct sge_params {
	u32 hps;			/* host page size for our PF/VF */
	u32 eq_qpp;			/* egress queues/page for our PF/VF */
	u32 iq_qpp;			/* ingress queues/page for our PF/VF */
};

struct tp_params {
	unsigned int tre;            /* log2 of core clocks per TP tick */
	unsigned int la_mask;        /* what events are recorded by TP LA */
	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
				     /* channel map */

	uint32_t dack_re;            /* DACK timer resolution */
	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */

	u32 vlan_pri_map;            /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;          /* cached TP_INGRESS_CONFIG */

	/* cached TP_OUT_CONFIG compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	u32 rx_pkt_encap;

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int fcoe_shift;
	int port_shift;
	int vnic_shift;
	int vlan_shift;
	int tos_shift;
	int protocol_shift;
	int ethertype_shift;
	int macmatch_shift;
	int matchtype_shift;
	int frag_shift;

	u64 hash_filter_mask;
};

struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
	u8 pn[PN_LEN + 1];
	u8 na[MACADDR_LEN + 1];
};

/* Maximum resources provisioned for a PCI PF.
 */
struct pf_resources {
	unsigned int nvi;		/* N virtual interfaces */
	unsigned int neq;		/* N egress Qs */
	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
	unsigned int niq;		/* N ingress Qs */
	unsigned int tc;		/* PCI-E traffic class */
	unsigned int pmask;		/* port access rights mask */
	unsigned int nexactf;		/* N exact MPS filters */
	unsigned int r_caps;		/* read capabilities */
	unsigned int wx_caps;		/* write/execute capabilities */
};

struct pci_params {
	unsigned int vpd_cap_addr;
	unsigned char speed;
	unsigned char width;
};

struct devlog_params {
	u32 memtype;			/* which memory (EDC0, EDC1, MC) */
	u32 start;			/* start of log in firmware memory */
	u32 size;			/* size of log */
};

/* Stores chip specific parameters */
struct arch_specific_params {
	u8 cng_ch_bits_log;		/* congestion channel map bits width */
};

struct adapter_params {
	struct sge_params sge;
	struct tp_params tp;
	struct vpd_params vpd;
	struct pf_resources pfres;
	struct pci_params pci;
	struct devlog_params devlog;
	enum pcie_memwin drv_memwin;

	unsigned int cim_la_size;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */

	unsigned int fw_vers;		  /* firmware version */
	unsigned int bs_vers;		  /* bootstrap version */
	unsigned int tp_vers;		  /* TP microcode version */
	unsigned int er_vers;		  /* expansion ROM version */
	unsigned int scfg_vers;		  /* Serial Configuration version */
	unsigned int vpd_vers;		  /* VPD Version */

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;		  /* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;		  /* chip code */
	struct arch_specific_params arch; /* chip specific params */
	unsigned char offload;
	unsigned char crypto;		  /* HW capability for crypto */
	unsigned char ethofld;		  /* QoS support */

	unsigned char bypass;
	unsigned char hash_filter;

	unsigned int ofldq_wr_cred;
	bool ulptx_memwrite_dsgl;	  /* use of T5 DSGL allowed */

	unsigned int nsched_cls;	  /* number of traffic classes */
	unsigned int max_ordird_qp;	  /* Max read depth per RDMA QP */
	unsigned int max_ird_adapter;	  /* Max read depth per adapter */
	bool fr_nsmr_tpte_wr_support;	  /* FW support for FR_NSMR_TPTE_WR */
	u8 fw_caps_support;		  /* 32-bit Port Capabilities */
	bool filter2_wr_support;	  /* FW support for FILTER2_WR */
	unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */

	/* MPS Buffer Group Map[per Port].  Bit i is set if buffer group i is
	 * used by the Port.
	 */
	u8 mps_bg_map[MAX_NPORTS];	/* MPS Buffer Group Map */
	bool write_w_imm_support;	/* FW supports WRITE_WITH_IMMEDIATE */
	bool write_cmpl_support;	/* FW supports WRITE_CMPL */
};

/* State needed to monitor the forward progress of SGE Ingress DMA activities
 * and possible hangs.
 */
struct sge_idma_monitor_state {
	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
	unsigned int idma_state[2];	/* IDMA Hang detect state */
	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
	unsigned int idma_warn[2];	/* time to warning in HZ */
};

/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
 * The access and execute times are signed in order to accommodate negative
 * error returns.
 */
struct mbox_cmd {
	u64 cmd[MBOX_LEN / 8];		/* a Firmware Mailbox Command/Reply */
	u64 timestamp;			/* OS-dependent timestamp */
	u32 seqno;			/* sequence number */
	s16 access;			/* time (ms) to access mailbox */
	s16 execute;			/* time (ms) to execute */
};

struct mbox_cmd_log {
	unsigned int size;		/* number of entries in the log */
	unsigned int cursor;		/* next position in the log to write */
	u32 seqno;			/* next sequence number */
	/* variable length mailbox command log starts here */
};

/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
 * return a pointer to the specified entry.
 */
static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
						  unsigned int entry_idx)
{
	return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
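
/* Layout sketch (an assumption drawn from the pointer arithmetic above, not
 * an API defined in this header): the log header and its entries share one
 * contiguous allocation, with entries beginning immediately past the header,
 * which is why the helper indexes off &log[1].
 *
 *	log = kzalloc(sizeof(*log) + size * sizeof(struct mbox_cmd),
 *		      GFP_KERNEL);
 *	log->size = size;
 *	entry = mbox_cmd_log_entry(log, log->cursor);
 */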
469 #include "t4fw_api.h"
471 #define FW_VERSION(chip) ( \
472 FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
473 FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
474 FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
475 FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
476 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};

struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
};

/* Firmware Port Capabilities types. */

typedef u16 fw_port_cap16_t;	/* 16-bit Port Capabilities integral value */
typedef u32 fw_port_cap32_t;	/* 32-bit Port Capabilities integral value */

enum fw_caps {
	FW_CAPS_UNKNOWN	= 0,	/* 0'ed out initial state */
	FW_CAPS16	= 1,	/* old Firmware: 16-bit Port Capabilities */
	FW_CAPS32	= 2,	/* new Firmware: 32-bit Port Capabilities */
};

struct link_config {
	fw_port_cap32_t pcaps;		/* link capabilities */
	fw_port_cap32_t def_acaps;	/* default advertised capabilities */
	fw_port_cap32_t acaps;		/* advertised capabilities */
	fw_port_cap32_t lpacaps;	/* peer advertised capabilities */

	fw_port_cap32_t speed_caps;	/* speed(s) user has requested */
	unsigned int speed;		/* actual link speed (Mb/s) */

	enum cc_pause requested_fc;	/* flow control user has requested */
	enum cc_pause fc;		/* actual link flow control */
	enum cc_pause advertised_fc;	/* actual advertised flow control */

	enum cc_fec requested_fec;	/* Forward Error Correction: */
	enum cc_fec fec;		/* requested and actual in use */

	unsigned char autoneg;		/* autonegotiating? */

	unsigned char link_ok;		/* link up? */
	unsigned char link_down_rc;	/* link down reason */

	bool new_module;		/* ->OS Transceiver Module inserted */
	bool redo_l1cfg;		/* ->CC redo current "sticky" L1 CFG */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx, iscsi Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
};

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
};

enum {
	MAX_TXQ_DESC_SIZE      = 64,
	MAX_RXQ_DESC_SIZE      = 128,
	MAX_FL_DESC_SIZE       = 8,
	MAX_CTRL_TXQ_DESC_SIZE = 64,
};

enum {
	INGQ_EXTRAS = 2,        /* firmware event queue and */
				/* forwarded interrupts */
	MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};

enum {
	PRIV_FLAG_PORT_TX_VM_BIT,
};

#define PRIV_FLAG_PORT_TX_VM		BIT(PRIV_FLAG_PORT_TX_VM_BIT)

#define PRIV_FLAGS_ADAP			0
#define PRIV_FLAGS_PORT			PRIV_FLAG_PORT_TX_VM
576 #include "cxgb4_dcb.h"
578 #ifdef CONFIG_CHELSIO_T4_FCOE
579 #include "cxgb4_fcoe.h"
580 #endif /* CONFIG_CHELSIO_T4_FCOE */

struct port_info {
	struct adapter *adapter;
	int xact_addr_filt;		/* index of exact MAC address filter */
	u16 rss_size;			/* size of VI's RSS table slice */
	enum fw_port_type port_type;
	u8 lport;			/* associated offload logical port */
	u8 nqsets;			/* # of qsets */
	u8 first_qset;			/* index of first qset */
	struct link_config link_cfg;
	struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_dcb_info dcb;	/* Data Center Bridging support */
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
	bool rxtstamp;			/* Enable TS */
	struct hwtstamp_config tstamp_config;
	struct sched_table *sched_tbl;

	/* viid and smt fields either returned by fw
	 * or decoded by parsing viid by driver.
	 */
	u8 vin;
	u8 vivld;
	u8 smt_idx;

	bool tc_block_shared;
};

enum {                                 /* adapter flags */
	CXGB4_FULL_INIT_DONE		= (1 << 0),
	CXGB4_DEV_ENABLED		= (1 << 1),
	CXGB4_USING_MSI			= (1 << 2),
	CXGB4_USING_MSIX		= (1 << 3),
	CXGB4_FW_OK			= (1 << 4),
	CXGB4_RSS_TNLALLLOOKUP		= (1 << 5),
	CXGB4_USING_SOFT_PARAMS		= (1 << 6),
	CXGB4_MASTER_PF			= (1 << 7),
	CXGB4_FW_OFLD_CONN		= (1 << 9),
	CXGB4_ROOT_NO_RELAXED_ORDERING	= (1 << 10),
	CXGB4_SHUTTING_DOWN		= (1 << 11),
	CXGB4_SGE_DBQ_TIMER		= (1 << 12),
};
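
/* Illustrative usage note (added commentary): these values are bit flags
 * OR'ed into the adapter's flags word, so state tests combine them with a
 * bitwise AND, e.g.
 *
 *	if (adap->flags & CXGB4_FULL_INIT_DONE)
 *		...
 */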

enum {
	ULP_CRYPTO_LOOKASIDE = 1 << 0,
	ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
};

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long mapping_err;  /* # of RX Buffer DMA Mapping failures */
	unsigned long low;          /* # of times momentarily starving */
	unsigned long starving;

	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

/* A packet gather list */
struct pkt_gl {
	u64 sgetstamp;		    /* SGE Time Stamp for Ingress Packet */
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                   /* virtual address of first byte */
	unsigned int nfrags;        /* # of fragments */
	unsigned int tot_len;       /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);
typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);

/* LRO related declarations for ULD */
struct t4_lro_mgr {
#define MAX_LRO_SESSIONS		64
	u8 lro_session_cnt;         /* # of sessions to aggregate */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	struct sk_buff_head lroq;   /* list of aggregated sessions */
};

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
	rspq_flush_handler_t flush_handler;
	struct t4_lro_mgr lro_mgr;
};

struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
	unsigned long bad_rx_pkts;  /* # of packets with err_vec!=0 */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
	struct msix_info *msix;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
	struct msix_info *msix;
} ____cacheline_aligned_in_smp;

struct tx_sw_desc {                 /* SW state per Tx descriptor */
	struct sk_buff *skb;        /* SKB to free after getting completion */
	dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
};

struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  q_type;       /* Q type Eth/Ctrl/Ofld */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
	unsigned short db_pidx;
	unsigned short db_pidx_inc;
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_prio;                /* DCB Priority bound to queue */
#endif
	u8 dbqt;                    /* SGE Doorbell Queue Timer in use */
	unsigned int dbqtimerix;    /* SGE Doorbell Queue Timer Index */
	unsigned long tso;          /* # of TSO requests */
	unsigned long uso;          /* # of USO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_uld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	bool service_ofldq_running; /* service_ofldq() is processing sendq */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge_uld_rxq_info {
	char name[IFNAMSIZ];	/* name of ULD driver */
	struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
	u16 *rspq_id;		/* response queue id's of rxq */
	u16 nrxq;		/* # of ingress uld queues */
	u16 nciq;		/* # of completion queues */
	u8 uld;			/* uld type */
};

struct sge_uld_txq_info {
	struct sge_uld_txq *uldtxq; /* Txq's for ULD */
	atomic_t users;		/* num users */
	u16 ntxq;		/* # of egress uld queues */
};

enum sge_eosw_state {
	CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
	CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
	CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
	CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
	CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
	CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
};

struct sge_eosw_txq {
	spinlock_t lock; /* Per queue lock to synchronize completions */
	enum sge_eosw_state state; /* Current ETHOFLD State */
	struct tx_sw_desc *desc; /* Descriptor ring to hold packets */
	u32 ndesc; /* Number of descriptors */
	u32 pidx; /* Current Producer Index */
	u32 last_pidx; /* Last successfully transmitted Producer Index */
	u32 cidx; /* Current Consumer Index */
	u32 last_cidx; /* Last successfully reclaimed Consumer Index */
	u32 flowc_idx; /* Descriptor containing a FLOWC request */
	u32 inuse; /* Number of packets held in ring */

	u32 cred; /* Current available credits */
	u32 ncompl; /* # of completions posted */
	u32 last_compl; /* # of credits consumed since last completion req */

	u32 eotid; /* Index into EOTID table in software */
	u32 hwtid; /* Hardware EOTID index */

	u32 hwqid; /* Underlying hardware queue index */
	struct net_device *netdev; /* Pointer to netdevice */
	struct tasklet_struct qresume_tsk; /* Restarts the queue */
	struct completion completion; /* completion for FLOWC rendezvous */
};

struct sge_eohw_txq {
	spinlock_t lock; /* Per queue lock */
	struct sge_txq q; /* HW Txq */
	struct adapter *adap; /* Backpointer to adapter */
	unsigned long tso; /* # of TSO requests */
	unsigned long uso; /* # of USO requests */
	unsigned long tx_cso; /* # of Tx checksum offloads */
	unsigned long vlan_ins; /* # of Tx VLAN insertions */
	unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
};

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_txq ptptxq;
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
	struct sge_uld_rxq_info **uld_rxq_info;
	struct sge_uld_txq_info **uld_txq_info;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	struct sge_eohw_txq *eohw_txq;
	struct sge_ofld_rxq *eohw_rxq;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active ofld queue sets */
	u16 nqs_per_uld;            /* # of Rx queues per ULD */
	u16 eoqsets;                /* # of ETHOFLD queues */

	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u16 dbqtimer_val[SGE_NDBQTIMERS];
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */

	struct sge_idma_monitor_state idma_monitor;
	unsigned int egr_start;
	unsigned int egr_sz;
	unsigned int ingr_start;
	unsigned int ingr_sz;
	void **egr_map;             /* qid->queue egress queue map */
	struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
	unsigned long *starving_fl;
	unsigned long *txq_maperr;
	unsigned long *blocked_fl;
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */

	int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
	int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif /* CONFIG_PCI_IOV */

struct doorbell_stats {
	u32 db_drop;
	u32 db_empty;
	u32 db_full;
};

struct hash_mac_addr {
	struct list_head list;
	u8 addr[ETH_ALEN];
	unsigned int iface_mac;
};

struct msix_bmap {
	unsigned long *msix_bmap;
	unsigned int mapsize;
	spinlock_t lock; /* lock for acquiring bitmap */
};

struct msix_info {
	unsigned short vec;
	char desc[IFNAMSIZ + 10];
	unsigned int idx;
	cpumask_var_t aff_mask;
};

struct vf_info {
	unsigned char vf_mac_addr[ETH_ALEN];
	unsigned int tx_rate;
};

enum {
	HMA_DMA_MAPPED_FLAG = 1
};

struct hma_data {
	unsigned char flags;
	struct sg_table *sgt;
	dma_addr_t *phy_addr;	/* physical address of the page */
};

struct mbox_list {
	struct list_head list;
};

#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal {
	struct thermal_zone_device *tzdev;
};
#endif

struct mps_entries_ref {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 mask[ETH_ALEN];
	u16 idx;
	refcount_t refcnt;
};

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;
	unsigned int pf;
	unsigned int flags;
	unsigned int adap_idx;
	enum chip_type chip;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	/* MSI-X Info for NIC and OFLD queues */
	struct msix_info *msix_info;
	struct msix_bmap msix_bmap;

	struct doorbell_stats db_stats;
	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];                   /* channel -> port map */

	struct vf_info *vfinfo;

	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	unsigned int clipt_start;
	unsigned int clipt_end;
	struct clip_tbl *clipt;
	unsigned int rawf_start;
	unsigned int rawf_cnt;
	struct smt_data *smt;
	struct cxgb4_uld_info *uld;
	void *uld_handle[CXGB4_ULD_MAX];
	unsigned int num_uld;
	unsigned int num_ofld_uld;
	struct list_head list_node;
	struct list_head rcu_node;
	struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
	struct list_head mps_ref;
	spinlock_t mps_ref_lock; /* lock for syncing mps ref/def activities */

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct workqueue_struct *workq;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	struct work_struct fatal_err_notify_task;
	bool tid_release_task_busy;

	/* lock for mailbox cmd list */
	spinlock_t mbox_lock;
	struct mbox_list mlist;

	/* support for mailbox command/reply logging */
#define T4_OS_LOG_MBOX_CMDS 256
	struct mbox_cmd_log *mbox_log;

	struct mutex uld_mutex;

	struct dentry *debugfs_root;
	bool use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
	bool trace_rss;	/* 1 implies that different RSS flit per filter is
			 * used per filter else if 0 default RSS flit is
			 * used for all 4 filters.
			 */

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_clock_info;
	struct sk_buff *ptp_tx_skb;
	spinlock_t ptp_lock;
	spinlock_t stats_lock;
	spinlock_t win0_lock ____cacheline_aligned_in_smp;

	/* TC u32 offload */
	struct cxgb4_tc_u32_table *tc_u32;
	struct chcr_stats_debug chcr_stats;

	/* TC flower offload */
	bool tc_flower_initialized;
	struct rhashtable flower_tbl;
	struct rhashtable_params flower_ht_params;
	struct timer_list flower_stats_timer;
	struct work_struct flower_stats_work;

	struct ethtool_dump eth_dump;

	struct hma_data hma;

	struct srq_data *srq;

	/* Dump buffer for collecting logs in kdump kernel */
	struct vmcoredd_data vmcoredd;
#if IS_ENABLED(CONFIG_THERMAL)
	struct ch_thermal ch_thermal;
#endif

	/* TC MQPRIO offload */
	struct cxgb4_tc_mqprio *tc_mqprio;

	/* TC MATCHALL classifier offload */
	struct cxgb4_tc_matchall *tc_matchall;
};
1125 /* Support for "sched-class" command to allow a TX Scheduling Class to be
1126 * programmed with various parameters.
1128 struct ch_sched_params
{
1129 s8 type
; /* packet or flow */
1132 s8 level
; /* scheduler hierarchy level */
1133 s8 mode
; /* per-class or per-flow */
1134 s8 rateunit
; /* bit or packet rate */
1135 s8 ratemode
; /* %port relative or kbps absolute */
1136 s8 channel
; /* scheduler channel [0..N] */
1137 s8
class; /* scheduler class [0..N] */
1138 s32 minrate
; /* minimum rate */
1139 s32 maxrate
; /* maximum rate */
1140 s16 weight
; /* percent weight */
1141 s16 pktsize
; /* average packet size */

enum {
	SCHED_CLASS_TYPE_PACKET = 0,    /* class type */
};

enum {
	SCHED_CLASS_LEVEL_CL_RL = 0,    /* class rate limiter */
	SCHED_CLASS_LEVEL_CH_RL = 2,    /* channel rate limiter */
};

enum {
	SCHED_CLASS_MODE_CLASS = 0,     /* per-class scheduling */
	SCHED_CLASS_MODE_FLOW,          /* per-flow scheduling */
};

enum {
	SCHED_CLASS_RATEUNIT_BITS = 0,  /* bit rate scheduling */
};

enum {
	SCHED_CLASS_RATEMODE_ABS = 1,   /* Kb/s */
};
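
/* A minimal sketch (field values are illustrative assumptions): configuring
 * a per-class rate limiter capped at an absolute 100 Mb/s using struct
 * ch_sched_params above and the constants just defined.
 *
 *	struct ch_sched_params p = { };
 *
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
 *	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
 *	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
 *	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
 *	p.u.params.maxrate  = 100000;	    (in Kb/s)
 */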
1168 /* Support for "sched_queue" command to allow one or more NIC TX Queues
1169 * to be bound to a TX Scheduling Class.
1171 struct ch_sched_queue
{
1172 s8 queue
; /* queue index */
1173 s8
class; /* class index */
1176 /* Support for "sched_flowc" command to allow one or more FLOWC
1177 * to be bound to a TX Scheduling Class.
1179 struct ch_sched_flowc
{
1180 s32 tid
; /* TID to bind */
1181 s8
class; /* class index */

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16
#define ENCAP_VNI_BITWIDTH 24

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching rules
 * are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */

struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
	uint32_t pfvf_vld:1;                    /* PF/VF valid */
	uint32_t encap_vld:1;                   /* Encapsulation valid */
	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */
	uint32_t vni:ENCAP_VNI_BITWIDTH;        /* VNI of tunnel */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;         /* local port */
	uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;	/* count filter hits in TCB */
	uint32_t prio:1;	/* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;	/* 0 => IPv4, 1 => IPv6 */
	u32 hash:1;		/* 0 => wild-card, 1 => exact-match */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;	/* drop, pass, switch */

	uint32_t rpttid:1;	/* report TID in RSS hash field */

	uint32_t dirsteer:1;	/* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;		/* ingress queue */

	uint32_t maskhash:1;	/* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/* 1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;	/* egress port to switch packet out */
	uint32_t newdmac:1;	/* rewrite destination MAC address */
	uint32_t newsmac:1;	/* rewrite source MAC address */
	uint32_t newvlan:2;	/* rewrite VLAN Tag */
	uint32_t nat_mode:3;	/* specify NAT operation mode */
	uint8_t dmac[ETH_ALEN];	/* new destination MAC address */
	uint8_t smac[ETH_ALEN];	/* new source MAC address */
	uint16_t vlan;		/* VLAN Tag to insert */

	u8 nat_lip[16];		/* local IP to use after NAT'ing */
	u8 nat_fip[16];		/* foreign IP to use after NAT'ing */
	u16 nat_lport;		/* local port to use after NAT'ing */
	u16 nat_fport;		/* foreign port to use after NAT'ing */

	u32 tc_prio;		/* TC's filter priority index */
	u64 tc_cookie;		/* Unique cookie identifying TC rules */

	/* reservation for future additions */
	u8 rsvd[12];

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,        /* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,      /* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};

enum {
	NAT_MODE_NONE = 0,	/* No NAT performed */
	NAT_MODE_DIP,		/* NAT on Dst IP */
	NAT_MODE_DIP_DP,	/* NAT on Dst IP, Dst Port */
	NAT_MODE_DIP_DP_SIP,	/* NAT on Dst IP, Dst Port and Src IP */
	NAT_MODE_DIP_DP_SP,	/* NAT on Dst IP, Dst Port and Src Port */
	NAT_MODE_SIP_SP,	/* NAT on Src IP and Src Port */
	NAT_MODE_DIP_SIP_SP,	/* NAT on Dst IP, Src IP and Src Port */
	NAT_MODE_ALL		/* NAT on entire 4-tuple */
};
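
/* A hedged example (illustrative, not a driver API defined here): a
 * wild-card IPv4 specification that drops TCP segments to local port 80.
 * Unset (value, mask) pairs stay zeroed, i.e. "don't care".
 *
 *	struct ch_filter_specification fs = { };
 *
 *	fs.type = 0;			   (0 => IPv4)
 *	fs.action = FILTER_DROP;
 *	fs.val.proto  = IPPROTO_TCP;
 *	fs.mask.proto = ~0;
 *	fs.val.lport  = 80;
 *	fs.mask.lport = ~0;
 */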

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter. */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	struct filter_ctx *ctx; /* Caller's completion hook */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
	struct smt_entry *smt;  /* Source Mac Table entry for smac */
	struct net_device *dev; /* Associated net device */
	u32 tid;                /* This will store the actual tid */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

static inline int is_hashfilter(const struct adapter *adap)
{
	return adap->params.hash_filter;
}

static inline int is_pci_uld(const struct adapter *adap)
{
	return adap->params.crypto;
}

static inline int is_uld(const struct adapter *adap)
{
	return (adap->params.offload || adap->params.crypto);
}

static inline int is_ethofld(const struct adapter *adap)
{
	return adap->params.ethofld;
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32); /* lo + hi */
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif
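
/* Note (added commentary): on platforms lacking native 64-bit MMIO these
 * helpers synthesize the access from two 32-bit operations, low word first,
 * so the combined read/write is not atomic with respect to the hardware --
 * something callers of t4_read_reg64()/t4_write_reg64() below are assumed
 * to tolerate.
 */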

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 *	t4_set_hw_addr - store a port's MAC address in SW
 *	@adapter: the adapter
 *	@port_idx: the port index
 *	@hw_addr: the Ethernet address
 *
 *	Store the Ethernet address of the given port in SW.  Called by the
 *	common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
				  u8 hw_addr[])
{
	ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
	ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}

/**
 *	netdev2pinfo - return the port_info structure associated with a net_device
 *	@dev: the net_device
 *
 *	Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 *	adap2pinfo - return the port_info of a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 *	netdev2adap - return the adapter structure associated with a net_device
 *	@dev: the net_device
 *
 *	Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

/* Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
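
/* Decoding sketch (follows directly from the bit layout documented above;
 * the masks are illustrative):
 *
 *	chip version  =  vers        & 0x3ff;
 *	chip revision = (vers >> 10) & 0x3f;
 *	dump version  = (vers >> 16) & 0xff;
 */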

/* Return a queue's interrupt hold-off time in us.  0 means no timer. */
static inline unsigned int qtimer_val(const struct adapter *adap,
				      const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
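
/* The ">> 1" above reflects an encoding assumed from the driver's own
 * usage: bit 0 of intr_params selects packet-count thresholding, while the
 * upper bits hold the holdoff timer index used to look up sge.timer_val[].
 */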
1488 /* driver version & name used for ethtool_drvinfo */
1489 extern char cxgb4_driver_name
[];
1490 extern const char cxgb4_driver_version
[];

void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd,
		     rspq_flush_handler_t flush_handler, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid, u8 dbqt);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
			unsigned int cmplqid);
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
			 struct net_device *dev, unsigned int iqid,
			 unsigned int uld_type);
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
			     struct net_device *dev, u32 iqid);
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
				 int maxreclaim);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}

static inline int is_10gbt_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x4409:
	case 0x4486:
		return 1;
	default:
		return 0;
	}
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk / 2) /
		adapter->params.vpd.cclk);
}

static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
					      unsigned int ticks)
{
	return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
}
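
/* Worked example (numbers are illustrative): with a VPD core clock of
 * 250000 KHz, core_ticks_per_usec() is 250, so us_to_core_ticks(adap, 8)
 * is (8 * 250000) / 1000 = 2000 ticks, and dack_ticks_to_usec() with
 * dack_re = 4 converts N DACK ticks to (N << 4) / 250 microseconds.
 */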

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);

int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
				     const void *cmd, int size, void *rpl,
				     int timeout)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
				       timeout);
}

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox,
				const void *cmd, int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.
 */
static inline int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
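
/* The fold above maps a 48-bit MAC address into a 6-bit bucket (0..63).
 * Illustrative trace for 00:07:43:00:00:01: a = 0x000743, b = 0x000001,
 * so a ^= b gives 0x742; the two shift-xors fold the high bits downward
 * and "& 0x3f" keeps the low six bits, yielding bucket 31.
 */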

int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt);
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}

/**
 *	t4_is_inserted_mod_type - is a plugged in Firmware Module Type
 *	@fw_mod_type: the Firmware Module Type
 *
 *	Return whether the Firmware Module Type represents a real Transceiver
 *	Module/Cable Module Type which has been inserted.
 */
static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
{
	return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
		fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
		fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
		fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
}

void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx);
void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(void __iomem *regs);

fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
			      struct link_config *lc);
int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
		       unsigned int port, struct link_config *lc,
		       u8 sleep_ok, int timeout);

static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
				unsigned int port, struct link_config *lc)
{
	return t4_link_l1cfg_core(adapter, mbox, port, lc,
				  true, FW_CMD_MAX_TIMEOUT);
}

static inline int t4_link_l1cfg_ns(struct adapter *adapter, unsigned int mbox,
				   unsigned int port, struct link_config *lc)
{
	return t4_link_l1cfg_core(adapter, mbox, port, lc,
				  false, FW_CMD_MAX_TIMEOUT);
}

int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);

u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
u32 t4_get_util_window(struct adapter *adap);
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);

int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
		      u32 *mem_base, u32 *mem_aperture);
void t4_memory_update_win(struct adapter *adap, int win, u32 addr);
void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
			   int dir);
#define T4_MEMORY_WRITE	0
#define T4_MEMORY_READ	1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
		 void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
				  u32 len, __be32 *buf)
{
	return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
}

unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);

int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_load_phy_fw(struct adapter *adap,
		   int win, spinlock_t *lock,
		   int (*phy_fw_version)(const u8 *, size_t),
		   const u8 *phy_fw_data, size_t phy_fw_size);
int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force);
int t4_fl_pkt_align(struct adapter *adap);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adap);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_bs_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_get_scfg_version(struct adapter *adapter, u32 *vers);
int t4_get_vpd_version(struct adapter *adapter, u32 *vers);
int t4_get_version_info(struct adapter *adapter);
void t4_dump_version_info(struct adapter *adapter);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
int t4_shutdown_adapter(struct adapter *adapter);

enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid);

unsigned int qtimer_val(const struct adapter *adap,
			const struct sge_rspq *q);

int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap, bool sleep_ok);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
unsigned int t4_chip_rss_size(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok);
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok);
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok);
u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok);
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok);

unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
		    size_t n);
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
		    size_t n);
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp);
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset);
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok);
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok);
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok);
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok);
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val);
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw, bool sleep_ok);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
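/* Example (illustrative sketch): reading one firmware device parameter
 * over the mailbox.  FW_PARAM_DEV() is this header's device-parameter
 * encoding macro; CCLK (the core clock) is just one choice of mnemonic,
 * and the locals below are hypothetical.
 *
 *	u32 param = FW_PARAM_DEV(CCLK);
 *	u32 cclk;
 *	int ret;
 *
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
 *			      &param, &cclk);
 *	if (ret < 0)
 *		return ret;
 */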
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size, u8 *vivld, u8 *vin);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
	       unsigned int pf, unsigned int vf,
	       unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok);
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx,
			   bool sleep_ok);
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok);
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, u8 *smt_idx);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
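/* Example (illustrative sketch): installing a new unicast address in a
 * VI's exact-match table and capturing its SMT index.  Passing idx == -1
 * asks the firmware to pick a free slot; a negative return is an error.
 * "pi" and "dev" are hypothetical locals.
 *
 *	u8 smt_idx;
 *	int idx;
 *
 *	idx = t4_change_mac(adap, adap->mbox, pi->viid, -1,
 *			    dev->dev_addr, true, &smt_idx);
 *	if (idx < 0)
 *		return idx;
 */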
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
			struct port_info *pi,
			bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
			  u16 *dbqtimers);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
		       unsigned int *speedp, unsigned int *mtup);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
			int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
			 int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val);
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
		   enum ctxt_type ctype, u32 *data);
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
		      enum ctxt_type ctype, u32 *data);
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int class,
		    int minrate, int maxrate, int weight, int pktsize);
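/* Example (illustrative sketch): configuring a class-level rate limiter
 * with t4_sched_params().  The SCHED_CLASS_* names are stand-ins for the
 * driver's scheduler enums (see sched.h); the rate and packet size below
 * are illustrative only.
 *
 *	(channel = pi->tx_chan, class = 0, minrate = 0,
 *	 maxrate = 100000 kb/s, weight = 0, pktsize = 1500)
 *
 *	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
 *			      SCHED_CLASS_LEVEL_CL_RL, SCHED_CLASS_MODE_CLASS,
 *			      SCHED_CLASS_RATEUNIT_BITS,
 *			      SCHED_CLASS_RATEMODE_ABS,
 *			      pi->tx_chan, 0, 0, 100000, 0, 1500);
 */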
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma);
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok);
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok);
void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
	      unsigned int devid, unsigned int offset,
	      unsigned int len, u8 *buf);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap);
void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
			      u32 ndesc);
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
void cxgb4_ethofld_restart(unsigned long data);
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
			     const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
void cxgb4_reclaim_completed_tx(struct adapter *adap,
				struct sge_txq *q, bool unmap);
int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr);
void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			 void *pos);
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
		    u16 vlan);
int cxgb4_dcb_enabled(const struct net_device *dev);
int cxgb4_thermal_init(struct adapter *adap);
int cxgb4_thermal_remove(struct adapter *adap);
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx);
void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask);
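/* Example (illustrative sketch): spreading queue interrupt vectors across
 * CPUs at setup and releasing the masks at teardown.  "minfo" and "qidx"
 * stand in for the driver's per-vector MSI-X bookkeeping (vector number
 * plus a cpumask_var_t affinity mask) and the queue index.
 *
 *	cxgb4_set_msix_aff(adap, minfo->vec, &minfo->aff_mask, qidx);
 *	...
 *	cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
 */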
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr,
		     bool persistent, u8 *smt_idx);
int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
			 bool free, unsigned int naddr,
			 const u8 **addr, u16 *idx,
			 u64 *hash, bool sleep_ok);
int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
			unsigned int naddr, const u8 **addr, bool sleep_ok);
int cxgb4_init_mps_ref_entries(struct adapter *adap);
void cxgb4_free_mps_ref_entries(struct adapter *adap);
int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			       const u8 *addr, const u8 *mask,
			       unsigned int vni, unsigned int vni_mask,
			       u8 dip_hit, u8 lookup_type, bool sleep_ok);
int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			      int idx, bool sleep_ok);
int cxgb4_free_raw_mac_filt(struct adapter *adap,
			    unsigned int viid,
			    const u8 *addr,
			    const u8 *mask,
			    unsigned int idx,
			    u8 lookup_type,
			    u8 port_id,
			    bool sleep_ok);
int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
			     unsigned int viid,
			     const u8 *addr,
			     const u8 *mask,
			     unsigned int idx,
			     u8 lookup_type,
			     u8 port_id,
			     bool sleep_ok);
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
			  int *tcam_idx, const u8 *addr,
			  bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
#endif /* __CXGB4_H__ */