/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
36 /* RX Fifo Registers */
37 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
38 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
39 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
40 #define MVPP2_RX_FIFO_INIT_REG 0x64
42 /* RX DMA Top Registers */
43 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
44 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
45 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
46 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
47 #define MVPP2_POOL_BUF_SIZE_OFFSET 5
48 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
49 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
50 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
51 #define MVPP2_RXQ_POOL_SHORT_OFFS 20
52 #define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
53 #define MVPP2_RXQ_POOL_LONG_OFFS 24
54 #define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
55 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
56 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
57 #define MVPP2_RXQ_DISABLE_MASK BIT(31)
59 /* Parser Registers */
60 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
61 #define MVPP2_PRS_PORT_LU_MAX 0xf
62 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
63 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
64 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
65 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
66 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
67 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
68 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
69 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
70 #define MVPP2_PRS_TCAM_IDX_REG 0x1100
71 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
72 #define MVPP2_PRS_TCAM_INV_MASK BIT(31)
73 #define MVPP2_PRS_SRAM_IDX_REG 0x1200
74 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
75 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230
76 #define MVPP2_PRS_TCAM_EN_MASK BIT(0)
78 /* Classifier Registers */
79 #define MVPP2_CLS_MODE_REG 0x1800
80 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
81 #define MVPP2_CLS_PORT_WAY_REG 0x1810
82 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
83 #define MVPP2_CLS_LKP_INDEX_REG 0x1814
84 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
85 #define MVPP2_CLS_LKP_TBL_REG 0x1818
86 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
87 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
88 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820
89 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824
90 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828
91 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c
92 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
93 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
94 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
95 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
96 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
97 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
99 /* Descriptor Manager Top Registers */
100 #define MVPP2_RXQ_NUM_REG 0x2040
101 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044
102 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048
103 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
104 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
105 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
106 #define MVPP2_RXQ_NUM_NEW_OFFSET 16
107 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
108 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
109 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
110 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
111 #define MVPP2_RXQ_THRESH_REG 0x204c
112 #define MVPP2_OCCUPIED_THRESH_OFFSET 0
113 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
114 #define MVPP2_RXQ_INDEX_REG 0x2050
115 #define MVPP2_TXQ_NUM_REG 0x2080
116 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084
117 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088
118 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
119 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
120 #define MVPP2_TXQ_THRESH_REG 0x2094
121 #define MVPP2_TRANSMITTED_THRESH_OFFSET 16
122 #define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
123 #define MVPP2_TXQ_INDEX_REG 0x2098
124 #define MVPP2_TXQ_PREF_BUF_REG 0x209c
125 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
126 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
127 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
128 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
129 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
130 #define MVPP2_TXQ_PENDING_REG 0x20a0
131 #define MVPP2_TXQ_PENDING_MASK 0x3fff
132 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4
133 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
134 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16
135 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
136 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
137 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
138 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
139 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
140 #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
141 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16
142 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
143 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
144 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
145 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
146 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
147 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
149 /* MBUS bridge registers */
150 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
151 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
152 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
153 #define MVPP2_BASE_ADDR_ENABLE 0x4060
155 /* Interrupt Cause and Mask registers */
156 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
157 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
158 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
159 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
160 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
161 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
162 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
163 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
164 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
165 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
166 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
167 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
168 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
169 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
170 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
171 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
172 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
173 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
174 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
175 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
177 /* Buffer Manager registers */
178 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
179 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
180 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
181 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0
182 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
183 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
184 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
185 #define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
186 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
187 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
188 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
189 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
190 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
191 #define MVPP2_BM_START_MASK BIT(0)
192 #define MVPP2_BM_STOP_MASK BIT(1)
193 #define MVPP2_BM_STATE_MASK BIT(4)
194 #define MVPP2_BM_LOW_THRESH_OFFS 8
195 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00
196 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
197 MVPP2_BM_LOW_THRESH_OFFS)
198 #define MVPP2_BM_HIGH_THRESH_OFFS 16
199 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
200 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
201 MVPP2_BM_HIGH_THRESH_OFFS)
202 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
203 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
204 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
205 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
206 #define MVPP2_BM_BPPE_FULL_MASK BIT(3)
207 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
208 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
209 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
210 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
211 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440
212 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
213 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
214 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
215 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
216 #define MVPP2_BM_VIRT_RLS_REG 0x64c0
217 #define MVPP2_BM_MC_RLS_REG 0x64c4
218 #define MVPP2_BM_MC_ID_MASK 0xfff
219 #define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
221 /* TX Scheduler registers */
222 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
223 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
224 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff
225 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8
226 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
227 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
228 #define MVPP2_TXP_SCHED_MTU_REG 0x801c
229 #define MVPP2_TXP_MTU_MAX 0x7FFFF
230 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020
231 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
232 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
233 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
234 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
235 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
236 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
237 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
238 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
239 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
240 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
241 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
242 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
243 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
245 /* TX general registers */
246 #define MVPP2_TX_SNOOP_REG 0x8800
247 #define MVPP2_TX_PORT_FLUSH_REG 0x8810
248 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
251 #define MVPP2_SRC_ADDR_MIDDLE 0x24
252 #define MVPP2_SRC_ADDR_HIGH 0x28
253 #define MVPP2_PHY_AN_CFG0_REG 0x34
254 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
255 #define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
256 0x400 + (port) * 0x400)
257 #define MVPP2_MIB_LATE_COLLISION 0x7c
258 #define MVPP2_ISR_SUM_MASK_REG 0x220c
259 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
260 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
262 /* Per-port registers */
263 #define MVPP2_GMAC_CTRL_0_REG 0x0
264 #define MVPP2_GMAC_PORT_EN_MASK BIT(0)
265 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
266 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
267 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
268 #define MVPP2_GMAC_CTRL_1_REG 0x4
269 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
270 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
271 #define MVPP2_GMAC_PCS_LB_EN_BIT 6
272 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
273 #define MVPP2_GMAC_SA_LOW_OFFS 7
274 #define MVPP2_GMAC_CTRL_2_REG 0x8
275 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
276 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
277 #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
278 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
279 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc
280 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
281 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
282 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
283 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
284 #define MVPP2_GMAC_AN_SPEED_EN BIT(7)
285 #define MVPP2_GMAC_FC_ADV_EN BIT(9)
286 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
287 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
288 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
289 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
290 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
291 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
292 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
294 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
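/* Example (illustrative): with (q)->last_desc == 127 (a 128-entry ring),
 * MVPP2_QUEUE_NEXT_DESC() advances 0 -> 1 -> ... -> 127 -> 0.
 */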
300 /* Various constants */
303 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15
304 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
305 #define MVPP2_RX_COAL_PKTS 32
306 #define MVPP2_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
321 /* Lbtd 802.3 type */
322 #define MVPP2_IP_LBDT_TYPE 0xfffa
324 #define MVPP2_TX_CSUM_MAX_SIZE 9800
326 /* Timeout constants */
327 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
328 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
330 #define MVPP2_TX_MTU_MAX 0x7ffff
332 /* Maximum number of T-CONTs of PON port */
333 #define MVPP2_MAX_TCONT 16
335 /* Maximum number of supported ports */
336 #define MVPP2_MAX_PORTS 4
338 /* Maximum number of TXQs used by single port */
339 #define MVPP2_MAX_TXQ 8
341 /* Maximum number of RXQs used by single port */
342 #define MVPP2_MAX_RXQ 8
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
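/* Example (illustrative): with MVPP2_MAX_PORTS = 4 and MVPP2_MAX_RXQ = 8,
 * 32 physical RXQs are available in total, of which each port uses
 * MVPP2_DEFAULT_RXQ = 4 by default.
 */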
350 /* Max number of Rx descriptors */
351 #define MVPP2_MAX_RXD 128
353 /* Max number of Tx descriptors */
354 #define MVPP2_MAX_TXD 1024
356 /* Amount of Tx descriptors that can be reserved at once by CPU */
357 #define MVPP2_CPU_DESC_CHUNK 64
359 /* Max number of Tx descriptors in each aggregated queue */
360 #define MVPP2_AGGR_TXQ_SIZE 256
362 /* Descriptor aligned size */
363 #define MVPP2_DESC_ALIGNED_SIZE 32
365 /* Descriptor alignment mask */
366 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
368 /* RX FIFO constants */
369 #define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
370 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
371 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
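/* Worked example (illustrative, assuming a 64-byte cache line and
 * NET_SKB_PAD == 64): for an MTU of 1500,
 *   MVPP2_RX_PKT_SIZE(1500)   = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 *   MVPP2_RX_BUF_SIZE(1536)   = 1536 + 64 = 1600
 *   MVPP2_RX_TOTAL_SIZE(1600) = 1600 + MVPP2_SKB_SHINFO_SIZE
 * and MVPP2_RX_MAX_PKT_SIZE() inverts the last two steps.
 */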
386 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
388 /* IPv6 max L3 address size */
389 #define MVPP2_MAX_L3_ADDR_SIZE 16
392 #define MVPP2_F_LOOPBACK BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
404 /* Parser constants */
405 #define MVPP2_PRS_TCAM_SRAM_SIZE 256
406 #define MVPP2_PRS_TCAM_WORDS 6
407 #define MVPP2_PRS_SRAM_WORDS 4
408 #define MVPP2_PRS_FLOW_ID_SIZE 64
409 #define MVPP2_PRS_FLOW_ID_MASK 0x3f
410 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1
411 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
412 #define MVPP2_PRS_IPV4_HEAD 0x40
413 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
414 #define MVPP2_PRS_IPV4_MC 0xe0
415 #define MVPP2_PRS_IPV4_MC_MASK 0xf0
416 #define MVPP2_PRS_IPV4_BC_MASK 0xff
417 #define MVPP2_PRS_IPV4_IHL 0x5
418 #define MVPP2_PRS_IPV4_IHL_MASK 0xf
419 #define MVPP2_PRS_IPV6_MC 0xff
420 #define MVPP2_PRS_IPV6_MC_MASK 0xff
421 #define MVPP2_PRS_IPV6_HOP_MASK 0xff
422 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff
423 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
424 #define MVPP2_PRS_DBL_VLANS_MAX 100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
433 #define MVPP2_PRS_AI_BITS 8
434 #define MVPP2_PRS_PORT_MASK 0xff
435 #define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) * 2) - ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
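/* Illustrative mapping of the two macros above: data byte 0 lives at
 * tcam.byte[0] with its enable bits at tcam.byte[2], data byte 1 at
 * tcam.byte[1] with enable bits at tcam.byte[3], data byte 2 at
 * tcam.byte[4] with enable bits at tcam.byte[6], and so on - i.e. each
 * 32-bit TCAM word holds two data bytes in its low half-word and their
 * two enable bytes in the high half-word.
 */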
440 #define MVPP2_PRS_TCAM_AI_BYTE 16
441 #define MVPP2_PRS_TCAM_PORT_BYTE 17
442 #define MVPP2_PRS_TCAM_LU_BYTE 20
443 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
444 #define MVPP2_PRS_TCAM_INV_WORD 5
445 /* Tcam entries ID */
446 #define MVPP2_PE_DROP_ALL 0
447 #define MVPP2_PE_FIRST_FREE_TID 1
448 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
449 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
450 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
451 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
452 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
453 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
454 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
455 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
456 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
457 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
458 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
459 #define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
460 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
461 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
462 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
463 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
464 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
465 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
466 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
467 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
468 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
469 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
470 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
471 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
472 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
477 #define MVPP2_PRS_SRAM_RI_OFFS 0
478 #define MVPP2_PRS_SRAM_RI_WORD 0
479 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
480 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
481 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
482 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64
483 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
484 #define MVPP2_PRS_SRAM_UDF_OFFS 73
485 #define MVPP2_PRS_SRAM_UDF_BITS 8
486 #define MVPP2_PRS_SRAM_UDF_MASK 0xff
487 #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
488 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
489 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
490 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
491 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
492 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
493 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
494 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
495 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
496 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
497 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
498 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
499 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
500 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
501 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
502 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
503 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
504 #define MVPP2_PRS_SRAM_AI_OFFS 90
505 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
506 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
507 #define MVPP2_PRS_SRAM_AI_MASK 0xff
508 #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
509 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
510 #define MVPP2_PRS_SRAM_LU_DONE_BIT 110
511 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111
513 /* Sram result info bits assignment */
514 #define MVPP2_PRS_RI_MAC_ME_MASK 0x1
515 #define MVPP2_PRS_RI_DSA_MASK 0x2
516 #define MVPP2_PRS_RI_VLAN_MASK 0xc
517 #define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
518 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
519 #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
520 #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
521 #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
522 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
523 #define MVPP2_PRS_RI_L2_CAST_MASK 0x600
524 #define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
525 #define MVPP2_PRS_RI_L2_MCAST BIT(9)
526 #define MVPP2_PRS_RI_L2_BCAST BIT(10)
527 #define MVPP2_PRS_RI_PPPOE_MASK 0x800
528 #define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
529 #define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
530 #define MVPP2_PRS_RI_L3_IP4 BIT(12)
531 #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
532 #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
533 #define MVPP2_PRS_RI_L3_IP6 BIT(14)
534 #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
535 #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
536 #define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
537 #define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
538 #define MVPP2_PRS_RI_L3_MCAST BIT(15)
539 #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
540 #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
541 #define MVPP2_PRS_RI_UDF3_MASK 0x300000
542 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
543 #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
544 #define MVPP2_PRS_RI_L4_TCP BIT(22)
545 #define MVPP2_PRS_RI_L4_UDP BIT(23)
546 #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
547 #define MVPP2_PRS_RI_UDF7_MASK 0x60000000
548 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
549 #define MVPP2_PRS_RI_DROP_MASK 0x80000000
551 /* Sram additional info bits assignment */
552 #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
553 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
554 #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
555 #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
556 #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
557 #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
558 #define MVPP2_PRS_SINGLE_VLAN_AI 0
559 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
562 #define MVPP2_PRS_TAGGED true
563 #define MVPP2_PRS_UNTAGGED false
564 #define MVPP2_PRS_EDSA true
565 #define MVPP2_PRS_DSA false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
597 /* Classifier constants */
598 #define MVPP2_CLS_FLOWS_TBL_SIZE 512
599 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
600 #define MVPP2_CLS_LKP_TBL_SIZE 64
603 #define MVPP2_BM_POOLS_NUM 8
604 #define MVPP2_BM_LONG_BUF_NUM 1024
605 #define MVPP2_BM_SHORT_BUF_NUM 2048
606 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
607 #define MVPP2_BM_POOL_PTR_ALIGN 128
608 #define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
609 #define MVPP2_BM_SWF_SHORT_POOL 3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
/* BM short pool packet size
 * This value assures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
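/* Illustrative accounting for the 512-byte figure above:
 *   NET_SKB_PAD + MVPP2_BM_SHORT_PKT_SIZE + MVPP2_SKB_SHINFO_SIZE == 512,
 * which is exactly MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(pkt)) for the
 * short-pool packet size.
 */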
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;
struct mvpp2_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};
/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */
721 #define MVPP2_TXD_L3_OFF_SHIFT 0
722 #define MVPP2_TXD_IP_HLEN_SHIFT 8
723 #define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
724 #define MVPP2_TXD_L4_CSUM_NOT BIT(14)
725 #define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
726 #define MVPP2_TXD_PADDING_DISABLE BIT(23)
727 #define MVPP2_TXD_L4_UDP BIT(24)
728 #define MVPP2_TXD_L3_IP6 BIT(26)
729 #define MVPP2_TXD_L_DESC BIT(28)
730 #define MVPP2_TXD_F_DESC BIT(29)
732 #define MVPP2_RXD_ERR_SUMMARY BIT(15)
733 #define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
734 #define MVPP2_RXD_ERR_CRC 0x0
735 #define MVPP2_RXD_ERR_OVERRUN BIT(13)
736 #define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
737 #define MVPP2_RXD_BM_POOL_ID_OFFS 16
738 #define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
739 #define MVPP2_RXD_HWF_SYNC BIT(21)
740 #define MVPP2_RXD_L4_CSUM_OK BIT(22)
741 #define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
742 #define MVPP2_RXD_L4_TCP BIT(25)
743 #define MVPP2_RXD_L4_UDP BIT(26)
744 #define MVPP2_RXD_L3_IP4 BIT(28)
745 #define MVPP2_RXD_L3_IP6 BIT(30)
746 #define MVPP2_RXD_BUF_HDR BIT(31)
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
struct mvpp2_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_phys_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Array of transmitted buffers' physical addresses */
	dma_addr_t *tx_buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	atomic_t in_use;
struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8  reserved1;		/* bm_qset (for future use, BM)		*/
};
/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS	12
#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	(((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
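/* Illustrative helper (not part of the original driver): a read-modify-write
 * wrapper built on the two accessors above; the name mvpp2_modify is only an
 * example.
 */
static inline void mvpp2_modify(struct mvpp2 *priv, u32 offset, u32 mask,
				u32 set)
{
	u32 val = mvpp2_read(priv, offset);

	val &= ~mask;
	val |= set;
	mvpp2_write(priv, offset, val);
}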
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
	txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] = tx_desc->buf_phys_addr;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
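/* Example (illustrative): the Ethernet ports sit behind the PON T-CONTs in
 * the TXQ numbering, so with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * port 0 / txq 2 maps to physical TXQ (16 + 0) * 8 + 2 = 130, and
 * mvpp2_egress_port() returns 16 for port 0.
 */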
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
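/* Illustrative: the TCAM port-enable byte stores the *inverted* port map, so
 * mvpp2_prs_tcam_port_map_set(pe, BIT(0) | BIT(1)) leaves the enable byte as
 * 0xfc and mvpp2_prs_tcam_port_map_get() then returns 0x03 again.
 */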
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
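/* Usage note (illustrative): because mvpp2_prs_match_etype() below stores the
 * high byte of an ethertype at data offset 0 and the low byte at offset 1,
 * callers that compare a TPID against TCAM data (see mvpp2_prs_vlan_find())
 * pass swab16(tpid) to mvpp2_prs_tcam_data_cmp().
 */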
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
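/* Example (illustrative): matching IPv4, mvpp2_prs_match_etype(pe, 0, ETH_P_IP)
 * programs 0x08 at data byte 0 and 0x00 at data byte 1, with both enable
 * masks set to 0xff so every bit of the ethertype must match.
 */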
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
							   (unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
1404 /* Enable/disable dropping all mac da's */
1405 static void mvpp2_prs_mac_drop_all_set(struct mvpp2
*priv
, int port
, bool add
)
1407 struct mvpp2_prs_entry pe
;
1409 if (priv
->prs_shadow
[MVPP2_PE_DROP_ALL
].valid
) {
/* Entry exists - update port only */
1411 pe
.index
= MVPP2_PE_DROP_ALL
;
1412 mvpp2_prs_hw_read(priv
, &pe
);
1414 /* Entry doesn't exist - create new */
1415 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1416 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1417 pe
.index
= MVPP2_PE_DROP_ALL
;
1419 /* Non-promiscuous mode for all ports - DROP unknown packets */
1420 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DROP_MASK
,
1421 MVPP2_PRS_RI_DROP_MASK
);
1423 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1424 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1426 /* Update shadow table */
1427 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1429 /* Mask all ports */
1430 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1433 /* Update port mask */
1434 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1436 mvpp2_prs_hw_write(priv
, &pe
);
1439 /* Set port to promiscuous mode */
1440 static void mvpp2_prs_mac_promisc_set(struct mvpp2
*priv
, int port
, bool add
)
1442 struct mvpp2_prs_entry pe
;
1444 /* Promiscuous mode - Accept unknown packets */
1446 if (priv
->prs_shadow
[MVPP2_PE_MAC_PROMISCUOUS
].valid
) {
/* Entry exists - update port only */
1448 pe
.index
= MVPP2_PE_MAC_PROMISCUOUS
;
1449 mvpp2_prs_hw_read(priv
, &pe
);
1451 /* Entry doesn't exist - create new */
1452 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1453 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1454 pe
.index
= MVPP2_PE_MAC_PROMISCUOUS
;
1456 /* Continue - set next lookup */
1457 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1459 /* Set result info bits */
1460 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L2_UCAST
,
1461 MVPP2_PRS_RI_L2_CAST_MASK
);
1463 /* Shift to ethertype */
1464 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
1465 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1467 /* Mask all ports */
1468 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1470 /* Update shadow table */
1471 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1474 /* Update port mask */
1475 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1477 mvpp2_prs_hw_write(priv
, &pe
);
1480 /* Accept multicast */
1481 static void mvpp2_prs_mac_multi_set(struct mvpp2
*priv
, int port
, int index
,
1484 struct mvpp2_prs_entry pe
;
1485 unsigned char da_mc
;
/* Ethernet multicast address first byte is
 * 0x01 for IPv4 and 0x33 for IPv6
 */
1490 da_mc
= (index
== MVPP2_PE_MAC_MC_ALL
) ? 0x01 : 0x33;
1492 if (priv
->prs_shadow
[index
].valid
) {
/* Entry exists - update port only */
1495 mvpp2_prs_hw_read(priv
, &pe
);
1497 /* Entry doesn't exist - create new */
1498 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1499 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1502 /* Continue - set next lookup */
1503 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1505 /* Set result info bits */
1506 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L2_MCAST
,
1507 MVPP2_PRS_RI_L2_CAST_MASK
);
1509 /* Update tcam entry data first byte */
1510 mvpp2_prs_tcam_data_byte_set(&pe
, 0, da_mc
, 0xff);
1512 /* Shift to ethertype */
1513 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
1514 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1516 /* Mask all ports */
1517 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1519 /* Update shadow table */
1520 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1523 /* Update port mask */
1524 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1526 mvpp2_prs_hw_write(priv
, &pe
);
1529 /* Set entry for dsa packets */
1530 static void mvpp2_prs_dsa_tag_set(struct mvpp2
*priv
, int port
, bool add
,
1531 bool tagged
, bool extend
)
1533 struct mvpp2_prs_entry pe
;
1537 tid
= tagged
? MVPP2_PE_EDSA_TAGGED
: MVPP2_PE_EDSA_UNTAGGED
;
1540 tid
= tagged
? MVPP2_PE_DSA_TAGGED
: MVPP2_PE_DSA_UNTAGGED
;
1544 if (priv
->prs_shadow
[tid
].valid
) {
/* Entry exists - update port only */
1547 mvpp2_prs_hw_read(priv
, &pe
);
1549 /* Entry doesn't exist - create new */
1550 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1551 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1555 mvpp2_prs_sram_shift_set(&pe
, shift
,
1556 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1558 /* Update shadow table */
1559 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_DSA
);
1562 /* Set tagged bit in DSA tag */
1563 mvpp2_prs_tcam_data_byte_set(&pe
, 0,
1564 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
,
1565 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
);
1566 /* Clear all ai bits for next iteration */
1567 mvpp2_prs_sram_ai_update(&pe
, 0,
1568 MVPP2_PRS_SRAM_AI_MASK
);
1569 /* If packet is tagged continue check vlans */
1570 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1572 /* Set result info bits to 'no vlans' */
1573 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
1574 MVPP2_PRS_RI_VLAN_MASK
);
1575 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1578 /* Mask all ports */
1579 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1582 /* Update port mask */
1583 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1585 mvpp2_prs_hw_write(priv
, &pe
);
1588 /* Set entry for dsa ethertype */
1589 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2
*priv
, int port
,
1590 bool add
, bool tagged
, bool extend
)
1592 struct mvpp2_prs_entry pe
;
1593 int tid
, shift
, port_mask
;
1596 tid
= tagged
? MVPP2_PE_ETYPE_EDSA_TAGGED
:
1597 MVPP2_PE_ETYPE_EDSA_UNTAGGED
;
1601 tid
= tagged
? MVPP2_PE_ETYPE_DSA_TAGGED
:
1602 MVPP2_PE_ETYPE_DSA_UNTAGGED
;
1603 port_mask
= MVPP2_PRS_PORT_MASK
;
1607 if (priv
->prs_shadow
[tid
].valid
) {
/* Entry exists - update port only */
1610 mvpp2_prs_hw_read(priv
, &pe
);
1612 /* Entry doesn't exist - create new */
1613 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1614 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1618 mvpp2_prs_match_etype(&pe
, 0, ETH_P_EDSA
);
1619 mvpp2_prs_match_etype(&pe
, 2, 0);
1621 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DSA_MASK
,
1622 MVPP2_PRS_RI_DSA_MASK
);
/* Shift ethertype + 2 byte reserved + tag */
1624 mvpp2_prs_sram_shift_set(&pe
, 2 + MVPP2_ETH_TYPE_LEN
+ shift
,
1625 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1627 /* Update shadow table */
1628 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_DSA
);
1631 /* Set tagged bit in DSA tag */
1632 mvpp2_prs_tcam_data_byte_set(&pe
,
1633 MVPP2_ETH_TYPE_LEN
+ 2 + 3,
1634 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
,
1635 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
);
1636 /* Clear all ai bits for next iteration */
1637 mvpp2_prs_sram_ai_update(&pe
, 0,
1638 MVPP2_PRS_SRAM_AI_MASK
);
1639 /* If packet is tagged continue check vlans */
1640 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1642 /* Set result info bits to 'no vlans' */
1643 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
1644 MVPP2_PRS_RI_VLAN_MASK
);
1645 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1647 /* Mask/unmask all ports, depending on dsa type */
1648 mvpp2_prs_tcam_port_map_set(&pe
, port_mask
);
1651 /* Update port mask */
1652 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1654 mvpp2_prs_hw_write(priv
, &pe
);
1657 /* Search for existing single/triple vlan entry */
1658 static struct mvpp2_prs_entry
*mvpp2_prs_vlan_find(struct mvpp2
*priv
,
1659 unsigned short tpid
, int ai
)
1661 struct mvpp2_prs_entry
*pe
;
1664 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1667 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_VLAN
);
/* Go through all the entries with MVPP2_PRS_LU_VLAN */
1670 for (tid
= MVPP2_PE_FIRST_FREE_TID
;
1671 tid
<= MVPP2_PE_LAST_FREE_TID
; tid
++) {
1672 unsigned int ri_bits
, ai_bits
;
1675 if (!priv
->prs_shadow
[tid
].valid
||
1676 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VLAN
)
1681 mvpp2_prs_hw_read(priv
, pe
);
1682 match
= mvpp2_prs_tcam_data_cmp(pe
, 0, swab16(tpid
));
1687 ri_bits
= mvpp2_prs_sram_ri_get(pe
);
1688 ri_bits
&= MVPP2_PRS_RI_VLAN_MASK
;
1690 /* Get current ai value from tcam */
1691 ai_bits
= mvpp2_prs_tcam_ai_get(pe
);
1692 /* Clear double vlan bit */
1693 ai_bits
&= ~MVPP2_PRS_DBL_VLAN_AI_BIT
;
1698 if (ri_bits
== MVPP2_PRS_RI_VLAN_SINGLE
||
1699 ri_bits
== MVPP2_PRS_RI_VLAN_TRIPLE
)
1707 /* Add/update single/triple vlan entry */
1708 static int mvpp2_prs_vlan_add(struct mvpp2
*priv
, unsigned short tpid
, int ai
,
1709 unsigned int port_map
)
1711 struct mvpp2_prs_entry
*pe
;
1715 pe
= mvpp2_prs_vlan_find(priv
, tpid
, ai
);
1718 /* Create new tcam entry */
1719 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_LAST_FREE_TID
,
1720 MVPP2_PE_FIRST_FREE_TID
);
1724 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1728 /* Get last double vlan tid */
1729 for (tid_aux
= MVPP2_PE_LAST_FREE_TID
;
1730 tid_aux
>= MVPP2_PE_FIRST_FREE_TID
; tid_aux
--) {
1731 unsigned int ri_bits
;
1733 if (!priv
->prs_shadow
[tid_aux
].valid
||
1734 priv
->prs_shadow
[tid_aux
].lu
!= MVPP2_PRS_LU_VLAN
)
1737 pe
->index
= tid_aux
;
1738 mvpp2_prs_hw_read(priv
, pe
);
1739 ri_bits
= mvpp2_prs_sram_ri_get(pe
);
1740 if ((ri_bits
& MVPP2_PRS_RI_VLAN_MASK
) ==
1741 MVPP2_PRS_RI_VLAN_DOUBLE
)
1745 if (tid
<= tid_aux
) {
1750 memset(pe
, 0 , sizeof(struct mvpp2_prs_entry
));
1751 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_VLAN
);
1754 mvpp2_prs_match_etype(pe
, 0, tpid
);
1756 mvpp2_prs_sram_next_lu_set(pe
, MVPP2_PRS_LU_L2
);
1757 /* Shift 4 bytes - skip 1 vlan tag */
1758 mvpp2_prs_sram_shift_set(pe
, MVPP2_VLAN_TAG_LEN
,
1759 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1760 /* Clear all ai bits for next iteration */
1761 mvpp2_prs_sram_ai_update(pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
1763 if (ai
== MVPP2_PRS_SINGLE_VLAN_AI
) {
1764 mvpp2_prs_sram_ri_update(pe
, MVPP2_PRS_RI_VLAN_SINGLE
,
1765 MVPP2_PRS_RI_VLAN_MASK
);
1767 ai
|= MVPP2_PRS_DBL_VLAN_AI_BIT
;
1768 mvpp2_prs_sram_ri_update(pe
, MVPP2_PRS_RI_VLAN_TRIPLE
,
1769 MVPP2_PRS_RI_VLAN_MASK
);
1771 mvpp2_prs_tcam_ai_update(pe
, ai
, MVPP2_PRS_SRAM_AI_MASK
);
1773 mvpp2_prs_shadow_set(priv
, pe
->index
, MVPP2_PRS_LU_VLAN
);
1775 /* Update ports' mask */
1776 mvpp2_prs_tcam_port_map_set(pe
, port_map
);
1778 mvpp2_prs_hw_write(priv
, pe
);
1786 /* Get first free double vlan ai number */
1787 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2
*priv
)
1791 for (i
= 1; i
< MVPP2_PRS_DBL_VLANS_MAX
; i
++) {
1792 if (!priv
->prs_double_vlans
[i
])
1799 /* Search for existing double vlan entry */
1800 static struct mvpp2_prs_entry
*mvpp2_prs_double_vlan_find(struct mvpp2
*priv
,
1801 unsigned short tpid1
,
1802 unsigned short tpid2
)
1804 struct mvpp2_prs_entry
*pe
;
1807 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1810 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_VLAN
);
/* Go through all the entries with MVPP2_PRS_LU_VLAN */
1813 for (tid
= MVPP2_PE_FIRST_FREE_TID
;
1814 tid
<= MVPP2_PE_LAST_FREE_TID
; tid
++) {
1815 unsigned int ri_mask
;
1818 if (!priv
->prs_shadow
[tid
].valid
||
1819 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VLAN
)
1823 mvpp2_prs_hw_read(priv
, pe
);
1825 match
= mvpp2_prs_tcam_data_cmp(pe
, 0, swab16(tpid1
))
1826 && mvpp2_prs_tcam_data_cmp(pe
, 4, swab16(tpid2
));
1831 ri_mask
= mvpp2_prs_sram_ri_get(pe
) & MVPP2_PRS_RI_VLAN_MASK
;
1832 if (ri_mask
== MVPP2_PRS_RI_VLAN_DOUBLE
)
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto free_pe;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto free_pe;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

free_pe:
	kfree(pe);
	return ret;
}
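/* Note: for each supported L4 protocol two IPv4 TCAM entries are installed
 * below - one that additionally sets the "IP fragmented" result bit and one
 * for non-fragmented packets (fragment offset/flags bytes forced to zero).
 */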
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
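/* Note: the per-port init below programs three things for every port: the
 * first lookup ID, the maximum number of parser loops and the initial
 * header-extraction offset.
 */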
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
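/* Note: each ethertype entry above also records per-entry "udf" and "finish"
 * flags in the shadow table, so later shadow lookups (e.g. by udf type) can
 * tell these default L2 entries apart.
 */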
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88a8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88a8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
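/* Note: mvpp2_prs_default_init() above wipes the whole TCAM/SRAM, then
 * installs the default entries in order: flows, Marvell header, MAC, DSA,
 * ethertypes, VLAN, PPPoE, IPv6 and finally IPv4.
 */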
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through the all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
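/* Replace the device's unicast DA filter: drop the parser entry for the old
 * address, accept the new one, then store it in the net_device.
 */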
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}
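/* Select the per-port tagging mode: add the port to the DSA or EDSA parser
 * entries that match the requested type and remove it from the others.
 */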
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry does not exist */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}
/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->phys_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;
	atomic_set(&bm_pool->in_use, 0);

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 vaddr;

		/* Get buffer virtual address (indirect access) */
		buf_phys_addr = mvpp2_read(priv,
					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);

		dma_unmap_single(dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		if (!vaddr)
			break;
		dev_kfree_skb_any((struct sk_buff *)vaddr);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->phys_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Allocate skb for BM pool */
static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
				       struct mvpp2_bm_pool *bm_pool,
				       dma_addr_t *buf_phys_addr,
				       gfp_t gfp_mask)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
	if (!skb)
		return NULL;

	phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	*buf_phys_addr = phys_addr;

	return skb;
}
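/* Note: the BM "cookie" is a u32 that carries the pool number in the byte at
 * MVPP2_BM_COOKIE_POOL_OFFS; the two helpers below pack and unpack it.
 */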
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     u32 buf_phys_addr, u32 buf_virt_addr)
{
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
}

/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
				 u32 buf_phys_addr, u32 buf_virt_addr,
				 int mc_id)
{
	u32 val = 0;

	val |= (mc_id & MVPP2_BM_MC_ID_MASK);
	mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);

	mvpp2_bm_pool_put(port, pool,
			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
			  buf_virt_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      u32 phys_addr, u32 cookie)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct sk_buff *skb;
	int i, buf_size, total_size;
	u32 bm;
	dma_addr_t phys_addr;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
	for (i = 0; i < buf_num; i++) {
		skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
		if (!skb)
			break;

		mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;
	bm_pool->in_use_thresh = bm_pool->buf_num / 4;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
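/* Note: each port attaches one long and one short BM pool and points all of
 * its rxqs at them; buffers are (re)allocated when the packet size grows or
 * the pool has not been used yet.
 */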
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));

	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
		    (MVPP2_CAUSE_MISC_SUM_MASK |
		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	/* Configure port to loopback if needed */
	if (port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port);

	/* Update TX FIFO MIN Threshold */
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	/* Min. TX threshold must be less than minimal packet length */
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}

/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		dma_addr_t buf_phys_addr =
				    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];

		mvpp2_txq_inc_get(txq_pcpu);

		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
				 skb_headlen(skb), DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_phys, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address */
	/* indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_phys);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(rx_desc);

		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
				  rx_desc->buf_cookie);
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
					   sizeof(*txq_pcpu->tx_skb),
					   GFP_KERNEL);
		if (!txq_pcpu->tx_skb)
			goto error;

		txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
					     sizeof(dma_addr_t), GFP_KERNEL);
		if (!txq_pcpu->tx_buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
		kfree(txq_pcpu->tx_buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_phys);

	return -ENOMEM;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
		kfree(txq_pcpu->tx_buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}
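
/* Arm the per-CPU Tx-done hrtimer unless it is already pending */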
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, int is_recycle)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	if (is_recycle &&
	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
		return 0;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	atomic_dec(&bm_pool->in_use);
	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
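
/* Walk the buffer-header chain of a multi-buffer frame and return each
 * buffer to its BM pool.
 */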
static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
			      struct mvpp2_rx_desc *rx_desc)
{
	struct mvpp2_buff_hdr *buff_hdr;
	struct sk_buff *skb;
	u32 rx_status = rx_desc->status;
	u32 buff_phys_addr;
	u32 buff_virt_addr;
	u32 buff_phys_addr_next;
	u32 buff_virt_addr_next;
	int mc_id;
	int pool_id;

	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	buff_phys_addr = rx_desc->buf_phys_addr;
	buff_virt_addr = rx_desc->buf_cookie;

	do {
		skb = (struct sk_buff *)buff_virt_addr;
		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

		/* Release buffer */
		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
				     buff_virt_addr, mc_id);

		buff_phys_addr = buff_phys_addr_next;
		buff_virt_addr = buff_virt_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		dma_addr_t phys_addr;
		u32 bm, rx_status;
		int pool, rx_bytes, err;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
		phys_addr = rx_desc->buf_phys_addr;

		bm = mvpp2_bm_cookie_build(rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];
		/* Check if buffer header is used */
		if (rx_status & MVPP2_RXD_BUF_HDR) {
			mvpp2_buff_hdr_rx(port, rx_desc);
			continue;
		}

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
					  rx_desc->buf_cookie);
			continue;
		}

		skb = (struct sk_buff *)rx_desc->buf_cookie;

		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;
		atomic_inc(&bm_pool->in_use);

		skb_reserve(skb, MVPP2_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
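
/* Undo the DMA mapping described by a Tx descriptor and release the
 * descriptor back to the queue.
 */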
static inline void
tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_unmap_single(dev, desc->buf_phys_addr,
			 desc->data_size, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_phys_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		tx_desc->phys_txq = txq->id;
		tx_desc->data_size = frag->size;

		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
					       tx_desc->data_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVPP2_TXD_L_DESC;
			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_phys_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	tx_desc->phys_txq = txq->id;
	tx_desc->data_size = skb_headlen(skb);

	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
				       tx_desc->data_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
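
/* Report the error conditions flagged in the ISR misc cause bits */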
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete(napi);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(port->phy_dev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(port->phy_dev);
}

/* Return positive if MTU is valid */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}

static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->phy_dev = phy_dev;
	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	phy_disconnect(port->phy_dev);
	port->phy_dev = NULL;
}
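
/* ndo_open: program the parser for this port, allocate the Rx/Tx queues and
 * the IRQ, then start the port.
 */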
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
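
/* ndo_set_rx_mode: program promiscuous and multicast filtering in the parser */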
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "fail to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	mtu = mvpp2_check_mtu_valid(dev, mtu);
	if (mtu < 0) {
		err = mtu;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "fail to change MTU\n");
	return err;
}
static struct rtnl_link_stats64 *
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;
		unsigned int start;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	if (!port->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_gset(port->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_sset(port->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "fail to change ring parameters");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_settings	= mvpp2_ethtool_get_settings,
	.set_settings	= mvpp2_ethtool_set_settings,
	.set_coalesce	= mvpp2_ethtool_set_coalesce,
	.get_coalesce	= mvpp2_ethtool_get_coalesce,
	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
	.get_ringparam	= mvpp2_ethtool_get_ringparam,
	.set_ringparam	= mvpp2_ethtool_set_ringparam,
};

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}
5989 /* Initialize port HW */
5990 static int mvpp2_port_init(struct mvpp2_port *port)
5992 	struct device *dev = port->dev->dev.parent;
5993 	struct mvpp2 *priv = port->priv;
5994 	struct mvpp2_txq_pcpu *txq_pcpu;
5995 	int queue, cpu, err;
5997 	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6001 	mvpp2_egress_disable(port);
6002 	mvpp2_port_disable(port);
6004 	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6009 	/* Associate physical Tx queues to this port and initialize.
6010 	 * The mapping is predefined.
6011 	 */
6012 	for (queue = 0; queue < txq_number; queue++) {
6013 		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6014 		struct mvpp2_tx_queue *txq;
6016 		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6020 		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6023 			goto err_free_percpu;
6026 		txq->id = queue_phy_id;
6027 		txq->log_id = queue;
6028 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6029 		for_each_present_cpu(cpu) {
6030 			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6031 			txq_pcpu->cpu = cpu;
6034 		port->txqs[queue] = txq;
6037 	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6041 		goto err_free_percpu;
6044 	/* Allocate and initialize Rx queue for this port */
6045 	for (queue = 0; queue < rxq_number; queue++) {
6046 		struct mvpp2_rx_queue *rxq;
6048 		/* Map physical Rx queue to port's logical Rx queue */
6049 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6052 			goto err_free_percpu;
6054 		/* Map this Rx queue to a physical queue */
6055 		rxq->id = port->first_rxq + queue;
6056 		rxq->port = port->id;
6057 		rxq->logic_rxq = queue;
6059 		port->rxqs[queue] = rxq;
6062 	/* Configure Rx queue group interrupt for this port */
6063 	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6065 	/* Create Rx descriptor rings */
6066 	for (queue = 0; queue < rxq_number; queue++) {
6067 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6069 		rxq->size = port->rx_ring_size;
6070 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6071 		rxq->time_coal = MVPP2_RX_COAL_USEC;
6074 	mvpp2_ingress_disable(port);
6076 	/* Port default configuration */
6077 	mvpp2_defaults_set(port);
6079 	/* Port's classifier configuration */
6080 	mvpp2_cls_oversize_rxq_set(port);
6081 	mvpp2_cls_port_config(port);
6083 	/* Provide an initial Rx packet size */
6084 	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6086 	/* Initialize pools for swf */
6087 	err = mvpp2_swf_bm_pool_init(port);
6089 		goto err_free_percpu;
6094 	for (queue = 0; queue < txq_number; queue++) {
6095 		if (!port->txqs[queue])
6097 		free_percpu(port->txqs[queue]->pcpu);
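/* Summary of the mapping set up by mvpp2_port_init(): each logical Tx
 * queue is bound to the predefined physical queue returned by
 * mvpp2_txq_phys(port->id, queue), while the port's Rx queues occupy a
 * contiguous block of physical queues starting at port->first_rxq.  The
 * err_free_percpu path releases any per-CPU areas already allocated for
 * the Tx queues.
 */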
6102 /* Ports initialization */
6103 static int mvpp2_port_probe(struct platform_device *pdev,
6104 			    struct device_node *port_node,
6106 			    int *next_first_rxq)
6108 	struct device_node *phy_node;
6109 	struct mvpp2_port *port;
6110 	struct mvpp2_port_pcpu *port_pcpu;
6111 	struct net_device *dev;
6112 	struct resource *res;
6113 	const char *dt_mac_addr;
6114 	const char *mac_from;
6115 	char hw_mac_addr[ETH_ALEN];
6119 	int priv_common_regs_num = 2;
6122 	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6127 	phy_node = of_parse_phandle(port_node, "phy", 0);
6129 		dev_err(&pdev->dev, "missing phy\n");
6131 		goto err_free_netdev;
6134 	phy_mode = of_get_phy_mode(port_node);
6136 		dev_err(&pdev->dev, "incorrect phy mode\n");
6138 		goto err_free_netdev;
6141 	if (of_property_read_u32(port_node, "port-id", &id)) {
6143 		dev_err(&pdev->dev, "missing port-id value\n");
6144 		goto err_free_netdev;
6147 	dev->tx_queue_len = MVPP2_MAX_TXD;
6148 	dev->watchdog_timeo = 5 * HZ;
6149 	dev->netdev_ops = &mvpp2_netdev_ops;
6150 	dev->ethtool_ops = &mvpp2_eth_tool_ops;
6152 	port = netdev_priv(dev);
6154 	port->irq = irq_of_parse_and_map(port_node, 0);
6155 	if (port->irq <= 0) {
6157 		goto err_free_netdev;
6160 	if (of_property_read_bool(port_node, "marvell,loopback"))
6161 		port->flags |= MVPP2_F_LOOPBACK;
6165 	port->first_rxq = *next_first_rxq;
6166 	port->phy_node = phy_node;
6167 	port->phy_interface = phy_mode;
6169 	res = platform_get_resource(pdev, IORESOURCE_MEM,
6170 				    priv_common_regs_num + id);
6171 	port->base = devm_ioremap_resource(&pdev->dev, res);
6172 	if (IS_ERR(port->base)) {
6173 		err = PTR_ERR(port->base);
6177 	/* Alloc per-cpu stats */
6178 	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6184 	dt_mac_addr = of_get_mac_address(port_node);
6185 	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6186 		mac_from = "device tree";
6187 		ether_addr_copy(dev->dev_addr, dt_mac_addr);
6189 		mvpp2_get_mac_address(port, hw_mac_addr);
6190 		if (is_valid_ether_addr(hw_mac_addr)) {
6191 			mac_from = "hardware";
6192 			ether_addr_copy(dev->dev_addr, hw_mac_addr);
6194 			mac_from = "random";
6195 			eth_hw_addr_random(dev);
6199 	port->tx_ring_size = MVPP2_MAX_TXD;
6200 	port->rx_ring_size = MVPP2_MAX_RXD;
6202 	SET_NETDEV_DEV(dev, &pdev->dev);
6204 	err = mvpp2_port_init(port);
6206 		dev_err(&pdev->dev, "failed to init port %d\n", id);
6207 		goto err_free_stats;
6209 	mvpp2_port_power_up(port);
6211 	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6214 		goto err_free_txq_pcpu;
6217 	for_each_present_cpu(cpu) {
6218 		port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6220 		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6221 			     HRTIMER_MODE_REL_PINNED);
6222 		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6223 		port_pcpu->timer_scheduled = false;
6225 		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6226 			     (unsigned long)dev);
6229 	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6230 	features = NETIF_F_SG | NETIF_F_IP_CSUM;
6231 	dev->features = features | NETIF_F_RXCSUM;
6232 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6233 	dev->vlan_features |= features;
6235 	err = register_netdev(dev);
6237 		dev_err(&pdev->dev, "failed to register netdev\n");
6238 		goto err_free_port_pcpu;
6240 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6242 	/* Increment the first Rx queue number to be used by the next port */
6243 	*next_first_rxq += rxq_number;
6244 	priv->port_list[id] = port;
6248 	free_percpu(port->pcpu);
6250 	for (i = 0; i < txq_number; i++)
6251 		free_percpu(port->txqs[i]->pcpu);
6253 	free_percpu(port->stats);
6255 	irq_dispose_mapping(port->irq);
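/* mvpp2_port_probe() picks the MAC address in priority order: a valid
 * address from the port's device-tree node (via of_get_mac_address()),
 * then the address already programmed in hardware, and finally a random
 * one.  The error path (labels elided in this listing) unwinds in reverse
 * order of allocation: per-port per-CPU data, per-txq per-CPU data,
 * statistics, the mapped IRQ and, last, the net_device itself.
 */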
6261 /* Ports removal routine */
6262 static void mvpp2_port_remove(struct mvpp2_port *port)
6266 	unregister_netdev(port->dev);
6267 	free_percpu(port->pcpu);
6268 	free_percpu(port->stats);
6269 	for (i = 0; i < txq_number; i++)
6270 		free_percpu(port->txqs[i]->pcpu);
6271 	irq_dispose_mapping(port->irq);
6272 	free_netdev(port->dev);
6275 /* Initialize decoding windows */
6276 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6282 	for (i = 0; i < 6; i++) {
6283 		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6284 		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6287 			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6292 	for (i = 0; i < dram->num_cs; i++) {
6293 		const struct mbus_dram_window *cs = dram->cs + i;
6295 		mvpp2_write(priv, MVPP2_WIN_BASE(i),
6296 			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6297 			    dram->mbus_dram_target_id);
6299 		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6300 			    (cs->size - 1) & 0xffff0000);
6302 		win_enable |= (1 << i);
6305 	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
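/* Window programming above: all six address decoding windows are first
 * cleared (base, size and, where applicable, remap), then one window per
 * DRAM chip-select is programmed from the mbus_dram_target_info and the
 * corresponding enable bits are written to MVPP2_BASE_ADDR_ENABLE.
 */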
6308 /* Initialize Rx FIFOs */
6309 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6313 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6314 		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6315 			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
6316 		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6317 			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6320 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6321 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
6322 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6325 /* Initialize network controller common part HW */
6326 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6328 	const struct mbus_dram_target_info *dram_target_info;
6332 	/* Checks for hardware constraints */
6333 	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6334 	    (txq_number > MVPP2_MAX_TXQ)) {
6335 		dev_err(&pdev->dev, "invalid queue size parameter\n");
6339 	/* MBUS windows configuration */
6340 	dram_target_info = mv_mbus_dram_info();
6341 	if (dram_target_info)
6342 		mvpp2_conf_mbus_windows(dram_target_info, priv);
6344 	/* Disable HW PHY polling */
6345 	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6346 	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6347 	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6349 	/* Allocate and initialize aggregated TXQs */
6350 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6351 				       sizeof(struct mvpp2_tx_queue),
6353 	if (!priv->aggr_txqs)
6356 	for_each_present_cpu(i) {
6357 		priv->aggr_txqs[i].id = i;
6358 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6359 		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6360 					  MVPP2_AGGR_TXQ_SIZE, i, priv);
6366 	mvpp2_rx_fifo_init(priv);
6368 	/* Reset Rx queue group interrupt configuration */
6369 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
6370 		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6372 	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6373 	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6375 	/* Allow cache snoop when transmitting packets */
6376 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6378 	/* Buffer Manager initialization */
6379 	err = mvpp2_bm_init(pdev, priv);
6383 	/* Parser default initialization */
6384 	err = mvpp2_prs_default_init(pdev, priv);
6388 	/* Classifier default initialization */
6389 	mvpp2_cls_init(priv);
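/* mvpp2_init() enforces the queue-count constraints (rxq_number must be a
 * multiple of 4 and no more than MVPP2_MAX_RXQ; txq_number no more than
 * MVPP2_MAX_TXQ) and then brings up the common hardware in order: MBUS
 * windows, PHY polling disable, one aggregated Tx queue per present CPU,
 * Rx FIFOs, buffer manager, parser defaults and classifier defaults.
 */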
6394 static int mvpp2_probe(struct platform_device *pdev)
6396 	struct device_node *dn = pdev->dev.of_node;
6397 	struct device_node *port_node;
6399 	struct resource *res;
6400 	int port_count, first_rxq;
6403 	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6407 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6408 	priv->base = devm_ioremap_resource(&pdev->dev, res);
6409 	if (IS_ERR(priv->base))
6410 		return PTR_ERR(priv->base);
6412 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6413 	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6414 	if (IS_ERR(priv->lms_base))
6415 		return PTR_ERR(priv->lms_base);
6417 	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6418 	if (IS_ERR(priv->pp_clk))
6419 		return PTR_ERR(priv->pp_clk);
6420 	err = clk_prepare_enable(priv->pp_clk);
6424 	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6425 	if (IS_ERR(priv->gop_clk)) {
6426 		err = PTR_ERR(priv->gop_clk);
6429 	err = clk_prepare_enable(priv->gop_clk);
6433 	/* Get system's tclk rate */
6434 	priv->tclk = clk_get_rate(priv->pp_clk);
6436 	/* Initialize network controller */
6437 	err = mvpp2_init(pdev, priv);
6439 		dev_err(&pdev->dev, "failed to initialize controller\n");
6443 	port_count = of_get_available_child_count(dn);
6444 	if (port_count == 0) {
6445 		dev_err(&pdev->dev, "no ports enabled\n");
6450 	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6451 				       sizeof(struct mvpp2_port *),
6453 	if (!priv->port_list) {
6458 	/* Initialize ports */
6460 	for_each_available_child_of_node(dn, port_node) {
6461 		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6466 	platform_set_drvdata(pdev, priv);
6470 	clk_disable_unprepare(priv->gop_clk);
6472 	clk_disable_unprepare(priv->pp_clk);
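/* Resources expected by mvpp2_probe(): two memory regions (the packet
 * processor registers mapped at priv->base and the LMS registers at
 * priv->lms_base) plus the "pp_clk" and "gop_clk" clocks; the system tclk
 * rate is taken from "pp_clk".  Ports are then probed from the available
 * child nodes of the controller's device-tree node.
 */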
6476 static int mvpp2_remove(struct platform_device *pdev)
6478 	struct mvpp2 *priv = platform_get_drvdata(pdev);
6479 	struct device_node *dn = pdev->dev.of_node;
6480 	struct device_node *port_node;
6483 	for_each_available_child_of_node(dn, port_node) {
6484 		if (priv->port_list[i])
6485 			mvpp2_port_remove(priv->port_list[i]);
6489 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6490 		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6492 		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6495 	for_each_present_cpu(i) {
6496 		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6498 		dma_free_coherent(&pdev->dev,
6499 				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6501 				  aggr_txq->descs_phys);
6504 	clk_disable_unprepare(priv->pp_clk);
6505 	clk_disable_unprepare(priv->gop_clk);
6510 static const struct of_device_id mvpp2_match[] = {
6511 	{ .compatible = "marvell,armada-375-pp2" },
6514 MODULE_DEVICE_TABLE(of, mvpp2_match);
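/* Illustrative device-tree sketch, assembled only from the properties this
 * driver reads (assumed layout and placeholder values; the binding
 * document is authoritative).  The reg entries are: packet-processor
 * registers, LMS registers, then one region per port at index
 * 2 + port-id (priv_common_regs_num == 2 above).  The optional boolean
 * "marvell,loopback" puts a port in loopback mode.
 *
 *	ethernet-controller {
 *		compatible = "marvell,armada-375-pp2";
 *		reg = <...>, <...>, <...>;
 *		clocks = <...>, <...>;
 *		clock-names = "pp_clk", "gop_clk";
 *
 *		eth0: port@0 {
 *			interrupts = <...>;
 *			port-id = <0>;
 *			phy = <&phy0>;
 *			phy-mode = "rgmii-id";
 *		};
 *	};
 */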
6516 static struct platform_driver mvpp2_driver = {
6517 	.probe = mvpp2_probe,
6518 	.remove = mvpp2_remove,
6520 		.name = MVPP2_DRIVER_NAME,
6521 		.of_match_table = mvpp2_match,
6525 module_platform_driver(mvpp2_driver);
6527 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6528 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6529 MODULE_LICENSE("GPL v2");