// SPDX-License-Identifier: GPL-2.0-only
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"
/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
	 * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
	 * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}
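
/* Illustrative note (not in the original source): reading the bitmap
 * description above, bit 0 of burst_map would correspond to BLEN4, bit 1 to
 * BLEN8, and so on up to bit 6 for BLEN256. For example, a hypothetical
 * caller wanting fixed bursts of up to 16 beats might pass:
 *
 *	sxgbe_dma_init(ioaddr, 1, 0x7);	// 0x7 ~ BLEN4 | BLEN8 | BLEN16
 */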
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}
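
/* Illustrative note (not in the original source): the tail pointer written
 * above is the bus address of the last descriptor in the ring. For example,
 * with a hypothetical 512-entry TX ring (t_rsize = 512), the value written
 * would be lower_32_bits(dma_tx + 511 * SXGBE_DESC_SIZE_BYTES).
 */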
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}
static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
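
/* Illustrative note (not in the original source): the return value is a
 * bitmask built from the handle_tx / tx_bump_tc / tx_hard_error flags set
 * above. A caller in the interrupt path might consume it roughly like this
 * (priv, its napi context and the recovery helper are hypothetical names):
 *
 *	int status = sxgbe_tx_dma_int_status(ioaddr, chan, &priv->xstats);
 *
 *	if (status & handle_tx)
 *		napi_schedule(&priv->napi);	// go clean the TX ring
 *	if (status & tx_hard_error)
 *		restart_tx_engine(priv, chan);	// hypothetical recovery path
 */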
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
/* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
	ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
	writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
}
static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init				= sxgbe_dma_init,
	.cha_init			= sxgbe_dma_channel_init,
	.enable_dma_transmission	= sxgbe_enable_dma_transmission,
	.enable_dma_irq			= sxgbe_enable_dma_irq,
	.disable_dma_irq		= sxgbe_disable_dma_irq,
	.start_tx			= sxgbe_dma_start_tx,
	.start_tx_queue			= sxgbe_dma_start_tx_queue,
	.stop_tx			= sxgbe_dma_stop_tx,
	.stop_tx_queue			= sxgbe_dma_stop_tx_queue,
	.start_rx			= sxgbe_dma_start_rx,
	.stop_rx			= sxgbe_dma_stop_rx,
	.tx_dma_int_status		= sxgbe_tx_dma_int_status,
	.rx_dma_int_status		= sxgbe_rx_dma_int_status,
	.rx_watchdog			= sxgbe_dma_rx_watchdog,
	.enable_tso			= sxgbe_enable_tso,
};
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}
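
/* Illustrative usage sketch (not part of the original file): callers reach
 * this hardware layer through the ops table rather than calling the static
 * functions directly. Assuming ioaddr, channel and ring parameters are set
 * up elsewhere, a hypothetical probe/open path might look roughly like:
 *
 *	const struct sxgbe_dma_ops *dma = sxgbe_get_dma_ops();
 *
 *	dma->init(ioaddr, fix_burst, burst_map);
 *	dma->cha_init(ioaddr, chan, fix_burst, pbl,
 *		      tx_ring_phys, rx_ring_phys, tx_rsize, rx_rsize);
 *	dma->enable_dma_irq(ioaddr, chan);
 *	dma->start_tx_queue(ioaddr, chan);
 *	dma->start_rx(ioaddr, rx_channels);
 */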