// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/* Because the default MTU is 1500, an rcb buffer size of 2048 is enough */
#define RCB_DEFAULT_BUFFER_SIZE 2048

/**
 *hns_rcb_wait_fbd_clean - wait for the queues' in-flight FBDs to drain
 *@qs: ring struct pointer array
 *@q_num: number of queues in the array
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int wait_cnt;
	int i;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);

		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}
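
/*
 * Note on the wait loop above: i only advances once queue i reads back
 * zero in-flight FBDs, while wait_cnt is a shared budget of 10000 polls;
 * exhausting the budget breaks out and the stuck queue is logged.
 */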

int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
{
	u32 head, tail;
	int wait_cnt;

	tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
	wait_cnt = 0;
	while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
		head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
		if (tail == head)
			break;

		usleep_range(100, 200);
	}

	if (wait_cnt >= HNS_MAX_WAIT_CNT) {
		dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}
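
/*
 * Reset handshake used above: writing T0_BE_RST requests the ring reset
 * and COULD_BE_RST reads back whether the hardware accepted it; the
 * request is re-toggled up to RCB_RESET_WAIT_TIMES times per attempt
 * before the whole sequence is retried or reported as failed.
 */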

/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag: ring flag, tx or rx
 *@mask: irq mask value
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: rcb ring
 *@val: value to write
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after breakpoint */
}

/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
		       bd_size_type);
}

/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
		       bd_size_type);
}

/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_rx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_tx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm +
			       HNS_RCB_TX_PKTLINE_OFFSET);
	}
}
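
/*
 * The descriptor base address is split across two 32-bit registers; the
 * high half is computed as ((dma >> 31) >> 1) instead of (dma >> 32) so
 * the shift stays well-defined when dma_addr_t is only 32 bits wide.
 */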

/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port description num
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *@desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	} else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
		if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       HNS_RCB_DEF_GAP_TIME_USECS);
		else
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       timeout);

		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	} else {
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	}
}
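
/*
 * Version note for the helper above: v1 chips expose one global overtime
 * register counted in clock cycles, hence the timeout * HNS_RCB_CLK_FREQ_MHZ
 * conversion; v2 chips take per-port microsecond values plus an interrupt
 * gap time clamped to HNS_RCB_DEF_GAP_TIME_USECS.
 */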

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;

	return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clr rcb comm exception irq */
static void hns_rcb_comm_exc_irq_en(
	struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clr int */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* en msk */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx bd does not need a cacheline, so mask sf_txring_fbd_intmask (bit 1) */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (0x1 != (reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		hns_rcb_set_rx_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
		if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
		    !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
			hns_rcb_set_tx_coalesced_frames(
				rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}
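
/*
 * Only the four buffer sizes handled above have hardware BD-length
 * encodings; any other size maps to -EINVAL.
 */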

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = ring_pair_cb->q.io_base +
				HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
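
/*
 * Each port owns a contiguous block of max_vfn * max_q_per_vf rings in
 * the common block, so plain integer division recovers the owning port
 * from a ring index.
 */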

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);

	return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
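
/*
 * Per-ring register windows start 0x10000 into the common block and are
 * spaced HNS_RCB_REG_OFFSET bytes apart, one window per ring pair.
 */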

/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
		is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
		is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
		    (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
			return -EPROBE_DEFER;

		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}

	return 0;
}
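
/*
 * IRQ layout assumed by the loop above: v1 hardware exposes two IRQs per
 * ring pair (tx at base + 2 * i, rx right after it), while v2 exposes
 * three per ring pair with rx at base + 3 * i and tx at base + 3 * i + 1.
 */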

/**
 *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	u64 reg;

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	return dsaf_read_dev(rcb_common, reg);
}

/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;

	return dsaf_read_dev(rcb_common,
			     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@timeout: tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: not support coalesce_usecs setting!\n");
			return -EINVAL;
		}
	}

	if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_usecs setting supports 1~1023us\n");
		return -EINVAL;
	}
	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}
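
/*
 * On v1 the timeout lives in the single chip-global RCB_CFG_OVERTIME_REG
 * (note the missing per-port offset), so changing it on a multi-port
 * (non-debug) chip would clobber every port; presumably that is why the
 * setting is rejected there.
 */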

/**
 *hns_rcb_set_tx_coalesced_frames - set rcb coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
	u64 reg;

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames != 1) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support tx coalesce_frames setting!\n");
		return -EINVAL;
	}

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	dsaf_write_dev(rcb_common, reg, coalesced_frames);
	return 0;
}

/**
 *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce_frames setting!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 * according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@max_vfn: max vfn number
 *@max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}

static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail, use default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}
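
/*
 * Both helpers above locate the RCB common register block at
 * RCB_COMMON_REG_OFFSET (0x80000) within the PPE register space, giving
 * the virtual and physical views respectively.
 */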

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev,
			     struct_size(rcb_common, ring_pair_cb, ring_num),
			     GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
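
/*
 * The PKTNUM_RECORD counters are read and then reset by the 0x1 write,
 * so the totals accumulate in hw_stats across calls.
 */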

/**
 *hns_rcb_get_stats - get rcb statistic
 *@queue: rcb ring queue
 *@data: statistic value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset: ethtool cmd
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset: string set index
 *@data: strings name value
 *@index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}