/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
24 #include "hns_dsaf_main.h"
25 #include "hns_dsaf_ppe.h"
26 #include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/* Because default mtu is 1500, rcb buffer size is set to 2048 enough */
#define RCB_DEFAULT_BUFFER_SIZE 2048

/**
 *hns_rcb_wait_fbd_clean - clean fbd
 *@qs: ring struct pointer array
 *@q_num: num of array
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);

		/* advance to the next queue once this one has drained */
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag:ring flag tx or rx
 *@mask:mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}
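
/*
 * Note the mask polarity: a non-zero @mask writes 1 to the INTMSK
 * registers, masking (disabling) the ring interrupts, while zero
 * unmasks them. A sketch of quiescing a v1 queue before reset might
 * therefore look like (illustrative only):
 *
 *	hns_rcb_int_ctrl_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX, 1);
 *	hns_rcb_int_clr_hw(q, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
 */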

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: rcb ring queue
 *@val: value to write
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after breakpoint */
}

/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
		       bd_size_type);
}

/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
		       bd_size_type);
}

/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_rx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_tx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm +
			       HNS_RCB_TX_PKTLINE_OFFSET);
	}
}
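
/*
 * The BASEADDR_H writes above split the shift as (dma >> 31) >> 1
 * instead of dma >> 32: when dma_addr_t is a 32-bit type, a single
 * 32-bit shift would be undefined behaviour in C, while the split form
 * safely yields 0. With a 64-bit dma of, say, 0x1_2345_6789, the L/H
 * registers receive 0x23456789 and 0x1 respectively.
 */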

/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port description num
 *@rcb_common: rcb_common device
 *@port_idx:port index
 *@desc_cnt:BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	} else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
		if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       HNS_RCB_DEF_GAP_TIME_USECS);
		else
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       timeout);

		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	} else {
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	}
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;

	return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear rcb comm exception irq */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful; /* 1 is en, 0 is dis */

	/* clear the interrupt status */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* enable or mask the interrupts */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx bd does not need a cacheline flush, so always mask
	 * sf_txring_fbd_intmask (bit 1)
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (0x1 != (reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		hns_rcb_set_rx_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
		if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
		    !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
			hns_rcb_set_tx_coalesced_frames(
				rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}
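
/*
 * For example, the default ring buffer resolves to the 2048-byte type,
 * while an unsupported size is rejected:
 *
 *	hns_rcb_buf_size2type(RCB_DEFAULT_BUFFER_SIZE)
 *		== HNS_BD_SIZE_2048_TYPE
 *	hns_rcb_buf_size2type(1500) == -EINVAL
 */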

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				 HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
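
/*
 * Rings are grouped per port as max_vfn * max_q_per_vf consecutive
 * indices. For instance, in a mode with max_vfn = 16 and
 * max_q_per_vf = 1, ring_idx 0..15 belong to port 0 and ring_idx
 * 16..31 to port 1.
 */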

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);

	return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
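
/*
 * Each ring pair gets an HNS_RCB_REG_OFFSET-sized register window
 * starting 0x10000 past the RCB common block, so e.g. ring 2 maps to
 * base + 0x10000 + 2 * HNS_RCB_REG_OFFSET.
 */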
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
		is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
		is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
		    (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
			return -EPROBE_DEFER;

		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}

	return 0;
}
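
/*
 * The IRQ layout differs between hardware versions: v1 uses two
 * consecutive lines per ring pair in (tx, rx) order, v2 uses three
 * with rx first. E.g. for ring pair 1 on v2 the code above requests
 * base_irq_idx + 3 for rx and base_irq_idx + 4 for tx; the third line
 * of each v2 triple is not requested here.
 */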

/**
 *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	u64 reg;

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	return dsaf_read_dev(rcb_common, reg);
}

/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;

	return dsaf_read_dev(rcb_common,
			     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@timeout:tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: not support coalesce_usecs setting!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_usecs setting supports 1~1023us\n");
		return -EINVAL;
	}
	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}
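
/*
 * On v1 the timeout register holds clock cycles rather than
 * microseconds, so hns_rcb_set_port_timeout() scales the value by
 * HNS_RCB_CLK_FREQ_MHZ on write (e.g. 3 us -> 3 * HNS_RCB_CLK_FREQ_MHZ)
 * and hns_rcb_get_coalesce_usecs() divides it back out; v2 ports store
 * microseconds directly, one register per port.
 */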

/**
 *hns_rcb_set_tx_coalesced_frames - set rcb coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@coalesced_frames:tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
	u64 reg;

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames != 1) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support tx coalesce_frames setting!\n");
		return -EINVAL;
	}

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	dsaf_write_dev(rcb_common, reg, coalesced_frames);
	return 0;
}

/**
 *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@coalesced_frames:tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce_frames setting!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *						according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@max_vfn : max vfn number
 *@max_q_per_vf:max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
	}
}
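
/*
 * As an example of the mapping, DSAF_MODE_DISABLE_2PORT_64VM reports
 * 64 VFs of one queue each per port, which lines up with the 128 ring
 * pairs hns_rcb_get_ring_num() below returns for that mode across its
 * two ports.
 */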

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail, use default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
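
/*
 * The PKTNUM_RECORD registers behave as read-then-reset counters here:
 * each read is accumulated into the software totals and the register
 * is immediately rearmed by writing 0x1, so each hardware delta is
 * only counted once per call.
 */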

/**
 *hns_rcb_get_stats - get rcb statistic
 *@queue: rcb ring queue
 *@data:statistic value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset:ethtool cmd
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return ring regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset:string set index
 *@data:strings name value
 *@index:queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}
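
/*
 * The caller is expected to provide at least
 * hns_rcb_get_ring_sset_count(ETH_SS_STATS) * ETH_GSTRING_LEN bytes;
 * the names written above correspond index-for-index to the values
 * that hns_rcb_get_stats() fills in.
 */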

void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/*rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/*rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	/* note: index 14 is not written by this dump */
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}