// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 */

#include "xgene_enet_main.h"
/* interfaces to convert structures to HW-recognized bit formats */
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg = SET_VAL(SB_IPFRAG, frag) |
	       SET_VAL(SB_IPPROT, type) |
	       SET_VAL(SB_IPVER, ver) |
	       SET_VAL(SB_HDRLEN, len);
}
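/* Illustrative example for xgene_cle_sband_to_hw(): callers below pack
 * the header length as hdr_len = (mac_hdr_len << 5) | ipv4_ihl, so a
 * standard 14-byte Ethernet header with a minimal 5-word IPv4 header
 * yields (14 << 5) | 5 = 0x1c5; this helper then merges that length
 * with the fragment flag, protocol and IP version into one sideband
 * word.
 */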
static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
				u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	if (pdata->enet_id == XGENE_ENET1) {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL1, fpsel) |
			   SET_VAL(IDT_NFPSEL1, nfpsel);
	} else {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL, fpsel) |
			   SET_VAL(IDT_NFPSEL, nfpsel);
	}
}
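/* Note on xgene_cle_idt_to_hw(): first-generation silicon (XGENE_ENET1)
 * packs the free-pool selectors with the IDT_FPSEL1/IDT_NFPSEL1 field
 * layout, while later generations use IDT_FPSEL/IDT_NFPSEL; the
 * destination-queue field is common to both layouts.
 */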
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}
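/* Note on xgene_cle_dbptr_to_hw(): the destination queue id straddles a
 * register boundary, so its low CLE_DSTQIDL_LEN bits go into buf[4] via
 * CLE_DSTQIDL and the remaining high bits into buf[5] via CLE_DSTQIDH.
 */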
static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}
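/* Note on xgene_cle_kn_to_hw(): each key entry is 16 bits wide, so keys
 * are packed two per DRAM word: an even-indexed key fills the low half
 * of buf[j] and the following odd-indexed key is shifted into the high
 * half before j advances.
 */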
static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	const struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}
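/* Note on xgene_cle_dn_to_hw(): absolute next-packet pointers that fall
 * inside packet RAM are rebased by the per-port jump byte count (jb),
 * mirroring the start_pkt adjustment in xgene_cle_enable_ptree() below,
 * so 1G and 10G ports see the same logical packet offsets.
 */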
static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}
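/* Note on xgene_cle_poll_cmd_done(): with ten polls spaced 1-2 ms
 * apart, an unresponsive engine is given roughly 10-20 ms before the
 * indirect command is reported as failed with -EBUSY.
 */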
static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}
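/* Note on xgene_cle_dram_wr(): the indirect access sequence is
 * address-first: the region/index is written to INDADDR, the payload is
 * staged into the DATA_RAM0 window one register at a time, and the
 * command written to INDCMD triggers the transfer, which is then
 * confirmed by polling INDCMD_STATUS.
 */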
static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}
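/* Note on xgene_cle_enable_ptree(): the start node index is a 14-bit
 * field (masked with 0x3fff) in SNPTR0 and the start packet pointer a
 * 9-bit field (masked with 0x1ff) in SPPTR0, programmed per parser
 * unless a single active parser is selected.
 */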
static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}
static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
	{
		/* PKT_TYPE_NODE */
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.branch = {
			{
				.next_packet_pointer = 22,
				.next_node = PKT_PROT_NODE,
			},
			{
				.next_packet_pointer = 262,
				.next_node = LAST_NODE,
			},
		},
	},
	{
		/* PKT_PROT_NODE */
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.branch = {
			{
				.next_packet_pointer = 26,
				.next_node = RSS_IPV4_TCP_NODE,
			},
			{
				.next_packet_pointer = 26,
				.next_node = RSS_IPV4_UDP_NODE,
			},
			{
				.next_packet_pointer = 26,
				.next_node = RSS_IPV4_OTHERS_NODE,
			},
		},
	},
	{
		/* RSS_IPV4_TCP_NODE */
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.branch = {
			{
				.next_packet_pointer = 28,
				.next_node = RSS_IPV4_TCP_NODE,
			},
			{
				.next_packet_pointer = 30,
				.next_node = RSS_IPV4_TCP_NODE,
			},
			{
				.next_packet_pointer = 32,
				.next_node = RSS_IPV4_TCP_NODE,
			},
			{
				.next_packet_pointer = 34,
				.next_node = RSS_IPV4_TCP_NODE,
			},
			{
				.next_packet_pointer = 36,
				.next_node = RSS_IPV4_TCP_NODE,
			},
			{
				.next_packet_pointer = 256,
				.next_node = LAST_NODE,
			},
		},
	},
	{
		/* RSS_IPV4_UDP_NODE */
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.branch = {
			{
				.next_packet_pointer = 28,
				.next_node = RSS_IPV4_UDP_NODE,
			},
			{
				.next_packet_pointer = 30,
				.next_node = RSS_IPV4_UDP_NODE,
			},
			{
				.next_packet_pointer = 32,
				.next_node = RSS_IPV4_UDP_NODE,
			},
			{
				.next_packet_pointer = 34,
				.next_node = RSS_IPV4_UDP_NODE,
			},
			{
				.next_packet_pointer = 36,
				.next_node = RSS_IPV4_UDP_NODE,
			},
			{
				.next_packet_pointer = 258,
				.next_node = LAST_NODE,
			},
		},
	},
	{
		/* RSS_IPV4_OTHERS_NODE */
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.branch = {
			{
				.next_packet_pointer = 28,
				.next_node = RSS_IPV4_OTHERS_NODE,
			},
			{
				.next_packet_pointer = 30,
				.next_node = RSS_IPV4_OTHERS_NODE,
			},
			{
				.next_packet_pointer = 32,
				.next_node = RSS_IPV4_OTHERS_NODE,
			},
			{
				.next_packet_pointer = 34,
				.next_node = RSS_IPV4_OTHERS_NODE,
			},
			{
				.next_packet_pointer = 36,
				.next_node = RSS_IPV4_OTHERS_NODE,
			},
			{
				.next_packet_pointer = 260,
				.next_node = LAST_NODE,
			},
		},
	},
	{
		/* LAST_NODE */
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.branch = {
			{
				.next_packet_pointer = 0,
				.next_node = MAX_NODES,
			},
		},
	},
};
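/* Reading the tree above: PKT_TYPE_NODE dispatches on Ethertype to
 * PKT_PROT_NODE, which fans out by IP protocol to one of the three RSS
 * nodes. Each RSS node walks the packet two bytes at a time with
 * search_byte_store set to BOTH_BYTES; packet pointers 26 through 36
 * cover the IPv4 source/destination addresses and the L4 ports, i.e.
 * the 12-byte tuple selected as RSS hash input by RSS_IPV4_12B below,
 * before the walk terminates in LAST_NODE.
 */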
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
	int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* continue node index for key node */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}
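/* Note on xgene_cle_setup_node(): decision nodes and key nodes share
 * one PTREE_RAM index space, each occupying 17 DRAM registers; key
 * nodes are written at the indices immediately following the num_dn
 * decision nodes.
 */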
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL) {
			offset = enet_cle->active_parser *
				CLE_PORT_OFFSET;
		} else {
			offset = i * CLE_PORT_OFFSET;
		}

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}
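/* Note on xgene_cle_setup_def_dbptr(): the default classification
 * pointer packs a 3-bit priority into bits 12:10 and a 10-bit result
 * database index into bits 9:0 of DFCLSRESDBPTR0, alongside the raw
 * six-register entry mirrored into the DFCLSRESDB00 window.
 */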
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPV4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}
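/* Note on xgene_cle_set_rss_sband(): sideband entries are 16 bits wide
 * and packed two per PKT_RAM word (the second entry shifted into the
 * high half), so the four packet classes occupy just two words starting
 * at the word index right after packet RAM (CLE_PKTRAM_SIZE / 4).
 */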
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4]; /* 16 Bytes */
	int ret;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);

	return ret;
}
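/* Note on xgene_cle_set_rss_skeys(): a fresh 16-byte random secret key
 * is generated on every init, so the RSS hash spreading (and therefore
 * the flow-to-queue mapping) differs from one initialization to the
 * next.
 */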
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_get_fpsel(pool_id);
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		if (pdata->rx_ring[idx]->page_pool) {
			pool_id = pdata->rx_ring[idx]->page_pool->id;
			nfpsel = xgene_enet_get_fpsel(pool_id);
		}

		xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}
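/* Note on xgene_cle_set_rss_idt(): the indirection table spreads the
 * XGENE_CLE_IDT_ENTRIES hash buckets round-robin across the enabled
 * receive queues, recording for each bucket the destination ring plus
 * the buffer-pool (and optional page-pool) selectors.
 */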
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val;
	u32 i;
	int ret;

	offset = CLE_PORT_OFFSET;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}
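/* Note on xgene_cle_setup_rss(): RSS_CTRL0 is programmed with
 * (RSS_IPV4_12B << 1) | 0x1, i.e. bit 0 enables RSS and the field above
 * it selects the 12-byte IPv4 tuple (source/destination address and
 * ports) as hash input.
 */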
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		return -EINVAL;

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */

	ret = xgene_cle_setup_rss(pdata);
	if (ret) {
		netdev_err(pdata->ndev, "RSS initialization failed\n");
		return ret;
	}

	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_get_fpsel(pool_id);

	def_nxtfpsel = 0;
	if (pdata->rx_ring[0]->page_pool) {
		pool_id = pdata->rx_ring[0]->page_pool->id;
		def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
	}

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
	dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
	dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}
const struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};