/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "xgene_enet_main.h"
/* interfaces to convert structures to HW recognized bit formats */
25 static void xgene_cle_sband_to_hw(u8 frag
, enum xgene_cle_prot_version ver
,
26 enum xgene_cle_prot_type type
, u32 len
,
29 *reg
= SET_VAL(SB_IPFRAG
, frag
) |
30 SET_VAL(SB_IPPROT
, type
) |
31 SET_VAL(SB_IPVER
, ver
) |
32 SET_VAL(SB_HDRLEN
, len
);
35 static void xgene_cle_idt_to_hw(struct xgene_enet_pdata
*pdata
,
36 u32 dstqid
, u32 fpsel
,
37 u32 nfpsel
, u32
*idt_reg
)
39 if (pdata
->enet_id
== XGENE_ENET1
) {
40 *idt_reg
= SET_VAL(IDT_DSTQID
, dstqid
) |
41 SET_VAL(IDT_FPSEL1
, fpsel
) |
42 SET_VAL(IDT_NFPSEL1
, nfpsel
);
44 *idt_reg
= SET_VAL(IDT_DSTQID
, dstqid
) |
45 SET_VAL(IDT_FPSEL
, fpsel
) |
46 SET_VAL(IDT_NFPSEL
, nfpsel
);
50 static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata
*pdata
,
51 struct xgene_cle_dbptr
*dbptr
, u32
*buf
)
53 buf
[0] = SET_VAL(CLE_DROP
, dbptr
->drop
);
54 buf
[4] = SET_VAL(CLE_FPSEL
, dbptr
->fpsel
) |
55 SET_VAL(CLE_NFPSEL
, dbptr
->nxtfpsel
) |
56 SET_VAL(CLE_DSTQIDL
, dbptr
->dstqid
);
58 buf
[5] = SET_VAL(CLE_DSTQIDH
, (u32
)dbptr
->dstqid
>> CLE_DSTQIDL_LEN
) |
59 SET_VAL(CLE_PRIORITY
, dbptr
->cle_priority
);
62 static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn
*kn
, u32
*buf
)
67 buf
[j
++] = SET_VAL(CLE_TYPE
, kn
->node_type
);
68 for (i
= 0; i
< kn
->num_keys
; i
++) {
69 struct xgene_cle_ptree_key
*key
= &kn
->key
[i
];
72 buf
[j
] = SET_VAL(CLE_KN_PRIO
, key
->priority
) |
73 SET_VAL(CLE_KN_RPTR
, key
->result_pointer
);
75 data
= SET_VAL(CLE_KN_PRIO
, key
->priority
) |
76 SET_VAL(CLE_KN_RPTR
, key
->result_pointer
);
77 buf
[j
++] |= (data
<< 16);
82 static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn
*dn
,
85 const struct xgene_cle_ptree_branch
*br
;
89 buf
[j
++] = SET_VAL(CLE_DN_TYPE
, dn
->node_type
) |
90 SET_VAL(CLE_DN_LASTN
, dn
->last_node
) |
91 SET_VAL(CLE_DN_HLS
, dn
->hdr_len_store
) |
92 SET_VAL(CLE_DN_EXT
, dn
->hdr_extn
) |
93 SET_VAL(CLE_DN_BSTOR
, dn
->byte_store
) |
94 SET_VAL(CLE_DN_SBSTOR
, dn
->search_byte_store
) |
95 SET_VAL(CLE_DN_RPTR
, dn
->result_pointer
);
97 for (i
= 0; i
< dn
->num_branches
; i
++) {
99 npp
= br
->next_packet_pointer
;
101 if ((br
->jump_rel
== JMP_ABS
) && (npp
< CLE_PKTRAM_SIZE
))
104 buf
[j
++] = SET_VAL(CLE_BR_VALID
, br
->valid
) |
105 SET_VAL(CLE_BR_NPPTR
, npp
) |
106 SET_VAL(CLE_BR_JB
, br
->jump_bw
) |
107 SET_VAL(CLE_BR_JR
, br
->jump_rel
) |
108 SET_VAL(CLE_BR_OP
, br
->operation
) |
109 SET_VAL(CLE_BR_NNODE
, br
->next_node
) |
110 SET_VAL(CLE_BR_NBR
, br
->next_branch
);
112 buf
[j
++] = SET_VAL(CLE_BR_DATA
, br
->data
) |
113 SET_VAL(CLE_BR_MASK
, br
->mask
);
117 static int xgene_cle_poll_cmd_done(void __iomem
*base
,
118 enum xgene_cle_cmd_type cmd
)
120 u32 status
, loop
= 10;
124 status
= ioread32(base
+ INDCMD_STATUS
);
129 usleep_range(1000, 2000);
135 static int xgene_cle_dram_wr(struct xgene_enet_cle
*cle
, u32
*data
, u8 nregs
,
136 u32 index
, enum xgene_cle_dram_type type
,
137 enum xgene_cle_cmd_type cmd
)
139 enum xgene_cle_parser parser
= cle
->active_parser
;
140 void __iomem
*base
= cle
->base
;
145 /* PTREE_RAM onwards, DRAM regions are common for all parsers */
146 nparsers
= (type
>= PTREE_RAM
) ? 1 : cle
->parsers
;
148 for (i
= 0; i
< nparsers
; i
++) {
150 if ((type
< PTREE_RAM
) && (parser
!= PARSER_ALL
))
153 ind_addr
= XGENE_CLE_DRAM(type
+ (port
* 4)) | index
;
154 iowrite32(ind_addr
, base
+ INDADDR
);
155 for (j
= 0; j
< nregs
; j
++)
156 iowrite32(data
[j
], base
+ DATA_RAM0
+ (j
* 4));
157 iowrite32(cmd
, base
+ INDCMD
);
159 ret
= xgene_cle_poll_cmd_done(base
, cmd
);
167 static void xgene_cle_enable_ptree(struct xgene_enet_pdata
*pdata
,
168 struct xgene_enet_cle
*cle
)
170 struct xgene_cle_ptree
*ptree
= &cle
->ptree
;
171 void __iomem
*addr
, *base
= cle
->base
;
172 u32 offset
= CLE_PORT_OFFSET
;
175 /* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
176 ptree
->start_pkt
+= cle
->jump_bytes
;
177 for (i
= 0; i
< cle
->parsers
; i
++) {
178 if (cle
->active_parser
!= PARSER_ALL
)
179 addr
= base
+ cle
->active_parser
* offset
;
181 addr
= base
+ (i
* offset
);
183 iowrite32(ptree
->start_node
& 0x3fff, addr
+ SNPTR0
);
184 iowrite32(ptree
->start_pkt
& 0x1ff, addr
+ SPPTR0
);
188 static int xgene_cle_setup_dbptr(struct xgene_enet_pdata
*pdata
,
189 struct xgene_enet_cle
*cle
)
191 struct xgene_cle_ptree
*ptree
= &cle
->ptree
;
192 u32 buf
[CLE_DRAM_REGS
];
196 memset(buf
, 0, sizeof(buf
));
197 for (i
= 0; i
< ptree
->num_dbptr
; i
++) {
198 xgene_cle_dbptr_to_hw(pdata
, &ptree
->dbptr
[i
], buf
);
199 ret
= xgene_cle_dram_wr(cle
, buf
, 6, i
+ ptree
->start_dbptr
,
208 static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn
[] = {
215 .byte_store
= NO_BYTE
,
216 .search_byte_store
= NO_BYTE
,
217 .result_pointer
= DB_RES_DROP
,
223 .next_packet_pointer
= 22,
227 .next_node
= PKT_PROT_NODE
,
234 .next_packet_pointer
= 262,
238 .next_node
= LAST_NODE
,
251 .byte_store
= NO_BYTE
,
252 .search_byte_store
= NO_BYTE
,
253 .result_pointer
= DB_RES_DROP
,
259 .next_packet_pointer
= 26,
263 .next_node
= RSS_IPV4_TCP_NODE
,
271 .next_packet_pointer
= 26,
275 .next_node
= RSS_IPV4_UDP_NODE
,
282 .next_packet_pointer
= 26,
286 .next_node
= RSS_IPV4_OTHERS_NODE
,
294 /* RSS_IPV4_TCP_NODE */
299 .byte_store
= NO_BYTE
,
300 .search_byte_store
= BOTH_BYTES
,
301 .result_pointer
= DB_RES_DROP
,
307 .next_packet_pointer
= 28,
311 .next_node
= RSS_IPV4_TCP_NODE
,
319 .next_packet_pointer
= 30,
323 .next_node
= RSS_IPV4_TCP_NODE
,
331 .next_packet_pointer
= 32,
335 .next_node
= RSS_IPV4_TCP_NODE
,
343 .next_packet_pointer
= 34,
347 .next_node
= RSS_IPV4_TCP_NODE
,
355 .next_packet_pointer
= 36,
359 .next_node
= RSS_IPV4_TCP_NODE
,
367 .next_packet_pointer
= 256,
371 .next_node
= LAST_NODE
,
379 /* RSS_IPV4_UDP_NODE */
384 .byte_store
= NO_BYTE
,
385 .search_byte_store
= BOTH_BYTES
,
386 .result_pointer
= DB_RES_DROP
,
392 .next_packet_pointer
= 28,
396 .next_node
= RSS_IPV4_UDP_NODE
,
404 .next_packet_pointer
= 30,
408 .next_node
= RSS_IPV4_UDP_NODE
,
416 .next_packet_pointer
= 32,
420 .next_node
= RSS_IPV4_UDP_NODE
,
428 .next_packet_pointer
= 34,
432 .next_node
= RSS_IPV4_UDP_NODE
,
440 .next_packet_pointer
= 36,
444 .next_node
= RSS_IPV4_UDP_NODE
,
452 .next_packet_pointer
= 258,
456 .next_node
= LAST_NODE
,
464 /* RSS_IPV4_OTHERS_NODE */
469 .byte_store
= NO_BYTE
,
470 .search_byte_store
= BOTH_BYTES
,
471 .result_pointer
= DB_RES_DROP
,
477 .next_packet_pointer
= 28,
481 .next_node
= RSS_IPV4_OTHERS_NODE
,
489 .next_packet_pointer
= 30,
493 .next_node
= RSS_IPV4_OTHERS_NODE
,
501 .next_packet_pointer
= 32,
505 .next_node
= RSS_IPV4_OTHERS_NODE
,
513 .next_packet_pointer
= 34,
517 .next_node
= RSS_IPV4_OTHERS_NODE
,
525 .next_packet_pointer
= 36,
529 .next_node
= RSS_IPV4_OTHERS_NODE
,
537 .next_packet_pointer
= 260,
541 .next_node
= LAST_NODE
,
555 .byte_store
= NO_BYTE
,
556 .search_byte_store
= NO_BYTE
,
557 .result_pointer
= DB_RES_DROP
,
562 .next_packet_pointer
= 0,
566 .next_node
= MAX_NODES
,
575 static int xgene_cle_setup_node(struct xgene_enet_pdata
*pdata
,
576 struct xgene_enet_cle
*cle
)
578 struct xgene_cle_ptree
*ptree
= &cle
->ptree
;
579 const struct xgene_cle_ptree_ewdn
*dn
= xgene_init_ptree_dn
;
580 int num_dn
= ARRAY_SIZE(xgene_init_ptree_dn
);
581 struct xgene_cle_ptree_kn
*kn
= ptree
->kn
;
582 u32 buf
[CLE_DRAM_REGS
];
585 memset(buf
, 0, sizeof(buf
));
586 for (i
= 0; i
< num_dn
; i
++) {
587 xgene_cle_dn_to_hw(&dn
[i
], buf
, cle
->jump_bytes
);
588 ret
= xgene_cle_dram_wr(cle
, buf
, 17, i
+ ptree
->start_node
,
589 PTREE_RAM
, CLE_CMD_WR
);
594 /* continue node index for key node */
595 memset(buf
, 0, sizeof(buf
));
596 for (j
= i
; j
< (ptree
->num_kn
+ num_dn
); j
++) {
597 xgene_cle_kn_to_hw(&kn
[j
- num_dn
], buf
);
598 ret
= xgene_cle_dram_wr(cle
, buf
, 17, j
+ ptree
->start_node
,
599 PTREE_RAM
, CLE_CMD_WR
);
/* Program the full parse tree: nodes first, then DB pointers, and
 * finally enable the tree.  Returns 0 or the first setup error.
 */
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}
625 static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata
*pdata
,
626 struct xgene_enet_cle
*enet_cle
,
627 struct xgene_cle_dbptr
*dbptr
,
628 u32 index
, u8 priority
)
630 void __iomem
*base
= enet_cle
->base
;
631 void __iomem
*base_addr
;
632 u32 buf
[CLE_DRAM_REGS
];
636 memset(buf
, 0, sizeof(buf
));
637 xgene_cle_dbptr_to_hw(pdata
, dbptr
, buf
);
639 for (i
= 0; i
< enet_cle
->parsers
; i
++) {
640 if (enet_cle
->active_parser
!= PARSER_ALL
) {
641 offset
= enet_cle
->active_parser
*
644 offset
= i
* CLE_PORT_OFFSET
;
647 base_addr
= base
+ DFCLSRESDB00
+ offset
;
648 for (j
= 0; j
< 6; j
++)
649 iowrite32(buf
[j
], base_addr
+ (j
* 4));
651 def_cls
= ((priority
& 0x7) << 10) | (index
& 0x3ff);
652 iowrite32(def_cls
, base
+ DFCLSRESDBPTR0
+ offset
);
656 static int xgene_cle_set_rss_sband(struct xgene_enet_cle
*cle
)
658 u32 idx
= CLE_PKTRAM_SIZE
/ sizeof(u32
);
659 u32 mac_hdr_len
= ETH_HLEN
;
665 /* Sideband: IPV4/TCP packets */
666 hdr_len
= (mac_hdr_len
<< 5) | ipv4_ihl
;
667 xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4
, XGENE_CLE_TCP
, hdr_len
, ®
);
670 /* Sideband: IPv4/UDP packets */
671 hdr_len
= (mac_hdr_len
<< 5) | ipv4_ihl
;
672 xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4
, XGENE_CLE_UDP
, hdr_len
, ®
);
673 sband
|= (reg
<< 16);
675 ret
= xgene_cle_dram_wr(cle
, &sband
, 1, idx
, PKT_RAM
, CLE_CMD_WR
);
679 /* Sideband: IPv4/RAW packets */
680 hdr_len
= (mac_hdr_len
<< 5) | ipv4_ihl
;
681 xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4
, XGENE_CLE_OTHER
,
685 /* Sideband: Ethernet II/RAW packets */
686 hdr_len
= (mac_hdr_len
<< 5);
687 xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4
, XGENE_CLE_OTHER
,
689 sband
|= (reg
<< 16);
691 ret
= xgene_cle_dram_wr(cle
, &sband
, 1, idx
+ 1, PKT_RAM
, CLE_CMD_WR
);
698 static int xgene_cle_set_rss_skeys(struct xgene_enet_cle
*cle
)
700 u32 secret_key_ipv4
[4]; /* 16 Bytes*/
703 get_random_bytes(secret_key_ipv4
, 16);
704 ret
= xgene_cle_dram_wr(cle
, secret_key_ipv4
, 4, 0,
705 RSS_IPV4_HASH_SKEY
, CLE_CMD_WR
);
709 static int xgene_cle_set_rss_idt(struct xgene_enet_pdata
*pdata
)
711 u32 fpsel
, dstqid
, nfpsel
, idt_reg
, idx
;
715 for (i
= 0; i
< XGENE_CLE_IDT_ENTRIES
; i
++) {
716 idx
= i
% pdata
->rxq_cnt
;
717 pool_id
= pdata
->rx_ring
[idx
]->buf_pool
->id
;
718 fpsel
= xgene_enet_get_fpsel(pool_id
);
719 dstqid
= xgene_enet_dst_ring_num(pdata
->rx_ring
[idx
]);
721 if (pdata
->rx_ring
[idx
]->page_pool
) {
722 pool_id
= pdata
->rx_ring
[idx
]->page_pool
->id
;
723 nfpsel
= xgene_enet_get_fpsel(pool_id
);
727 xgene_cle_idt_to_hw(pdata
, dstqid
, fpsel
, nfpsel
, &idt_reg
);
728 ret
= xgene_cle_dram_wr(&pdata
->cle
, &idt_reg
, 1, i
,
729 RSS_IDT
, CLE_CMD_WR
);
734 ret
= xgene_cle_set_rss_skeys(&pdata
->cle
);
741 static int xgene_cle_setup_rss(struct xgene_enet_pdata
*pdata
)
743 struct xgene_enet_cle
*cle
= &pdata
->cle
;
744 void __iomem
*base
= cle
->base
;
748 offset
= CLE_PORT_OFFSET
;
749 for (i
= 0; i
< cle
->parsers
; i
++) {
750 if (cle
->active_parser
!= PARSER_ALL
)
751 offset
= cle
->active_parser
* CLE_PORT_OFFSET
;
753 offset
= i
* CLE_PORT_OFFSET
;
756 val
= (RSS_IPV4_12B
<< 1) | 0x1;
757 writel(val
, base
+ RSS_CTRL0
+ offset
);
760 /* setup sideband data */
761 ret
= xgene_cle_set_rss_sband(cle
);
765 /* setup indirection table */
766 ret
= xgene_cle_set_rss_idt(pdata
);
773 static int xgene_enet_cle_init(struct xgene_enet_pdata
*pdata
)
775 struct xgene_enet_cle
*enet_cle
= &pdata
->cle
;
776 u32 def_qid
, def_fpsel
, def_nxtfpsel
, pool_id
;
777 struct xgene_cle_dbptr dbptr
[DB_MAX_PTRS
];
778 struct xgene_cle_ptree
*ptree
;
779 struct xgene_cle_ptree_kn kn
;
782 if (pdata
->phy_mode
!= PHY_INTERFACE_MODE_XGMII
)
785 ptree
= &enet_cle
->ptree
;
786 ptree
->start_pkt
= 12; /* Ethertype */
788 ret
= xgene_cle_setup_rss(pdata
);
790 netdev_err(pdata
->ndev
, "RSS initialization failed\n");
794 def_qid
= xgene_enet_dst_ring_num(pdata
->rx_ring
[0]);
795 pool_id
= pdata
->rx_ring
[0]->buf_pool
->id
;
796 def_fpsel
= xgene_enet_get_fpsel(pool_id
);
798 if (pdata
->rx_ring
[0]->page_pool
) {
799 pool_id
= pdata
->rx_ring
[0]->page_pool
->id
;
800 def_nxtfpsel
= xgene_enet_get_fpsel(pool_id
);
803 memset(dbptr
, 0, sizeof(struct xgene_cle_dbptr
) * DB_MAX_PTRS
);
804 dbptr
[DB_RES_ACCEPT
].fpsel
= def_fpsel
;
805 dbptr
[DB_RES_ACCEPT
].nxtfpsel
= def_nxtfpsel
;
806 dbptr
[DB_RES_ACCEPT
].dstqid
= def_qid
;
807 dbptr
[DB_RES_ACCEPT
].cle_priority
= 1;
809 dbptr
[DB_RES_DEF
].fpsel
= def_fpsel
;
810 dbptr
[DB_RES_DEF
].nxtfpsel
= def_nxtfpsel
;
811 dbptr
[DB_RES_DEF
].dstqid
= def_qid
;
812 dbptr
[DB_RES_DEF
].cle_priority
= 7;
813 xgene_cle_setup_def_dbptr(pdata
, enet_cle
, &dbptr
[DB_RES_DEF
],
816 dbptr
[DB_RES_DROP
].drop
= 1;
818 memset(&kn
, 0, sizeof(kn
));
821 kn
.key
[0].priority
= 0;
822 kn
.key
[0].result_pointer
= DB_RES_ACCEPT
;
825 ptree
->dbptr
= dbptr
;
827 ptree
->num_dbptr
= DB_MAX_PTRS
;
829 return xgene_cle_setup_ptree(pdata
, enet_cle
);
832 const struct xgene_cle_ops xgene_cle3in_ops
= {
833 .cle_init
= xgene_enet_cle_init
,