/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/
30 * Interface to the hardware Input Packet Data unit.
33 #ifndef __CVMX_IPD_H__
34 #define __CVMX_IPD_H__
36 #include <asm/octeon/octeon-feature.h>
38 #include <asm/octeon/cvmx-ipd-defs.h>
39 #include <asm/octeon/cvmx-pip-defs.h>
/*
 * Cache placement policy for received packet data: which 128-byte
 * blocks of each packet go to L2 cache versus DRAM.
 */
enum cvmx_ipd_mode {
	CVMX_IPD_OPC_MODE_STT = 0LL,	  /* All blocks DRAM, not cached in L2 */
	CVMX_IPD_OPC_MODE_STF = 1LL,	  /* All blocks into L2 */
	CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
	CVMX_IPD_OPC_MODE_STF2_STT = 3LL  /* 1st, 2nd blocks L2, rest DRAM */
};
/*
 * Default the len_m8 hardware workaround (see cvmx_ipd_enable) to
 * disabled; a board config may define this before including the file.
 */
#ifndef CVMX_ENABLE_LEN_M8_FIX
#define CVMX_ENABLE_LEN_M8_FIX 0
#endif
/* CSR typedefs have been moved to cvmx-csr-*.h */
typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;

/* The "not first skip" and "2nd next ptr back" CSRs share the same
   bit layout as their "first" counterparts, so they reuse the types. */
typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
62 * @mbuff_size: Packets buffer size in 8 byte words
64 * Number of 8 byte words to skip in the first buffer
65 * @not_first_mbuff_skip:
66 * Number of 8 byte words to skip in each following buffer
67 * @first_back: Must be same as first_mbuff_skip / 128
69 * Must be same as not_first_mbuff_skip / 128
71 * FPA pool to get work entries from
73 * @back_pres_enable_flag:
74 * Enable or disable port back pressure
76 static inline void cvmx_ipd_config(uint64_t mbuff_size
,
77 uint64_t first_mbuff_skip
,
78 uint64_t not_first_mbuff_skip
,
81 uint64_t wqe_fpa_pool
,
82 enum cvmx_ipd_mode cache_mode
,
83 uint64_t back_pres_enable_flag
)
85 cvmx_ipd_mbuff_first_skip_t first_skip
;
86 cvmx_ipd_mbuff_not_first_skip_t not_first_skip
;
87 union cvmx_ipd_packet_mbuff_size size
;
88 cvmx_ipd_first_next_ptr_back_t first_back_struct
;
89 cvmx_ipd_second_next_ptr_back_t second_back_struct
;
90 union cvmx_ipd_wqe_fpa_queue wqe_pool
;
91 union cvmx_ipd_ctl_status ipd_ctl_reg
;
94 first_skip
.s
.skip_sz
= first_mbuff_skip
;
95 cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP
, first_skip
.u64
);
97 not_first_skip
.u64
= 0;
98 not_first_skip
.s
.skip_sz
= not_first_mbuff_skip
;
99 cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP
, not_first_skip
.u64
);
102 size
.s
.mb_size
= mbuff_size
;
103 cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE
, size
.u64
);
105 first_back_struct
.u64
= 0;
106 first_back_struct
.s
.back
= first_back
;
107 cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK
, first_back_struct
.u64
);
109 second_back_struct
.u64
= 0;
110 second_back_struct
.s
.back
= second_back
;
111 cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK
, second_back_struct
.u64
);
114 wqe_pool
.s
.wqe_pool
= wqe_fpa_pool
;
115 cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE
, wqe_pool
.u64
);
117 ipd_ctl_reg
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
118 ipd_ctl_reg
.s
.opc_mode
= cache_mode
;
119 ipd_ctl_reg
.s
.pbp_en
= back_pres_enable_flag
;
120 cvmx_write_csr(CVMX_IPD_CTL_STATUS
, ipd_ctl_reg
.u64
);
122 /* Note: the example RED code that used to be here has been moved to
123 cvmx_helper_setup_red */
129 static inline void cvmx_ipd_enable(void)
131 union cvmx_ipd_ctl_status ipd_reg
;
132 ipd_reg
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
133 if (ipd_reg
.s
.ipd_en
) {
135 ("Warning: Enabling IPD when IPD already enabled.\n");
137 ipd_reg
.s
.ipd_en
= 1;
138 #if CVMX_ENABLE_LEN_M8_FIX
139 if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2
))
140 ipd_reg
.s
.len_m8
= TRUE
;
142 cvmx_write_csr(CVMX_IPD_CTL_STATUS
, ipd_reg
.u64
);
148 static inline void cvmx_ipd_disable(void)
150 union cvmx_ipd_ctl_status ipd_reg
;
151 ipd_reg
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
152 ipd_reg
.s
.ipd_en
= 0;
153 cvmx_write_csr(CVMX_IPD_CTL_STATUS
, ipd_reg
.u64
);
157 * Supportive function for cvmx_fpa_shutdown_pool.
159 static inline void cvmx_ipd_free_ptr(void)
161 /* Only CN38XXp{1,2} cannot read pointer out of the IPD */
162 if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1
)
163 && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2
)) {
165 union cvmx_ipd_ptr_count ipd_ptr_count
;
166 ipd_ptr_count
.u64
= cvmx_read_csr(CVMX_IPD_PTR_COUNT
);
168 /* Handle Work Queue Entry in cn56xx and cn52xx */
169 if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR
)) {
170 union cvmx_ipd_ctl_status ipd_ctl_status
;
171 ipd_ctl_status
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
172 if (ipd_ctl_status
.s
.no_wptr
)
176 /* Free the prefetched WQE */
177 if (ipd_ptr_count
.s
.wqev_cnt
) {
178 union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid
;
179 ipd_wqe_ptr_valid
.u64
=
180 cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID
);
182 cvmx_fpa_free(cvmx_phys_to_ptr
183 ((uint64_t) ipd_wqe_ptr_valid
.s
.
184 ptr
<< 7), CVMX_FPA_PACKET_POOL
,
187 cvmx_fpa_free(cvmx_phys_to_ptr
188 ((uint64_t) ipd_wqe_ptr_valid
.s
.
189 ptr
<< 7), CVMX_FPA_WQE_POOL
, 0);
192 /* Free all WQE in the fifo */
193 if (ipd_ptr_count
.s
.wqe_pcnt
) {
195 union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl
;
196 ipd_pwp_ptr_fifo_ctl
.u64
=
197 cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
);
198 for (i
= 0; i
< ipd_ptr_count
.s
.wqe_pcnt
; i
++) {
199 ipd_pwp_ptr_fifo_ctl
.s
.cena
= 0;
200 ipd_pwp_ptr_fifo_ctl
.s
.raddr
=
201 ipd_pwp_ptr_fifo_ctl
.s
.max_cnts
+
202 (ipd_pwp_ptr_fifo_ctl
.s
.wraddr
+
203 i
) % ipd_pwp_ptr_fifo_ctl
.s
.max_cnts
;
204 cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
,
205 ipd_pwp_ptr_fifo_ctl
.u64
);
206 ipd_pwp_ptr_fifo_ctl
.u64
=
207 cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
);
209 cvmx_fpa_free(cvmx_phys_to_ptr
211 ipd_pwp_ptr_fifo_ctl
.s
.
213 CVMX_FPA_PACKET_POOL
, 0);
215 cvmx_fpa_free(cvmx_phys_to_ptr
217 ipd_pwp_ptr_fifo_ctl
.s
.
219 CVMX_FPA_WQE_POOL
, 0);
221 ipd_pwp_ptr_fifo_ctl
.s
.cena
= 1;
222 cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
,
223 ipd_pwp_ptr_fifo_ctl
.u64
);
226 /* Free the prefetched packet */
227 if (ipd_ptr_count
.s
.pktv_cnt
) {
228 union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid
;
229 ipd_pkt_ptr_valid
.u64
=
230 cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID
);
231 cvmx_fpa_free(cvmx_phys_to_ptr
232 (ipd_pkt_ptr_valid
.s
.ptr
<< 7),
233 CVMX_FPA_PACKET_POOL
, 0);
236 /* Free the per port prefetched packets */
239 union cvmx_ipd_prc_port_ptr_fifo_ctl
240 ipd_prc_port_ptr_fifo_ctl
;
241 ipd_prc_port_ptr_fifo_ctl
.u64
=
242 cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL
);
244 for (i
= 0; i
< ipd_prc_port_ptr_fifo_ctl
.s
.max_pkt
;
246 ipd_prc_port_ptr_fifo_ctl
.s
.cena
= 0;
247 ipd_prc_port_ptr_fifo_ctl
.s
.raddr
=
248 i
% ipd_prc_port_ptr_fifo_ctl
.s
.max_pkt
;
249 cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL
,
250 ipd_prc_port_ptr_fifo_ctl
.u64
);
251 ipd_prc_port_ptr_fifo_ctl
.u64
=
253 (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL
);
254 cvmx_fpa_free(cvmx_phys_to_ptr
256 ipd_prc_port_ptr_fifo_ctl
.s
.
257 ptr
<< 7), CVMX_FPA_PACKET_POOL
,
260 ipd_prc_port_ptr_fifo_ctl
.s
.cena
= 1;
261 cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL
,
262 ipd_prc_port_ptr_fifo_ctl
.u64
);
265 /* Free all packets in the holding fifo */
266 if (ipd_ptr_count
.s
.pfif_cnt
) {
268 union cvmx_ipd_prc_hold_ptr_fifo_ctl
269 ipd_prc_hold_ptr_fifo_ctl
;
271 ipd_prc_hold_ptr_fifo_ctl
.u64
=
272 cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL
);
274 for (i
= 0; i
< ipd_ptr_count
.s
.pfif_cnt
; i
++) {
275 ipd_prc_hold_ptr_fifo_ctl
.s
.cena
= 0;
276 ipd_prc_hold_ptr_fifo_ctl
.s
.raddr
=
277 (ipd_prc_hold_ptr_fifo_ctl
.s
.praddr
+
278 i
) % ipd_prc_hold_ptr_fifo_ctl
.s
.max_pkt
;
279 cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL
,
280 ipd_prc_hold_ptr_fifo_ctl
.u64
);
281 ipd_prc_hold_ptr_fifo_ctl
.u64
=
283 (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL
);
284 cvmx_fpa_free(cvmx_phys_to_ptr
286 ipd_prc_hold_ptr_fifo_ctl
.s
.
287 ptr
<< 7), CVMX_FPA_PACKET_POOL
,
290 ipd_prc_hold_ptr_fifo_ctl
.s
.cena
= 1;
291 cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL
,
292 ipd_prc_hold_ptr_fifo_ctl
.u64
);
295 /* Free all packets in the fifo */
296 if (ipd_ptr_count
.s
.pkt_pcnt
) {
298 union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl
;
299 ipd_pwp_ptr_fifo_ctl
.u64
=
300 cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
);
302 for (i
= 0; i
< ipd_ptr_count
.s
.pkt_pcnt
; i
++) {
303 ipd_pwp_ptr_fifo_ctl
.s
.cena
= 0;
304 ipd_pwp_ptr_fifo_ctl
.s
.raddr
=
305 (ipd_pwp_ptr_fifo_ctl
.s
.praddr
+
306 i
) % ipd_pwp_ptr_fifo_ctl
.s
.max_cnts
;
307 cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
,
308 ipd_pwp_ptr_fifo_ctl
.u64
);
309 ipd_pwp_ptr_fifo_ctl
.u64
=
310 cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
);
311 cvmx_fpa_free(cvmx_phys_to_ptr
312 ((uint64_t) ipd_pwp_ptr_fifo_ctl
.
314 CVMX_FPA_PACKET_POOL
, 0);
316 ipd_pwp_ptr_fifo_ctl
.s
.cena
= 1;
317 cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL
,
318 ipd_pwp_ptr_fifo_ctl
.u64
);
321 /* Reset the IPD to get all buffers out of it */
323 union cvmx_ipd_ctl_status ipd_ctl_status
;
324 ipd_ctl_status
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
325 ipd_ctl_status
.s
.reset
= 1;
326 cvmx_write_csr(CVMX_IPD_CTL_STATUS
, ipd_ctl_status
.u64
);
331 union cvmx_pip_sft_rst pip_sft_rst
;
332 pip_sft_rst
.u64
= cvmx_read_csr(CVMX_PIP_SFT_RST
);
333 pip_sft_rst
.s
.rst
= 1;
334 cvmx_write_csr(CVMX_PIP_SFT_RST
, pip_sft_rst
.u64
);
339 #endif /* __CVMX_IPD_H__ */