1 /***********************license start***************
2 * Author: Cavium Networks
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
7 * Copyright (c) 2003-2008 Cavium Networks
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
/**
 * Interface to the hardware Free Pool Allocator.
 */
36 #ifndef __CVMX_FPA_H__
37 #define __CVMX_FPA_H__
39 #include <linux/delay.h>
41 #include <asm/octeon/cvmx-address.h>
42 #include <asm/octeon/cvmx-fpa-defs.h>
/* Number of pools the FPA hardware supports */
#define CVMX_FPA_NUM_POOLS 8
/* Smallest block size the FPA can manage, in bytes */
#define CVMX_FPA_MIN_BLOCK_SIZE 128
/* Required alignment of FPA-managed blocks, in bytes */
#define CVMX_FPA_ALIGNMENT 128
/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
#else
		uint64_t addr:40;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_fpa_iobdma_data_t;
/**
 * Structure describing the current state of a FPA pool.
 */
typedef struct {
	/* Name it was created under */
	const char *name;
	/* Size of each block */
	uint64_t size;
	/* The base memory address of whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;
93 * Current state of all the pools. Use access functions
94 * instead of using it directly.
96 extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info
[CVMX_FPA_NUM_POOLS
];
98 /* CSR typedefs have been moved to cvmx-csr-*.h */
101 * Return the name of the pool
103 * @pool: Pool to get the name of
106 static inline const char *cvmx_fpa_get_name(uint64_t pool
)
108 return cvmx_fpa_pool_info
[pool
].name
;
112 * Return the base of the pool
114 * @pool: Pool to get the base of
117 static inline void *cvmx_fpa_get_base(uint64_t pool
)
119 return cvmx_fpa_pool_info
[pool
].base
;
123 * Check if a pointer belongs to an FPA pool. Return non-zero
124 * if the supplied pointer is inside the memory controlled by
127 * @pool: Pool to check
128 * @ptr: Pointer to check
129 * Returns Non-zero if pointer is in the pool. Zero if not
131 static inline int cvmx_fpa_is_member(uint64_t pool
, void *ptr
)
133 return ((ptr
>= cvmx_fpa_pool_info
[pool
].base
) &&
135 ((char *)(cvmx_fpa_pool_info
[pool
].base
)) +
136 cvmx_fpa_pool_info
[pool
].size
*
137 cvmx_fpa_pool_info
[pool
].starting_element_count
));
141 * Enable the FPA for use. Must be performed after any CSR
142 * configuration but before any other FPA functions.
144 static inline void cvmx_fpa_enable(void)
146 union cvmx_fpa_ctl_status status
;
148 status
.u64
= cvmx_read_csr(CVMX_FPA_CTL_STATUS
);
151 ("Warning: Enabling FPA when FPA already enabled.\n");
155 * Do runtime check as we allow pass1 compiled code to run on
158 if (cvmx_octeon_is_pass1()) {
159 union cvmx_fpa_fpfx_marks marks
;
161 for (i
= 1; i
< 8; i
++) {
163 cvmx_read_csr(CVMX_FPA_FPF1_MARKS
+ (i
- 1) * 8ull);
164 marks
.s
.fpf_wr
= 0xe0;
165 cvmx_write_csr(CVMX_FPA_FPF1_MARKS
+ (i
- 1) * 8ull,
169 /* Enforce a 10 cycle delay between config and enable */
173 /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
176 cvmx_write_csr(CVMX_FPA_CTL_STATUS
, status
.u64
);
180 * Get a new block from the FPA
182 * @pool: Pool to get the block from
183 * Returns Pointer to the block or NULL on failure
185 static inline void *cvmx_fpa_alloc(uint64_t pool
)
188 cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA
, pool
)));
190 return cvmx_phys_to_ptr(address
);
196 * Asynchronously get a new block from the FPA
198 * @scr_addr: Local scratch address to put response in. This is a byte address,
199 * but must be 8 byte aligned.
200 * @pool: Pool to get the block from
202 static inline void cvmx_fpa_async_alloc(uint64_t scr_addr
, uint64_t pool
)
204 cvmx_fpa_iobdma_data_t data
;
207 * Hardware only uses 64 bit aligned locations, so convert
208 * from byte address to 64-bit index
210 data
.s
.scraddr
= scr_addr
>> 3;
212 data
.s
.did
= CVMX_FULL_DID(CVMX_OCT_DID_FPA
, pool
);
214 cvmx_send_single(data
.u64
);
218 * Free a block allocated with a FPA pool. Does NOT provide memory
219 * ordering in cases where the memory block was modified by the core.
221 * @ptr: Block to free
222 * @pool: Pool to put it in
224 * Cache lines to invalidate
226 static inline void cvmx_fpa_free_nosync(void *ptr
, uint64_t pool
,
227 uint64_t num_cache_lines
)
230 newptr
.u64
= cvmx_ptr_to_phys(ptr
);
231 newptr
.sfilldidspace
.didspace
=
232 CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA
, pool
));
233 /* Prevent GCC from reordering around free */
235 /* value written is number of cache lines not written back */
236 cvmx_write_io(newptr
.u64
, num_cache_lines
);
240 * Free a block allocated with a FPA pool. Provides required memory
241 * ordering in cases where memory block was modified by core.
243 * @ptr: Block to free
244 * @pool: Pool to put it in
246 * Cache lines to invalidate
248 static inline void cvmx_fpa_free(void *ptr
, uint64_t pool
,
249 uint64_t num_cache_lines
)
252 newptr
.u64
= cvmx_ptr_to_phys(ptr
);
253 newptr
.sfilldidspace
.didspace
=
254 CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA
, pool
));
256 * Make sure that any previous writes to memory go out before
257 * we free this buffer. This also serves as a barrier to
258 * prevent GCC from reordering operations to after the
262 /* value written is number of cache lines not written back */
263 cvmx_write_io(newptr
.u64
, num_cache_lines
);
/**
 * Setup a FPA pool to control a new block of memory.
 * This can only be called once per pool. Make sure proper
 * locking enforces this.
 *
 * @pool:       Pool to initialize
 * @name:       Constant character string to name this pool.
 *              String is not copied.
 * @buffer:     Pointer to the block of memory to use. This must be
 *              accessible by all processors and external hardware.
 * @block_size: Size for each block controlled by the FPA
 * @num_blocks: Number of blocks
 *
 * Returns 0 on Success,
 *         -1 on failure
 */
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
			       uint64_t block_size, uint64_t num_blocks);
/**
 * Shutdown a Memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @pool: Pool to shutdown
 * Returns Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
/**
 * Get the size of blocks controlled by the pool
 * This is resolved to a constant at compile time.
 *
 * @pool: Pool to access
 * Returns Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);
308 #endif /* __CVM_FPA_H__ */