/* Linux 6.13-rc4: arch/sh/include/asm/cache.h (blob b38dbc9755811c221d6a435b8e6e5a89935da85f) */
/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
 *
 * include/asm-sh/cache.h
 *
 * Copyright 1999 (C) Niibe Yutaka
 * Copyright 2002, 2003 (C) Paul Mundt
 */
9 #ifndef __ASM_SH_CACHE_H
10 #define __ASM_SH_CACHE_H
12 #include <linux/init.h>
13 #include <cpu/cache.h>
15 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
18 * Some drivers need to perform DMA into kmalloc'ed buffers
19 * and so we have to increase the kmalloc minalign for this.
21 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
23 #define __read_mostly __section(".data..read_mostly")
25 #ifndef __ASSEMBLY__
26 struct cache_info {
27 unsigned int ways; /* Number of cache ways */
28 unsigned int sets; /* Number of cache sets */
29 unsigned int linesz; /* Cache line size (bytes) */
31 unsigned int way_size; /* sets * line size */
34 * way_incr is the address offset for accessing the next way
35 * in memory mapped cache array ops.
37 unsigned int way_incr;
38 unsigned int entry_shift;
39 unsigned int entry_mask;
42 * Compute a mask which selects the address bits which overlap between
43 * 1. those used to select the cache set during indexing
44 * 2. those in the physical page number.
46 unsigned int alias_mask;
47 unsigned int n_aliases; /* Number of aliases */
49 unsigned long flags;
51 #endif /* __ASSEMBLY__ */
52 #endif /* __ASM_SH_CACHE_H */