mm: hugetlb: fix hugepage memory leak caused by wrong reserve count
[linux/fpc-iii.git] / arch / arm64 / include / asm / cache.h
blob5082b30bc2c05fbf7b970141dd1d69800e7ed3b3
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
16 #ifndef __ASM_CACHE_H
17 #define __ASM_CACHE_H
19 #include <asm/cachetype.h>
21 #define L1_CACHE_SHIFT 7
22 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
25 * Memory returned by kmalloc() may be used for DMA, so we must make
26 * sure that all such allocations are cache aligned. Otherwise,
27 * unrelated code may cause parts of the buffer to be read into the
28 * cache before the transfer is done, causing old data to be seen by
29 * the CPU.
31 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
33 #ifndef __ASSEMBLY__
35 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
37 static inline int cache_line_size(void)
39 u32 cwg = cache_type_cwg();
40 return cwg ? 4 << cwg : L1_CACHE_BYTES;
43 #endif /* __ASSEMBLY__ */
45 #endif