arch/arm/include/asm/highmem.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/cachetype.h>
#include <asm/fixmap.h>

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
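
/*
 * Worked example (an illustrative sketch, assuming the common configuration
 * of 4 KiB pages, PTRS_PER_PTE == 512, PMD_SIZE == 2 MiB and
 * PAGE_OFFSET == 0xC0000000; other configurations shift the numbers
 * accordingly):
 *
 *	PKMAP_BASE           == 0xC0000000 - 0x00200000 == 0xBFE00000
 *	LAST_PKMAP           == 512 entries == one PMD-sized window
 *	PKMAP_NR(0xBFE03000) == (0x3000 >> 12) == 3
 *	PKMAP_ADDR(3)        == 0xBFE00000 + (3 << 12) == 0xBFE03000
 *
 * i.e. the pkmap area is the single page-table window immediately below the
 * direct-mapped region, and PKMAP_NR()/PKMAP_ADDR() convert between a
 * virtual address in that window and its slot index.
 */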

#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)

extern pte_t *pkmap_page_table;

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context.  With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so not to pay the price of establishing a second mapping if an existing
 * one can be used.  However, on platforms without hardware TLB maintenance
 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
 * the locking involved must also disable IRQs which is incompatible with
 * the IPI mechanism used by global TLB operations.
 * (A usage sketch for kmap_high_get() follows the #ifdef block below.)
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif

/*
 * Needed to be able to broadcast the TLB invalidation for kmap.
 */
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);

static inline void *arch_kmap_local_high_get(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
		return NULL;
	return kmap_high_get(page);
}
#define arch_kmap_local_high_get arch_kmap_local_high_get

#else /* ARCH_NEEDS_KMAP_HIGH_GET */
static inline void *kmap_high_get(struct page *page)
{
	return NULL;
}
#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
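
/*
 * Usage sketch (illustrative only, not part of this header's API surface):
 * a caller that needs the page's existing kmap in atomic context can try
 * kmap_high_get() first and fall back to a temporary mapping, roughly along
 * the lines of the ARM cache/DMA maintenance code:
 *
 *	void *vaddr = kmap_high_get(page);
 *
 *	if (vaddr) {
 *		// reuse the pinned permanent mapping
 *		do_cache_op(vaddr + offset, len);
 *		kunmap_high(page);
 *	} else {
 *		// no pinned mapping (or the !ARCH_NEEDS_KMAP_HIGH_GET stub):
 *		// create a short-lived local mapping instead
 *		vaddr = kmap_atomic(page);
 *		do_cache_op(vaddr + offset, len);
 *		kunmap_atomic(vaddr);
 *	}
 *
 * do_cache_op(), offset and len are placeholders here; kunmap_high(),
 * kmap_atomic() and kunmap_atomic() are the generic highmem helpers.
 */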

#define arch_kmap_local_post_map(vaddr, pteval)			\
	local_flush_tlb_kernel_page(vaddr)

#define arch_kmap_local_pre_unmap(vaddr)			\
do {								\
	if (cache_is_vivt())					\
		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
} while (0)

#define arch_kmap_local_post_unmap(vaddr)			\
	local_flush_tlb_kernel_page(vaddr)
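
/*
 * How the hooks above are expected to be driven (a simplified sketch of the
 * generic kmap_local code in mm/highmem.c, not a literal copy of it):
 *
 *	// map: install the pte for the fixmap slot, then flush the local TLB
 *	set_pte_at(&init_mm, vaddr, ptep, pteval);
 *	arch_kmap_local_post_map(vaddr, pteval);
 *
 *	// unmap: write back VIVT dirty lines while the mapping still exists,
 *	// clear the pte, then drop the now-stale local TLB entry
 *	arch_kmap_local_pre_unmap(vaddr);
 *	pte_clear(&init_mm, vaddr, ptep);
 *	arch_kmap_local_post_unmap(vaddr);
 *
 * Only local (this-CPU) TLB maintenance is needed because kmap_local
 * mappings are per-CPU and never visible to other CPUs.
 */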

#endif