/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
        DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                addr = page_address(page);
        else
                addr = kmap_high(page);
        kmap_flush_tlb((unsigned long)addr);
        return addr;
}

static inline void kunmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
        return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
        __kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
        return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
        struct page *page = folio_page(folio, offset / PAGE_SIZE);
        return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
        return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
        return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
        kunmap_local_indexed(vaddr);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_disable();
        else
                preempt_disable();

        pagefault_disable();
        return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
        return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_disable();
        else
                preempt_disable();

        pagefault_disable();
        return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
        kunmap_local_indexed(addr);
        pagefault_enable();
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_enable();
        else
                preempt_enable();
}

unsigned long __nr_free_highpages(void);
unsigned long __totalhigh_pages(void);

static inline unsigned long nr_free_highpages(void)
{
        return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
        return __totalhigh_pages();
}

static inline bool is_kmap_addr(const void *x)
{
        unsigned long addr = (unsigned long)x;

        return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
                (addr >= __fix_to_virt(FIX_KMAP_END) &&
                 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
        return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
        return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
        return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
        return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
        return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_disable();
        else
                preempt_disable();
        pagefault_disable();
        return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
        return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
        pagefault_enable();
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_enable();
        else
                preempt_enable();
}

static inline unsigned long nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0; }

static inline bool is_kmap_addr(const void *x)
{
        return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr: Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on PREEMPT_RT configuration, re-enables also
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)                                   \
do {                                                            \
        BUILD_BUG_ON(__same_type((__addr), struct page *));    \
        __kunmap_atomic(__addr);                                \
} while (0)
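
/*
 * Illustrative sketch, not part of this header: a hypothetical helper
 * (the name is made up for this example) built on the deprecated
 * kmap_atomic()/kunmap_atomic() pair. Between the two calls pagefaults
 * are disabled (and, depending on PREEMPT_RT, migration or preemption),
 * so the section must not sleep. New code should prefer
 * kmap_local_page()/kunmap_local().
 *
 *	static void example_clear_page_atomic(struct page *page)
 *	{
 *		void *addr = kmap_atomic(page);
 *
 *		memset(addr, 0, PAGE_SIZE);
 *		kunmap_atomic(addr);
 *	}
 */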

/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)                                    \
do {                                                            \
        BUILD_BUG_ON(__same_type((__addr), struct page *));    \
        __kunmap_local(__addr);                                 \
} while (0)
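
/*
 * Illustrative sketch, not part of this header: a hypothetical page-copy
 * helper (the name and both page arguments are made up for this example)
 * showing the strict LIFO pairing that kmap_local_page()/kunmap_local()
 * require: the most recently established mapping is torn down first.
 *
 *	static void example_copy_page_local(struct page *dst, struct page *src)
 *	{
 *		void *vto = kmap_local_page(dst);
 *		void *vfrom = kmap_local_page(src);
 *
 *		memcpy(vto, vfrom, PAGE_SIZE);
 *		kunmap_local(vfrom);
 *		kunmap_local(vto);
 *	}
 */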

#endif