/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
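/*
 * Editorial usage sketch (not part of the original file): kmap()/kunmap()
 * are the sleeping variants, so a caller may block while the mapping is
 * held.  For lowmem pages both calls degenerate to page_address()/no-op.
 * The helper below is hypothetical and only illustrates the pairing;
 * memcpy() is assumed available via <linux/string.h>.
 */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);		/* may sleep waiting for a pkmap slot */

	memcpy(dst, src, len);		/* mapping persists even if we sleep */
	kunmap(page);			/* drop the kernel mapping */
}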
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
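/*
 * Editorial usage sketch (not part of the original file): callers normally
 * reach __kmap_atomic()/__kunmap_atomic() through the generic
 * kmap_atomic()/kunmap_atomic() wrappers in <linux/highmem.h>.  The helper
 * below is hypothetical; no sleeping is allowed between the two calls
 * because page faults are disabled while the per-CPU fixmap slot is held.
 */
static void example_clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);	/* grabs a per-CPU fixmap slot */

	memset(kaddr, 0, PAGE_SIZE);		/* must not sleep here */
	kunmap_atomic(kaddr);			/* release slot, re-enable pagefaults */
}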
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
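/*
 * Editorial usage sketch (not part of the original file): kmap_atomic_pfn()
 * maps a physical frame by pfn, which is useful when there may be no
 * struct page behind it.  The mapping is dropped with kunmap_atomic() on
 * the returned virtual address; the helper name below is hypothetical.
 */
static u32 example_peek_word(unsigned long pfn, unsigned long offset)
{
	void *kaddr = kmap_atomic_pfn(pfn);	/* atomic-context rules apply */
	u32 val = *(u32 *)(kaddr + (offset & ~PAGE_MASK));

	kunmap_atomic(kaddr);
	return val;
}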
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}