#include <linux/highmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
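
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * sleeping kmap()/kunmap() pair suits process context, where the mapping
 * may stay live across operations that can block.  The helper name is
 * hypothetical, and it assumes len <= PAGE_SIZE and that memcpy() is
 * available via <linux/string.h>.
 */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	char *vaddr = kmap(page);	/* may sleep, so process context only */

	memcpy(dst, vaddr, len);
	kunmap(page);
}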

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	/* each CPU gets its own block of KM_TYPE_NR fixmap slots */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
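
/*
 * Usage sketch (illustrative only, not from this file): the classic
 * kmap_atomic() pattern with an explicit km_type slot.  clear_highpage()
 * in <linux/highmem.h> of this era does essentially this; the helper name
 * below is hypothetical.  No sleeping is allowed between map and unmap.
 */
static void example_clear_page(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}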

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	/* an atomic kmap: look the page up in its fixmap pte */
	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
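
/*
 * Usage sketch (illustrative only, not from this file): while an atomic
 * kmap is live, kmap_atomic_to_page() can recover the struct page from
 * the fixmap virtual address; for lowmem pointers it falls back to
 * virt_to_page().  The helper name is hypothetical.
 */
static void example_roundtrip(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	/* the fixmap pte still holds the page, so we can map back to it */
	BUG_ON(kmap_atomic_to_page(vaddr) != page);
	kunmap_atomic(vaddr, KM_USER0);
}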