/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif

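/*
 * pat_debug_enable gates the dprintk() trace messages declared in
 * pat_internal.h; booting with "debugpat" on the kernel command line
 * turns them on.
 */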
int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

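/*
 * Each of the eight entries in the IA32_PAT MSR is one byte wide, so
 * PAT(x, y) below places memory type PAT_y into PAT entry x; e.g.
 * PAT(2, UC_MINUS) expands to (u64)7 << 16.
 */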
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting effective memory type as PAT understands it.
 * (The type encodings used in pat and mtrr do not have the same values.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}

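/*
 * In other words, a WB request over a region that the MTRRs mark as
 * anything other than write-back is downgraded to UC-, while non-WB
 * requests are passed through unchanged.
 */
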
struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

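/*
 * pat_pagerange_is_ram() below returns 1 if the whole range is RAM, 0 if
 * none of it is, and -1 for a mixed range: walk_system_ram_range() stops
 * with a positive value as soon as the callback above has seen both RAM
 * and a non-RAM hole.
 */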
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, the physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of the legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/..)
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
			       start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, the function returns the
 * available type in new_type in case of no error. In case of any error
 * it returns a negative value.
 */

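/*
 * A minimal usage sketch (illustrative only; base and size are
 * hypothetical values):
 *
 *	unsigned long new_type;
 *
 *	if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &new_type))
 *		return -EBUSY;
 *	...
 *	free_memtype(base, base + size);
 *
 * Callers typically go through the io_reserve_memtype()/ioremap paths
 * below rather than calling this directly.
 */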
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
		       current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies the RAM page is in its
		 * default state and not reserved, and hence of type WB.
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;

	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

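/*
 * On success, the reservation should eventually be dropped with
 * io_free_memtype() over the same [start, end) range.
 */
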
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
			       current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

557 * mapping space if that range is a part of identity map.
559 int kernel_map_sync_memtype(u64 base
, unsigned long size
, unsigned long flags
)
563 if (base
> __pa(high_memory
-1))
567 * some areas in the middle of the kernel identity range
568 * are not mapped, like the PCI space.
570 if (!page_is_ram(base
>> PAGE_SHIFT
))
573 id_sz
= (__pa(high_memory
-1) <= base
+ size
) ?
574 __pa(high_memory
) - base
:
577 if (ioremap_change_attr((unsigned long)__va(base
), id_sz
, flags
) < 0) {
578 printk(KERN_INFO
"%s:%d ioremap_change_attr failed %s "
579 "for [mem %#010Lx-%#010Lx]\n",
580 current
->comm
, current
->pid
,
582 base
, (unsigned long long)(base
+ size
-1));
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only, and after a successful reserve_memtype()
 * this function also keeps the identity mapping (if any) in sync with the
 * new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by the vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range() call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	unsigned long flags;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled)
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	flags = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (flags != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

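/*
 * track_pfn_remap() is called from remap_pfn_range(); note that the
 * partial-range path above only validates that the pages already share
 * one memtype and does not take a new reservation.
 */
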
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	unsigned long flags;

	if (!pat_enabled)
		return 0;

	/* Set prot based on lookup */
	flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

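/*
 * Typical use (sketch): a driver builds a WC protection for a mapping it
 * is about to create, e.g.
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * On !PAT systems this silently degrades to an uncached mapping.
 */
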
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

*inode
, struct file
*file
)
863 return seq_open(file
, &memtype_seq_ops
);
866 static const struct file_operations memtype_fops
= {
867 .open
= memtype_seq_open
,
870 .release
= seq_release
,
static int __init pat_memtype_list_init(void)
{
	if (debugfs_initialized()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}
late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */