// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt
static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;
void pat_disable(const char *reason)
{
	if (pat_disabled)
		return;

	if (boot_cpu_done) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", reason);
}
static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
bool pat_enabled(void)
{
	return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);
int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);
#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags arch_1 and uncached together to keep track of
 * memory type of pages that have backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
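/*
 * Note on set_page_memtype(): the cmpxchg() loop is a lock-free
 * read-modify-write of page->flags. Only the two memtype bits are
 * replaced; if another CPU updates any page flag in the meantime the
 * cmpxchg() fails and the update is retried with the fresh value, so
 * concurrent flag updates are never lost.
 */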
#else

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif
enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};
#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM
/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

	init_cm_done = true;
}
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
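/*
 * Example: PAT(x, y) places the PAT_* encoding for type y into byte x of the
 * 64-bit IA32_PAT MSR image (entry 0 in bits 7:0, entry 7 in bits 63:56).
 * For instance, the BIOS-default table built in init_cache_modes() below,
 *
 *	PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *	PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC)
 *
 * expands to the MSR value 0x0007040600070406.
 */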
static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_initialized = true;

	__init_cache_modes(pat);
}
static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}
void init_cache_modes(void)
{
	u64 pat = 0;

	if (init_cm_done)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set PAT table to be consistent with
		 * PAT MSR. This case supports "nopat" boot option, and
		 * virtual machine environments which support PAT without
		 * MTRRs. In specific, Xen has unique setup to PAT MSR.
		 *
		 * If PAT MSR returns 0, it is considered invalid and emulates
		 * no PAT support.
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);
}
/**
 * pat_init - Initialize PAT MSR and PAT table
 *
 * This function initializes PAT MSR and PAT table with an OS-defined value
 * to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
 * procedure for PAT.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (pat_disabled)
		return;

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we don't
		 * use those.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example for such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}
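/*
 * Example of the intersection above: a WB request for a range that an MTRR
 * marks as anything other than WB is downgraded to UC-, which can still be
 * overridden later; any non-WB request is returned unchanged.
 */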
struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}
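/*
 * pat_pagerange_is_ram() below folds the callback results into a tri-state
 * return value: 1 if the range is RAM, 0 if it contains no RAM, and -1 if
 * RAM and non-RAM pages are found intermixed (callers treat -1 as an error).
 */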
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of legacy ISA region, even when
	 * some of those portions are listed(or not even listed) with
	 * different e820 types(RAM/reserved/..)
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * The page flags are limited to four types, WB (default), WC, WT and UC-.
 * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting
 * a new memory type is only allowed for a page mapped with the default WB
 * type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}
static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
	return address;
}
/*
 * req_type typically has one of the:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
		     start, end - 1, cattr_name(req_type));
		return -EINVAL;
	}

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);

	return rettype;
}
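/*
 * Note: lookup_memtype() consults the page flags for RAM pages and the
 * memtype rbtree for everything else; an I/O range with no explicit
 * reservation reports UC- rather than the WB default.
 */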
/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;

	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
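/*
 * Typical usage, as an illustrative sketch only (hypothetical PCI driver;
 * pdev, bar and fb are placeholders, not code from this file): make a
 * prefetchable BAR write-combining for the lifetime of the mapping.
 *
 *	resource_size_t base = pci_resource_start(pdev, bar);
 *	resource_size_t size = pci_resource_len(pdev, bar);
 *	void __iomem *fb;
 *
 *	if (arch_io_reserve_memtype_wc(base, size))
 *		return -EBUSY;
 *	fb = ioremap_wc(base, size);
 *	...
 *	iounmap(fb);
 *	arch_io_free_memtype_wc(base, size);
 */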
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base : size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserved non RAM regions only and after successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}
/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with single reserve_pfn_range
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}
void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}
/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;
}
/*
 * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed. The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
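/*
 * Illustrative sketch of how a hypothetical driver mmap handler might use
 * pgprot_writecombine() (foo_mmap and pfn are placeholders, not code from
 * this file): request a write-combining userspace mapping of device memory.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * remap_pfn_range() ends up in track_pfn_remap() above, which validates the
 * requested cache mode against any existing reservation for that range.
 */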
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */