/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>
int pat_wc_enabled = 1;

static u64 __read_mostly boot_pat_state;

static int nopat(char *str)
{
	pat_wc_enabled = 0;
	printk(KERN_INFO "x86: PAT support disabled.\n");

	return 0;
}
early_param("nopat", nopat);
static int pat_known_cpu(void)
{
	if (!pat_wc_enabled)
		return 0;

	if (cpu_has_pat)
		return 1;

	pat_wc_enabled = 0;
	printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
	return 0;
}
enum {
	PAT_UC		= 0,	/* uncached */
	PAT_WC		= 1,	/* Write combining */
	PAT_WT		= 4,	/* Write Through */
	PAT_WP		= 5,	/* Write Protected */
	PAT_WB		= 6,	/* Write Back (default) */
	PAT_UC_MINUS	= 7,	/* UC, but can be overridden by MTRR */
};
#define PAT(x,y)	((u64)PAT_ ## y << ((x)*8))
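/*
 * For example, PAT(1,WC) expands to (u64)PAT_WC << 8, i.e. it places the
 * WC type value 0x01 in byte 1 of the IA32_PAT MSR image built below.
 */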
void pat_init(void)
{
	u64 pat;

#ifndef CONFIG_X86_PAT
	nopat(NULL);
#endif

	/* Boot CPU enables PAT based on CPU feature */
	if (!smp_processor_id() && !pat_known_cpu())
		return;

	/* APs enable PAT iff boot CPU has enabled it before */
	if (smp_processor_id() && !pat_wc_enabled)
		return;

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *	PAT
	 *	|PCD
	 *	||PWT
	 *	|||
	 *	000 WB		_PAGE_CACHE_WB
	 *	001 WC		_PAGE_CACHE_WC
	 *	010 UC-		_PAGE_CACHE_UC_MINUS
	 *	011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
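	/*
	 * The expression above evaluates to 0x0007010600070106: one byte per
	 * PAT entry, entry 0 in the least significant byte, so entries
	 * {0,4} = WB, {1,5} = WC, {2,6} = UC- and {3,7} = UC.
	 */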
	if (!smp_processor_id()) {
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should be a problem
 * it could be changed to a rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
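/*
 * Illustration (not from the original source): after two overlapping
 * reservations of the same type, say reserve_memtype(0x1000, 0x3000, WC, ..)
 * followed by reserve_memtype(0x2000, 0x3000, WC, ..), the list holds two
 * entries sorted by start address; the alias is legal because both request
 * the same cache attribute, and each needs its own free_memtype() call.
 */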
struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock); 	/* protects memtype list */
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
				unsigned long *ret_prot)
{
	unsigned long pat_type;
	u8 mtrr_type;

	mtrr_type = mtrr_type_lookup(start, end);
	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
		*ret_prot = prot;
		return 0;
	}
	if (mtrr_type == 0xFE) {		/* MTRR match error */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}
	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
	    mtrr_type != MTRR_TYPE_WRBACK &&
	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}

	pat_type = prot & _PAGE_CACHE_MASK;
	prot &= (~_PAGE_CACHE_MASK);
	/* Currently doing intersection by hand. Optimize it later. */
	if (pat_type == _PAGE_CACHE_WC) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
	} else if (pat_type == _PAGE_CACHE_UC ||
		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
		*ret_prot = prot | _PAGE_CACHE_UC;
	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else {
		*ret_prot = prot | _PAGE_CACHE_WB;
	}

	return 0;
}
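/*
 * A few sample outcomes of the intersection above (readable directly off
 * the if-chain): PAT WB x MTRR WRCOMB -> WC, PAT WB x MTRR UNCACHABLE -> UC,
 * PAT UC_MINUS x any handled MTRR type -> UC_MINUS.
 */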
/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when requester wants to inherit
 * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
 *
 * If ret_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If ret_type is non-null, function will return
 * available type in ret_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;
	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		/* This is identical to page table setting without PAT */
		if (ret_type) {
			if (req_type == -1) {
				*ret_type = _PAGE_CACHE_WB;
			} else {
				*ret_type = req_type;
			}
		}
		return 0;
	}
	/* Low ISA region is always mapped WB in page table. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}
	if (req_type == -1) {
		/*
		 * Special case where caller wants to inherit from mtrr or
		 * existing pat mapping, defaulting to UC_MINUS in case of
		 * no match.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == 0xFE) { /* MTRR match error */
			err = -1;
		}

		if (mtrr_type == MTRR_TYPE_WRBACK) {
			req_type = _PAGE_CACHE_WB;
			actual_type = _PAGE_CACHE_WB;
		} else {
			req_type = _PAGE_CACHE_UC_MINUS;
			actual_type = _PAGE_CACHE_UC_MINUS;
		}
	} else {
		req_type &= _PAGE_CACHE_MASK;
		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
	}

	if (err) {
		if (ret_type)
			*ret_type = actual_type;

		return -EINVAL;
	}
	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;
	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		if (parse->start >= end) {
			pr_debug("New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}
		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}
			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}
			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
			       saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}
		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}
			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}
			printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}

	if (err) {
		printk(KERN_INFO
	"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(new_entry->type),
			cattr_name(req_type));
		kfree(new_entry);
		spin_unlock(&memtype_lock);

		return err;
	}
	if (new_entry) {
		/* No conflict. Not yet added to the list. Add to the tail */
		list_add_tail(&new_entry->nd, &memtype_list);
		pr_debug("New Entry\n");
	}

	if (ret_type) {
		pr_debug(
	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type), cattr_name(*ret_type));
	} else {
		pr_debug(
	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type));
	}

	spin_unlock(&memtype_lock);

	return err;
}
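/*
 * Example usage (a sketch, not part of the original file; fb_base and
 * fb_size are illustrative): a driver wanting a write-combining alias
 * would pair the calls and fall back to whatever type is returned:
 *
 *	unsigned long got;
 *
 *	if (reserve_memtype(fb_base, fb_base + fb_size,
 *			    _PAGE_CACHE_WC, &got) < 0)
 *		return -EBUSY;
 *	if (got != _PAGE_CACHE_WC)
 *		printk(KERN_INFO "got %s instead\n", cattr_name(got));
 *	...
 *	free_memtype(fb_base, fb_base + fb_size);
 */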
int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		return 0;
	}

	/* Low ISA region is always mapped WB. No need to track */
	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
		return 0;
	}

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
#ifdef CONFIG_NONPROMISC_DEVMEM
/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	unsigned long ret_flags;
	int retval;
	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC;
	}
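	/*
	 * For reference, a userspace sketch that takes this branch:
	 *
	 *	fd = open("/dev/mem", O_RDWR | O_SYNC);
	 *	p  = mmap(NULL, len, PROT_READ | PROT_WRITE,
	 *		  MAP_SHARED, fd, phys_addr);
	 *
	 * The O_SYNC flag makes the mapping uncached (UC).
	 */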
#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_wc_enabled &&
	    ! ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
	   (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif
	/*
	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != _PAGE_CACHE_UC_MINUS) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
		flags = ret_flags;
	}

	if (retval < 0)
		return 0;
	if (pfn <= max_pfn_mapped &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, offset + size);
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, addr + size,
			cattr_name(flags));
	}
}
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}