/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_wc_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_wc_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#endif
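
/*
 * Usage note: booting with "nopat" on the kernel command line runs the
 * early_param() hook above and disables PAT before it is programmed.
 */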

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x,y) ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_wc_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat) {
		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
		/*
		 * Panic if this happens on the secondary CPU, and we
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		BUG_ON(boot_pat_state);
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
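	/*
	 * Worked example: with the PAT_* values above, this evaluates to
	 * 0x0007010600070106, i.e. both 32-bit halves of the MSR program
	 * the four entries as WB, WC, UC- and UC.
	 */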

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64 start;
	u64 end;
	unsigned long type;
	struct list_head nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
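
/*
 * Illustrative list state (addresses are made up): two overlapping WC
 * reservations of the same range appear as two identical entries, which
 * is how the list reference-counts aliased mappings:
 *
 *	{ start = 0xd0000000, end = 0xd0010000, type = _PAGE_CACHE_WC }
 *	{ start = 0xd0000000, end = 0xd0010000, type = _PAGE_CACHE_WC }
 */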

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Types in pat and mtrr will not have the same value.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
				unsigned long *ret_prot)
{
	unsigned long pat_type;
	u8 mtrr_type;

	pat_type = prot & _PAGE_CACHE_MASK;
	prot &= (~_PAGE_CACHE_MASK);

	/*
	 * We return the PAT request directly for types where PAT takes
	 * precedence with respect to MTRR and for UC_MINUS.
	 * Consistency checks with other PAT requests are done later
	 * while going through the memtype list.
	 */
	if (pat_type == _PAGE_CACHE_WC) {
		*ret_prot = prot | _PAGE_CACHE_WC;
		return 0;
	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
		return 0;
	} else if (pat_type == _PAGE_CACHE_UC) {
		*ret_prot = prot | _PAGE_CACHE_UC;
		return 0;
	}

	/*
	 * Look for the MTRR hint to get the effective type in case where
	 * the PAT request is for WB.
	 */
	mtrr_type = mtrr_type_lookup(start, end);

	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
		*ret_prot = prot | _PAGE_CACHE_UC;
	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else {
		*ret_prot = prot | _PAGE_CACHE_WB;
	}

	return 0;
}
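
/*
 * Worked example: a _PAGE_CACHE_WB request over a range an MTRR marks
 * MTRR_TYPE_UNCACHABLE comes back as _PAGE_CACHE_UC. Only a WB request
 * consults the MTRR hint; for WC, UC- and UC the PAT request wins.
 */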

/*
 * req_type typically has one of the:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants
 * to inherit the memory type from mtrr (if WB), existing PAT, defaulting
 * to UC_MINUS.
 *
 * If ret_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If ret_type is non-NULL, the function
 * will return the available type in ret_type in case of no error. In case
 * of any error it will return a negative return value.
 */
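
/*
 * Example call (illustrative sketch; the address range is made up).
 * A caller reserving 64K as write-combining, willing to accept whatever
 * type an existing overlapping reservation dictates:
 *
 *	unsigned long ret_type;
 *
 *	if (!reserve_memtype(0xd0000000ULL, 0xd0010000ULL,
 *			     _PAGE_CACHE_WC, &ret_type)) {
 *		... map the range using ret_type ...
 *		free_memtype(0xd0000000ULL, 0xd0010000ULL);
 *	}
 */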

int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		/* This is identical to page table setting without PAT */
		if (ret_type) {
			if (req_type == -1) {
				*ret_type = _PAGE_CACHE_WB;
			} else {
				*ret_type = req_type;
			}
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use a WB request for WB memory and
		 * use UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK) {
			req_type = _PAGE_CACHE_WB;
			actual_type = _PAGE_CACHE_WB;
		} else {
			req_type = _PAGE_CACHE_UC_MINUS;
			actual_type = _PAGE_CACHE_UC_MINUS;
		}
	} else {
		req_type &= _PAGE_CACHE_MASK;
		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
	}

	if (err) {
		if (ret_type)
			*ret_type = actual_type;

		return -EINVAL;
	}

	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;

	spin_lock(&memtype_lock);

	/* Search for an existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		/* The new range ends before this entry: insert before it */
		if (parse->start >= end) {
			pr_debug("New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}

		/* The new range starts at or before this entry */
		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}

			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}

		/* The new range starts inside this entry */
		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}

			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}
394 "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
395 start
, end
, cattr_name(new_entry
->type
),
396 cattr_name(req_type
));
398 spin_unlock(&memtype_lock
);
403 /* No conflict. Not yet added to the list. Add to the tail */
404 list_add_tail(&new_entry
->nd
, &memtype_list
);
405 pr_debug("New Entry\n");
410 "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
411 start
, end
, cattr_name(actual_type
),
412 cattr_name(req_type
), cattr_name(*ret_type
));
415 "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
416 start
, end
, cattr_name(actual_type
),
417 cattr_name(req_type
));
420 spin_unlock(&memtype_lock
);

int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		return 0;
	}

	/* Low ISA region is always mapped WB. No need to track */
	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
		return 0;
	}

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}

/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from the existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
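
/*
 * For example, per the policy above: an mmap() of /dev/mem through a file
 * descriptor opened with O_SYNC is mapped UC, while one without O_SYNC gets
 * UC_MINUS unless an existing reservation makes it inherit another type.
 */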

#ifdef CONFIG_NONPROMISC_DEVMEM
/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_wc_enabled &&
	    !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != _PAGE_CACHE_UC_MINUS) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (pfn <= max_pfn_mapped &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}
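
/*
 * Sketch of the expected pairing (illustrative; pfn, size and vma_prot are
 * whatever the /dev/mem mapping used): every map_devmem() should be matched
 * by an unmap_devmem() over the same range so the memtype reservation taken
 * above is released:
 *
 *	map_devmem(pfn, size, vma_prot);
 *	...
 *	unmap_devmem(pfn, size, vma_prot);
 */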