/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
        (void)reason;
}
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;
        bool boot_cpu = !boot_pat_state;

        if (!pat_enabled)
                return;

        if (!cpu_has_pat) {
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                        return;
                } else {
                        /*
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");
                        BUG();
                }
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);

        if (boot_cpu)
                printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
                       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

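/*
 * Illustrative sketch, not part of the original file: with the slot
 * assignment programmed above (WB, WC, UC-, UC repeated in both halves of
 * the MSR), the value written to MSR_IA32_CR_PAT works out to one byte per
 * PAT entry. The helper name below is hypothetical.
 */
static u64 __maybe_unused pat_example_msr_value(void)
{
        /* entries 0-3 and 4-7: 0x06 (WB), 0x01 (WC), 0x07 (UC-), 0x00 (UC) */
        return 0x0007010600070106ULL;
}
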
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of memtype range.
 *
 * memtype_lock protects both the linear list and rbtree.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
        struct rb_node          rb;
};

static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list and rbtree */

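/*
 * Illustrative sketch, not part of the original file: because every tracked
 * region lives on memtype_list (and in the rbtree keyed by start address),
 * a debugging walk of the current reservations is just a locked list
 * traversal. The helper name is hypothetical.
 */
static void __maybe_unused pat_example_dump_memtypes(void)
{
        struct memtype *entry;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd)
                dprintk("memtype %s 0x%Lx-0x%Lx\n",
                        cattr_name(entry->type), entry->start, entry->end);
        spin_unlock(&memtype_lock);
}
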
static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
        struct rb_node *node = root->rb_node;
        struct memtype *last_lower = NULL;

        while (node) {
                struct memtype *data = container_of(node, struct memtype, rb);

                if (data->start < start) {
                        last_lower = data;
                        node = node->rb_right;
                } else if (data->start > start) {
                        node = node->rb_left;
                } else
                        return data;
        }

        /* Will return NULL if there is no entry with its start <= start */
        return last_lower;
}

static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;

        while (*new) {
                struct memtype *this = container_of(*new, struct memtype, rb);

                parent = *new;
                if (data->start <= this->start)
                        new = &((*new)->rb_left);
                else if (data->start > this->start)
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->rb, parent, new);
        rb_insert_color(&data->rb, root);
}

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_UC_MINUS;

                return _PAGE_CACHE_WB;
        }

        return req_type;
}

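/*
 * Illustrative sketch, not part of the original file: the practical effect
 * of the intersection above is that a WB request is only honoured when the
 * MTRRs also report write-back; otherwise the range is demoted to UC-.
 * The helper name is hypothetical.
 */
static bool __maybe_unused pat_example_wb_allowed(u64 start, u64 end)
{
        return pat_x_mtrr_type(start, end, _PAGE_CACHE_WB) == _PAGE_CACHE_WB;
}
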
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                /*
                 * For legacy reasons, physical address range in the legacy ISA
                 * region is tracked as non-RAM. This will allow users of
                 * /dev/mem to map portions of legacy ISA region, even when
                 * some of those portions are listed (or not even listed) with
                 * different e820 types (RAM/reserved/..)
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

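/*
 * Illustrative sketch, not part of the original file: callers dispatch on
 * the tri-state result above (1 = all RAM, 0 = no RAM, -1 = mixed, which is
 * treated as an error). The helper name is hypothetical.
 */
static bool __maybe_unused pat_example_range_is_pure_ram(unsigned long start,
                                                         unsigned long end)
{
        return pat_pagerange_is_ram(start, end) == 1;
}
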
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                unsigned long type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
                        printk(KERN_INFO "reserve_ram_pages_type failed "
                               "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
                               start, end, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}

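/*
 * Illustrative sketch, not part of the original file: since the type of a
 * reserved RAM page is kept in its struct page flags, reading it back is a
 * single page-flag lookup; -1 means the page was never reserved and is
 * treated as WB. The helper name is hypothetical.
 */
static unsigned long __maybe_unused pat_example_ram_page_type(u64 paddr)
{
        return get_page_memtype(pfn_to_page(paddr >> PAGE_SHIFT));
}
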
static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, -1);
        }
        return 0;
}

/*
 * req_type typically has one of the:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else if (req_type == _PAGE_CACHE_WC)
                                *new_type = _PAGE_CACHE_UC_MINUS;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                spin_lock(&memtype_lock);
                err = reserve_ram_pages_type(start, end, req_type, new_type);
                spin_unlock(&memtype_lock);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end   = end;
        new->type  = actual_type;

        spin_lock(&memtype_lock);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        memtype_rb_insert(&memtype_rbroot, new);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

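/*
 * Illustrative sketch, not part of the original file: a typical caller pairs
 * reserve_memtype() with free_memtype() and must be prepared to be handed a
 * compatible type different from the one it asked for. The helper name is
 * hypothetical.
 */
static int __maybe_unused pat_example_reserve_wc(u64 base, u64 size)
{
        unsigned long got;
        int err;

        err = reserve_memtype(base, base + size, _PAGE_CACHE_WC, &got);
        if (err)
                return err;

        if (got != _PAGE_CACHE_WC) {
                /* only a compatible type was available; back out */
                free_memtype(base, base + size);
                return -EINVAL;
        }
        return 0;
}
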
int free_memtype(u64 start, u64 end)
{
        struct memtype *entry, *saved_entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                spin_lock(&memtype_lock);
                err = free_ram_pages_type(start, end);
                spin_unlock(&memtype_lock);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, start);
        if (unlikely(entry == NULL))
                goto unlock_ret;

        /*
         * Saved entry points to an entry with start same or less than what
         * we searched for. Now go through the list in both directions to look
         * for the entry that matches with both start and end, with the list
         * kept sorted by start address.
         */
        saved_entry = entry;
        list_for_each_entry_from(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        rb_erase(&entry->rb, &memtype_rbroot);
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                } else if (entry->start > start) {
                        break;
                }
        }

        if (!err)
                goto unlock_ret;

        entry = saved_entry;
        list_for_each_entry_reverse(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        rb_erase(&entry->rb, &memtype_rbroot);
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                } else if (entry->start < start) {
                        break;
                }
        }
unlock_ret:
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
        int rettype = _PAGE_CACHE_WB;
        struct memtype *entry;

        if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;

                spin_lock(&memtype_lock);
                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
                spin_unlock(&memtype_lock);
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_WB;

                return rettype;
        }

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
                rettype = _PAGE_CACHE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}

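/*
 * Illustrative sketch, not part of the original file: lookup_memtype() works
 * on a single page, so checking that a larger region has a uniform type
 * means probing it page by page (and, as noted above, only makes sense with
 * PAT enabled). The helper name is hypothetical.
 */
static bool __maybe_unused pat_example_region_has_type(u64 start, u64 end,
                                                       unsigned long type)
{
        u64 paddr;

        for (paddr = start; paddr < end; paddr += PAGE_SIZE)
                if (lookup_memtype(paddr) != type)
                        return false;
        return true;
}
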
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                       unsigned long *type)
{
        resource_size_t size = end - start;
        unsigned long req_type = *type;
        unsigned long new_type;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

        ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;

        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, size, new_type) < 0)
                goto out_free;

        *type = new_type;

        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}

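/*
 * Illustrative sketch, not part of the original file: io_reserve_memtype()
 * and io_free_memtype() are meant to bracket the lifetime of an MMIO
 * mapping; the caller uses whatever compatible type comes back in *type.
 * The helper name and the placeholder comment are hypothetical.
 */
static int __maybe_unused pat_example_claim_mmio(resource_size_t start,
                                                 resource_size_t end)
{
        unsigned long type = _PAGE_CACHE_UC_MINUS;
        int ret;

        ret = io_reserve_memtype(start, end, &type);
        if (ret)
                return ret;

        /* ... map and use the region with 'type' here ... */

        io_free_memtype(start, end);
        return 0;
}
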
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        unsigned long flags = _PAGE_CACHE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
        unsigned long id_sz;

        if (base >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                printk(KERN_INFO
                        "%s:%d ioremap_change_attr failed %s "
                        "for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        base, (unsigned long long)(base + size));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non RAM regions only and, after a successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                             int strict_prot)
{
        int is_ram = 0;
        int ret;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
        unsigned long flags = want_flags;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() for RAM pages. We do not refcount to keep
         * track of number of mappings of RAM pages. We can assert that
         * the type requested matches the type of first page in the range.
         */
        if (is_ram) {
                if (!pat_enabled)
                        return 0;

                flags = lookup_memtype(paddr);
                if (want_flags != flags) {
                        printk(KERN_WARNING
                        "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
                                             flags);
                }
                return 0;
        }

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning different type than the one requested in
                 * non strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                      unsigned long pfn, unsigned long size)
{
        unsigned long flags;
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        if (!pat_enabled)
                return 0;

        /* for vm_insert_pfn and friends, we set prot based on lookup */
        flags = lookup_memtype(pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         flags);

        return 0;
}

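/*
 * Illustrative sketch, not part of the original file: a driver mmap()
 * handler that uses remap_pfn_range() across the whole VMA creates exactly
 * the "linear pfn mapping" case handled above, so the PAT reservation is
 * taken in one go; vm_insert_pfn() users take the lookup_memtype() path
 * instead. The handler name and its use of vm_pgoff as a physical page
 * offset are assumptions for illustration.
 */
static int __maybe_unused pat_example_mmap(struct file *file,
                                           struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* remap the whole VMA; PAT tracking happens via track_pfn_vma_new() */
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               size, vma->vm_page_prot);
}
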
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                     unsigned long size)
{
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static const struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);

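/*
 * Illustrative note, not part of the original file: with debugfs mounted in
 * the usual place, the file created above shows up as
 * /sys/kernel/debug/x86/pat_memtype_list, and memtype_seq_show() prints one
 * line per tracked range, e.g.:
 *
 *      write-combining @ 0xd0000000-0xd0100000
 */
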
#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */