/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;
	raw_spinlock_t lock;
	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;
	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
static void pmb_unmap_entry(struct pmb_entry *, int depth);
static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;
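
/*
 * Each PMB slot is programmed through a pair of memory-mapped array
 * entries: an address array entry (VPN and valid bit) and a data array
 * entry (PPN plus size, cache and valid flags). The helpers below compute
 * the register addresses for a given slot.
 */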
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}
static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}
static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}
/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}
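
/*
 * Check whether the given virtual/physical range is already covered by an
 * existing PMB mapping, following entry links for compound mappings.
 */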
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}
static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}
static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}
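
/*
 * Claim the first free slot in the PMB allocation bitmap. The caller is
 * expected to hold pmb_rwlock for writing.
 */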
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}
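
/*
 * Allocate and initialize a software pmb_entry, either in the slot the
 * caller asked for or, with PMB_NO_ENTRY, in the first free slot.
 */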
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}
/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}
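
/* Invalidate the hardware slot for this entry by clearing its V bits. */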
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}
#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */
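
/*
 * Establish a bolted kernel mapping of [vaddr, vaddr + size) onto phys,
 * carving the range into the largest supported page sizes that still fit.
 * A 192MB request, for example, ends up as one 128MB entry followed by a
 * 64MB entry, with the entries linked together for later tear-down.
 */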
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}
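
/*
 * ioremap() via the PMB: round the request up to a supported PMB page
 * size, grab a virtual area for it and bolt the mapping in place.
 */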
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}
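
/*
 * Tear down a mapping chain starting at @pmbe, at most @depth entries
 * deep. The caller must hold pmb_rwlock for writing.
 */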
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}
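
/* Log the PMB mappings that were established at boot. */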
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe))
			continue;

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
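
/*
 * Collapse a chain of contiguous entries into a single larger mapping
 * rooted at @head, provided the combined size is itself a valid PMB size.
 */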
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);

		break;
	}

	read_unlock(&pmb_rwlock);
}
#endif
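
/* Parse the "pmb" kernel command line option, e.g. "pmb=iomap". */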
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
}
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}
static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;

	return 0;
}
subsys_initcall(pmb_debugfs_init);
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif