 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/imalloc.h>
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

extern pgd_t ioremap_dir[];
pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;

unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1 = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
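
/*
 * show_mem() above is the summary that typically gets dumped from the
 * generic page-allocation-failure and SysRq-m paths, so it is kept
 * cheap: a single pass over every node's mem_map with no locking.
 */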
#ifdef CONFIG_PPC_ISERIES

void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return (void __iomem *)addr;
}

extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
			       unsigned long flags)
{
	return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
	return;
}

#else
static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
			      unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
				     unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		unmap_im_area_pte(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
				     unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		unmap_im_area_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void unmap_im_area(unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = &ioremap_mm;
	/* addr is advanced by the loop below, so remember where the range began */
	unsigned long start = addr;
	unsigned long next;
	pgd_t *pgd;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset_i(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		unmap_im_area_pud(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);

	spin_unlock(&mm->page_table_lock);
}
/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		spin_lock(&ioremap_mm.page_table_lock);
		pgdp = pgd_offset_i(ea);
		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
		pa = abs_to_phys(pa);
		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
		spin_unlock(&ioremap_mm.page_table_lock);
	} else {
		unsigned long va, vpn, hash, hpteg;

		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		va = (vsid << 28) | (ea & 0xFFFFFFF);
		vpn = va >> PAGE_SHIFT;

		hash = hpt_hash(vpn, 0);

		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
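
		/*
		 * hpt_hash() folds the virtual page number into a hash
		 * bucket; masking with htab_hash_mask and scaling by
		 * HPTES_PER_GROUP selects the first slot of the primary
		 * PTE group that this bolted entry will be inserted into.
		 */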
		/* Panic if a pte group is full */
		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
				       1, 0) == -1) {
			panic("map_io_page: could not insert mapping");
		}
	}
	return 0;
}
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
				    unsigned long ea, unsigned long size,
				    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			goto failure;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));

failure:
	unmap_im_area(ea, ea + size);
	return NULL;
}
void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

void __iomem * __ioremap(unsigned long addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR	"unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
	unsigned long address, size;
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	if ((size = im_free(addr)) == 0)
		return;

	address = (unsigned long)addr;
	unmap_im_area(address, address + size);
}
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,
				   IM_REGION_SUPERSET);
	}

	return 0;
}
int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

#endif

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
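
/*
 * Illustrative use of the interfaces exported above, as a driver might
 * write it (the BAR index and register offset are made-up values for the
 * example, not anything defined in this file):
 *
 *	void __iomem *regs = ioremap(pci_resource_start(pdev, 0), 0x1000);
 *	if (regs) {
 *		u32 status = readl(regs + 0x10);
 *		...
 *		iounmap(regs);
 *	}
 *
 * ioremap() maps with _PAGE_NO_CACHE|_PAGE_GUARDED; __ioremap() is the
 * variant that lets the caller choose the protection flags.
 */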
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;

#ifdef CONFIG_HUGETLB_PAGE
	/* We leave htlb_segs as it was, but for a fork, we need to
	 * clear the huge_pgdir. */
	mm->context.huge_pgdir = NULL;
#endif

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		idr_remove(&mmu_context_idr, index);
		return -ENOMEM;
	}

	mm->context.id = index;

	return 0;
}
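
/*
 * The id handed out by init_new_context() is what get_vsid() folds into
 * user-space VSIDs (see update_mmu_cache() further down).  Allocation
 * starts at 1, presumably so that 0 stays reserved for the NO_CONTEXT
 * value that destroy_context() writes back, and ids above MAX_CONTEXT
 * are refused because the context number has to fit in the VSID layout.
 */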
void destroy_context(struct mm_struct *mm)
{
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, mm->context.id);
	spin_unlock(&mmu_context_lock);

	mm->context.id = NO_CONTEXT;
#ifdef CONFIG_HUGETLB_PAGE
	hugetlb_mm_free_pgd(mm);
#endif
}
/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
{
#ifndef CONFIG_PPC_ISERIES
	unsigned long i;
#endif

	ppc64_boot_msg(0x100, "MM Init");

	/* This is the story of the IO hole... please, keep seated,
	 * unfortunately, we are out of oxygen masks at the moment.
	 * So we need some rough way to tell where your big IO hole
	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
	 * that area as well, on POWER4 we don't have one, etc...
	 * We need that as a "hint" when sizing the TCE table on POWER3.
	 * So far, the simplest way that seems to work well enough for us
	 * is to just assume that the first discontinuity in our physical
	 * RAM layout is the IO hole. That may not be correct in the future
	 * (and isn't on iSeries but then we don't care ;)
	 */
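
	/*
	 * Concrete example (illustrative numbers only): if lmb reports
	 * memory regions [0, 0x80000000) and [0x100000000, ...), the loop
	 * below sets io_hole_start to 2GB and io_hole_size to 2GB, i.e.
	 * the classic pmac-style 2G-4G hole described above.
	 */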
#ifndef CONFIG_PPC_ISERIES
	for (i = 1; i < lmb.memory.cnt; i++) {
		unsigned long base, prevbase, prevsize;

		prevbase = lmb.memory.region[i-1].physbase;
		prevsize = lmb.memory.region[i-1].size;
		base = lmb.memory.region[i].physbase;
		if (base > (prevbase + prevsize)) {
			io_hole_start = prevbase + prevsize;
			io_hole_size = base - (prevbase + prevsize);
			break;
		}
	}
#endif /* CONFIG_PPC_ISERIES */

	if (io_hole_start)
		printk("IO Hole assumed to be %lx -> %lx\n",
		       io_hole_start, io_hole_start + io_hole_size - 1);

	ppc64_boot_msg(0x100, "MM Init Done");
}
/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	int i;
	unsigned long paddr = (pfn << PAGE_SHIFT);

	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long base;

#ifdef CONFIG_MSCHUNKS
		base = lmb.memory.region[i].physbase;
#else
		base = lmb.memory.region[i].base;
#endif
		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(page_is_ram);
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
#ifndef CONFIG_DISCONTIGMEM
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
	int boot_mapsize;

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);
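
	/*
	 * Worked example (illustrative): with 2GB of RAM and 4KB pages,
	 * total_pages is 524288, so the bitmap needs 524288 bits = 64KB,
	 * which bootmem_bootmap_pages() rounds up to 16 pages.
	 */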
	start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	max_pfn = max_low_pfn;

	/* add all physical memory to the bootmem map. Also find the first */
	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);
	}

	/* reserve the sections we're already using */
	for (i=0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;

		reserve_bootmem(physbase, size);
	}
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
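
	/*
	 * Example with illustrative figures: for top_of_ram = 4GB and
	 * total_ram = 3.5GB, ZONE_DMA spans 1048576 pages and zholes_size
	 * records the 131072 pages of IO hole, so free-page accounting is
	 * not inflated by memory that does not actually exist.
	 */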
	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* CONFIG_DISCONTIGMEM */
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;
		struct kcore_list *kcore_mem;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("mem_init: kmalloc failed\n");

		kclist_add(kcore_mem, __va(physbase), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
void __init mem_init(void)
{
#ifdef CONFIG_DISCONTIGMEM
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_DISCONTIGMEM
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

#ifdef CONFIG_PPC_ISERIES
	iommu_vio_init();
#endif
	/* Initialize the vDSO */
	vdso_init();
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
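
/*
 * Convention used throughout this file: PG_arch_1 set on a page means
 * "the i-cache is clean for this page".  flush_dcache_page() and
 * clear_user_page() clear the bit when the kernel dirties the data, and
 * update_mmu_cache() below flushes and sets it again before the page is
 * handed to user space.
 */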
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
		      pte_t pte)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);
			}
		}
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;

	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, ea);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
		    0x300, local);
	local_irq_restore(flags);
}
void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;

	return virt_addr;
}
kmem_cache_t *zero_cache;

static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	memset(pte, 0, PAGE_SIZE);
}

void pgtable_cache_init(void)
{
	zero_cache = kmem_cache_create("zero",
				       PAGE_SIZE,
				       0,
				       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
				       zero_ctor,
				       NULL);
	if (!zero_cache)
		panic("pgtable_cache_init(): could not create zero_cache!\n");
}
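
/*
 * As the zero_ctor() constructor above suggests, zero_cache hands out
 * PAGE_SIZE objects that are already zero-filled, so the page-table
 * allocation paths that draw from it do not need an explicit memset()
 * on every allocation.
 */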
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
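
/*
 * Illustrative caller (not code from this file): the /dev/mem mmap path
 * consults this hook, roughly along the lines of
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, offset,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *
 * so physical ranges that page_is_ram() rejects end up mapped guarded
 * and cache-inhibited.
 */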