/*
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
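
/* For illustration only (not part of the original file): a driver's mmap
 * handler would typically hand its device's IO pfn range straight to
 * io_remap_pfn_range() below; the device name and handler here are
 * hypothetical, a minimal sketch of the usual calling pattern:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long len = vma->vm_end - vma->vm_start;
 *
 *		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *					  len, vma->vm_page_prot);
 *	}
 */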
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t *pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
		if (!(address & 0xffff)) {
			/* When the virtual address and IO offset are aligned
			 * and the remaining region is large enough, use a
			 * bigger TTE size (4MB, 512KB or 64KB) and fill the
			 * whole chunk from one template PTE.
			 */
			if (PAGE_SIZE < (4 * 1024 * 1024) &&
			    !(address & 0x3fffff) &&
			    !(offset & 0x3ffffe) &&
			    end >= address + 0x400000) {
				entry = mk_pte_io(offset, prot, space,
						  4 * 1024 * 1024);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (PAGE_SIZE < (512 * 1024) &&
				   !(address & 0x7ffff) &&
				   !(offset & 0x7fffe) &&
				   end >= address + 0x80000) {
				entry = mk_pte_io(offset, prot, space,
						  512 * 1024);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (PAGE_SIZE < (64 * 1024) &&
				   !(offset & 0xfffe) &&
				   end >= address + 0x10000) {
				entry = mk_pte_io(offset, prot, space,
						  64 * 1024);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		if (pte_write(entry))
			entry = pte_mkdirty(entry);
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte_val(entry) += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
static inline int io_remap_pud_range(struct mm_struct *mm, pud_t *pud, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	offset -= address;
	do {
		pmd_t *pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t *dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	/* On sparc the pfn argument is a cookie: the IO space number lives
	 * in the upper bits and the real page frame number in the lower ones.
	 */
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	vma->vm_pgoff = phys_base >> PAGE_SHIFT;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from,
					   offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}

	flush_tlb_range(vma, beg, end);

	return error;
}
EXPORT_SYMBOL(io_remap_pfn_range);