/*
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
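/* io_remap_pte_range() below tries to take advantage of the larger Sparc
 * I/O page sizes: when the virtual address, the physical offset and the
 * remaining length all line up on a 4MB, 512KB or 64KB boundary, a single
 * large-sized I/O PTE is built and then replicated into every base-page
 * PTE slot it covers, so the whole range can be mapped with one large
 * TLB entry instead of many 8KB ones.
 */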
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
		if (!(address & 0xffff)) {
			if (PAGE_SIZE < (4 * 1024 * 1024) &&
			    !(address & 0x3fffff) &&
			    !(offset & 0x3ffffe) &&
			    end >= address + 0x400000) {
				entry = mk_pte_io(offset, prot, space,
						  4 * 1024 * 1024);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (PAGE_SIZE < (512 * 1024) &&
				   !(address & 0x7ffff) &&
				   !(offset & 0x7fffe) &&
				   end >= address + 0x80000) {
				entry = mk_pte_io(offset, prot, space,
						  512 * 1024);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (PAGE_SIZE < (64 * 1024) &&
				   !(offset & 0xfffe) &&
				   end >= address + 0x10000) {
				entry = mk_pte_io(offset, prot, space,
						  64 * 1024);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		if (pte_write(entry))
			entry = pte_mkdirty(entry);
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte_val(entry) += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd,
				     unsigned long address, unsigned long size,
				     unsigned long offset, pgprot_t prot,
				     int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address,
				   address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud,
				     unsigned long address, unsigned long size,
				     unsigned long offset, pgprot_t prot,
				     int space)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	offset -= address;
	do {
		pmd_t *pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		io_remap_pmd_range(mm, pmd, address, end - address,
				   address + offset, prot, space);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	vma->vm_pgoff = phys_base >> PAGE_SHIFT;

	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from,
					   offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}

	flush_tlb_range(vma, beg, end);

	return error;
}

EXPORT_SYMBOL(io_remap_pfn_range);