/*
 * include/asm-ppc/pgtable.h -- PowerPC page table definitions (linux-2.3.21)
 */
#include <linux/config.h>

#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mm.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#ifndef CONFIG_8xx
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);
#else /* CONFIG_8xx */
#define __tlbia()	asm volatile ("tlbia" : : )

extern inline void local_flush_tlb_all(void)
	{ __tlbia(); }
extern inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
extern inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
	{ __tlbia(); }
extern inline void local_flush_tlb_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{ __tlbia(); }
extern inline void flush_hash_page(unsigned context, unsigned long va)
	{ }
#endif

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_page_to_ram(unsigned long);

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(struct task_struct *tsk, unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
#endif /* __ASSEMBLY__ */
/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much like
 * that used by the i386, for the sake of the Linux memory management code.
 * Low-level assembler code in head.S (procedure hash_page) is responsible
 * for extracting ptes from the tree and putting them into the hash table
 * when necessary, and updating the accessed and modified bits in the
 * page table tree.
 */
/*
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB does not have accessed nor write
 * protect.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 */
/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
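
/*
 * Illustrative note (added, not part of the original header): with
 * PGDIR_SHIFT = 22 and the usual 4kB PAGE_SHIFT of 12, a 32-bit virtual
 * address splits into a 10-bit pgd index and a 10-bit pte index.  A
 * minimal sketch of the decomposition:
 */
#if 0	/* example only */
static inline void example_split_va(unsigned long addr,
				    unsigned long *pgdx, unsigned long *ptex)
{
	*pgdx = addr >> PGDIR_SHIFT;				/* which pgd slot */
	*ptex = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* which pte slot */
	/* e.g. addr 0xC0001000 -> pgd index 0x300, pte index 0x001 */
}
#endif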
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 64MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which grow downwards
 * from ioremap_base) and the VM area allocations (which grow upwards from
 * VMALLOC_START).  For this reason we have ioremap_bot to check when we
 * actually run into the mappings set up in early boot with the VM system.
 * This really does become a problem for machines with good amounts of RAM.
 * -- Cort
 */
#define VMALLOC_OFFSET	(0x4000000) /* 64M */
#define VMALLOC_START	((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	ioremap_bot
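
/*
 * Worked example (added for illustration, values assumed): with 128MB of
 * RAM mapped at KERNELBASE 0xC0000000, high_memory is 0xC8000000 and
 *
 *	VMALLOC_START = (0xC8000000 + 0x4000000) & ~(0x4000000 - 1)
 *		      = 0xCC000000
 *
 * i.e. the vmalloc area starts one full 64MB "hole" above the end of the
 * direct-mapped RAM and grows upward toward VMALLOC_END (ioremap_bot).
 */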
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#ifndef CONFIG_8xx
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_USER	0x002	/* matches one of the PP bits */
#define _PAGE_RW	0x004	/* software: user write access allowed */
#define _PAGE_GUARDED	0x008
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_HWWRITE	0x200	/* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED	0

#else
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */
/* These four software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_WRITETHRU	0x0020	/* software: use writethrough cache */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */

#define _PAGE_DIRTY	0x0100	/* C: page changed (write protect) */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other must be 0 */
/* This is used to enable or disable the actual hardware write
 * protection.
 */
#define _PAGE_HWWRITE	_PAGE_DIRTY
#endif /* CONFIG_8xx */

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#ifdef __SMP__
#define _PAGE_BASE	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT
#else
#define _PAGE_BASE	_PAGE_PRESENT | _PAGE_ACCESSED
#endif
#define _PAGE_WRENABLE	_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
				 _PAGE_SHARED)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
				 _PAGE_NO_CACHE)
/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
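
/*
 * Example (added for illustration): a private PROT_READ|PROT_WRITE
 * mapping indexes __P011 and so gets PAGE_COPY, i.e. the pte stays
 * read-only and the first store faults so the generic mm code can do
 * copy-on-write; the equivalent MAP_SHARED mapping indexes __S011 and
 * gets PAGE_SHARED, which carries _PAGE_RW.
 */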
/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#ifndef __ASSEMBLY__
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

extern unsigned long empty_zero_page[1024];
#endif /* __ASSEMBLY__ */
#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2	2
#ifndef __ASSEMBLY__
extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~PAGE_MASK) != 0; }
extern inline int pmd_present(pmd_t pmd)	{ return (pmd_val(pmd) & PAGE_MASK) != 0; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
extern inline void pgd_clear(pgd_t * pgdp)	{ }
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
extern inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

extern inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

extern inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_RW;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_RW)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#if 1
#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#else
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long val = pte_val(pteval);
	extern void xmon(void *);

	if ((val & _PAGE_PRESENT) && ((val < 0x111000) || (val & 0x800)
	    || ((val & _PAGE_HWWRITE) && (~val & (_PAGE_RW|_PAGE_DIRTY))))) {
		printk("bad pte val %lx ptr=%p\n", val, pteptr);
		xmon(0);
	}
	*pteptr = pteval;
}
#endif
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (page) | pgprot_val(pgprot); return pte; }

extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = __pa(page) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline unsigned long pte_page(pte_t pte)
{ return (unsigned long) __va(pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd); }
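
/*
 * Usage sketch (added for illustration, not part of the original file):
 * how the conversion functions and pte_mk* helpers above are typically
 * combined to build and install a writable, dirty user pte.  `page' (a
 * kernel virtual address) and `ptep' are assumed to come from the caller.
 */
#if 0	/* example only */
static inline void example_install_pte(unsigned long page, pte_t *ptep)
{
	pte_t pte = mk_pte(page, PAGE_SHARED);

	pte = pte_mkdirty(pte_mkwrite(pte));	/* ends up with _PAGE_HWWRITE set */
	set_pte(ptep, pte);
}
#endif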
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
/*
 * This is handled very differently on the PPC since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 *
 * Note that the SMP/UP versions are the same but we don't need a
 * per cpu list of zero pages because we do the zero-ing with the cache
 * off and the access routines are lock-free but the pgt cache stuff
 * is per-cpu since it isn't done with any lock-free access routines
 * (although I think we need arch-specific routines so I can do lock-free).
 *
 * I need to generalize this so we can use it for other arch's as well.
 * -- Cort
 */
#ifdef __SMP__
#define quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#endif

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

extern unsigned long *zero_cache;    /* head linked list of pre-zero'd pages */
extern unsigned long zero_sz;	     /* # currently pre-zero'd pages */
extern unsigned long zeropage_hits;  /* # zero'd page requests that we've done */
extern unsigned long zeropage_calls; /* # zero'd page requests that've been made */
extern unsigned long zerototal;      /* # pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/* return a pre-zero'd page from the list, return NULL if none available -- Cort */
extern unsigned long get_zero_page_fast(void);
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret/* = (pgd_t *)__get_free_page(GFP_KERNEL)*/, *init;

	if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )
	{
		if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
			memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	}
	if (ret) {
		init = pgd_offset(&init_mm, 0);
		/*memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));*/
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use pmd cache, so this is a dummy routine */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}
extern void __bad_pte(pmd_t *pmd);

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		pmd_val(*pmd) = (unsigned long) page;
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc
#define pte_alloc_kernel	pte_alloc
extern int do_check_pgt_cache(int, int);

extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef __SMP__
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef __SMP__
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
	   callee, so we can modify pgd caches of other CPUs as well. -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}
extern pgd_t swapper_pg_dir[1024];

/* Walk the page tables of `mm' and return the pte for virtual address `va'. */
extern __inline__ pte_t *find_pte(struct mm_struct *mm, unsigned long va)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte = NULL;

	va &= PAGE_MASK;

	dir = pgd_offset( mm, va );
	if (dir)
	{
		pmd = pmd_offset(dir, va & PAGE_MASK);
		if (pmd && pmd_present(*pmd))
		{
			pte = pte_offset(pmd, va);
			if (pte && pte_present(*pte))
			{
				pte_uncache(*pte);
				flush_tlb_page(find_vma(mm,va),va);
			}
		}
	}
	return pte;
}
/*
 * Page tables may have changed.  We don't need to do anything here
 * as entries are faulted into the hash table by the low-level
 * data/instruction access exception handlers.
 */
#define update_mmu_cache(vma, addr, pte)	do { } while (0)
/*
 * When flushing the tlb entry for a page, we also need to flush the
 * hash table entry.  flush_hash_page is assembler (for speed) in head.S.
 */
extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
extern void flush_hash_page(unsigned context, unsigned long va);
#define SWP_TYPE(entry)		(((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry)	((entry) >> 8)
#define SWP_ENTRY(type,offset)	(((type) << 1) | ((offset) << 8))
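
/*
 * Worked round trip (added for illustration): packing swap type 3 at
 * offset 0x1234 gives
 *
 *	SWP_ENTRY(3, 0x1234) = (3 << 1) | (0x1234 << 8) = 0x123406
 *	SWP_TYPE(0x123406)   = (0x123406 >> 1) & 0x7f   = 3
 *	SWP_OFFSET(0x123406) = 0x123406 >> 8            = 0x1234
 *
 * Bit 0 (_PAGE_PRESENT) is left clear, so a swapped-out pte can never
 * look like a present translation.
 */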
#define module_map	vmalloc
#define module_unmap	vfree

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3
/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp );

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
				  unsigned int cmode);
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */