/* mm/pagewalk.c */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>

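/*
 * Walk the PTEs mapping [addr, end) under one pmd entry, calling
 * ->pte_entry for each of them.
 */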
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          const struct mm_walk *walk, void *private)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

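/*
 * Walk the pmd entries covering [addr, end) under one pud entry, reporting
 * unmapped entries through ->pte_hole and descending to the PTE level when
 * a ->pte_entry callback is supplied.
 */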
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          const struct mm_walk *walk, void *private)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, private);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, private);
                if (!err && walk->pte_entry)
                        err = walk_pte_range(pmd, addr, next, walk, private);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

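/*
 * Walk the pud entries covering [addr, end) under one pgd entry, descending
 * to the pmd level when a pmd or pte callback is supplied.
 */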
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          const struct mm_walk *walk, void *private)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, private);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pud_entry)
                        err = walk->pud_entry(pud, addr, next, private);
                if (!err && (walk->pmd_entry || walk->pte_entry))
                        err = walk_pmd_range(pud, addr, next, walk, private);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @mm: memory map to walk
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a caller-supplied private data pointer.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned. A minimal example caller is sketched at the end of this file.
 */
int walk_page_range(const struct mm_struct *mm,
                    unsigned long addr, unsigned long end,
                    const struct mm_walk *walk, void *private)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        if (addr >= end)
                return err;

        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, private);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pgd_entry)
                        err = walk->pgd_entry(pgd, addr, next, private);
                if (!err &&
                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
                        err = walk_pud_range(pgd, addr, next, walk, private);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}
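
/*
 * Example (an illustrative sketch only, not part of mm/pagewalk.c): one way
 * a caller might count present PTEs in a range with walk_page_range().  The
 * names count_pte() and count_present_ptes() are hypothetical; only struct
 * mm_walk, its ->pte_entry hook, and walk_page_range() itself come from the
 * code above.  The caller is assumed to hold whatever mm locking its
 * context requires (e.g. down_read(&mm->mmap_sem)).
 */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
                     void *private)
{
        unsigned long *count = private;

        /* Count the entry; returning non-zero here would abort the walk. */
        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long count = 0;
        /* Only ->pte_entry is set, so only the PTE level is reported. */
        const struct mm_walk count_walk = {
                .pte_entry = count_pte,
        };

        walk_page_range(mm, start, end, &count_walk, &count);
        return count;
}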