Linux 3.16-rc2
arch/sparc/mm/gup.c
/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
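
/*
 * Overview: the fast path walks the user page tables (pgd -> pud -> pmd
 * -> pte) with interrupts disabled and takes a speculative reference on
 * each mapped page, never acquiring mmap_sem or page table locks.  Any
 * entry it cannot handle safely sends it to the slow, locked
 * get_user_pages() path for the remaining pages.
 */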

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;
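
	/*
	 * A pte qualifies only if every bit in 'result' is set: present,
	 * the P bit, and the write-permission bit when a write was
	 * requested.  'mask' additionally includes _PAGE_SPECIAL, which
	 * is absent from 'result', so special mappings (which have no
	 * struct page to pin) fail the comparison below.  The _4V bit
	 * encodings apply to sun4v (hypervisor) machines, the _4U ones
	 * to older sun4u hardware.
	 */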

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
		 * hugepage PTEs.  We could optimize this in the future
		 * to use page_cache_add_speculative() for the hugepage
		 * case.
		 */
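		/*
		 * Take a speculative reference on the (compound) head
		 * page, then re-read the pte.  If the pte changed while
		 * the reference was being taken, the page may have been
		 * unmapped or freed under us: drop the reference and
		 * return 0 so the caller falls back to the locked slow
		 * path.
		 */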
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		if (head != page)
			get_huge_page_tail(page);

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
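
/*
 * When the pmd itself maps a huge page, record each constituent sub-page
 * covered by [addr, end) and count the references needed, then take them
 * all at once with page_cache_add_speculative() on the head page.  If
 * the pmd changed in the meantime, the references are dropped again and
 * the caller falls back to the slow path.
 */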
static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (!(pmd_val(pmd) & _PAGE_VALID))
		return 0;

	if (write && !pmd_write(pmd))
		return 0;

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/* Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
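
/*
 * Walk the pmd entries covering [addr, end).  An empty pmd, or a
 * transparent huge page in the middle of being split
 * (pmd_trans_splitting()), cannot be handled locklessly, so returning 0
 * punts the whole request to the slow path.
 */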
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write,
					  pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
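
/*
 * Pin up to nr_pages pages of the current task's user memory starting at
 * 'start' without taking mmap_sem on the fast path.  On success, the
 * number of pinned pages is returned and each entry of pages[] carries a
 * reference that the caller must drop with put_page().
 */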
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on sparc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
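	/*
	 * The exact guarantee is arch-specific, but presumably, as on
	 * powerpc (from which this file was cribbed), freeing a page
	 * table involves notifying all CPUs, and a CPU that runs this
	 * walk with interrupts disabled will not acknowledge that, so
	 * the tables read below cannot be freed and reused mid-walk.
	 */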
	local_irq_disable();

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);

	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
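
/*
 * Example (a sketch, not part of this file): a driver pinning a user
 * buffer for I/O, where 'uaddr' is a hypothetical page-aligned user
 * virtual address, might do:
 *
 *	struct page *pages[64];
 *	int got = get_user_pages_fast(uaddr, 64, 1, pages);
 *
 *	if (got > 0) {
 *		... use the pinned pages ...
 *		while (got--)
 *			put_page(pages[got]);
 *	}
 *
 * A negative return is an error; a short positive return means only the
 * first 'got' pages could be pinned.
 */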