// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	/* Build the set of PTE bits that must be present (and writable,
	 * for a write fault). _PAGE_SPECIAL is in the mask but not the
	 * result, so special mappings always fail the check below.
	 */
	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
		 * hugepage PTEs. We could optimize this in the future
		 * to use page_cache_add_speculative() for the hugepage
		 * case.
		 */
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
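/*
 * Note on the pattern above: the reference on the head page is taken
 * speculatively and the PTE is then re-read; if it changed underneath us
 * (e.g. a concurrent unmap), the reference is dropped and the fast path
 * gives up, so get_user_pages_fast() can fall back to the slow path.
 */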
static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page;
	int refs;

	if (!(pmd_val(pmd) & _PAGE_VALID))
		return 0;

	if (write && !pmd_write(pmd))
		return 0;

	refs = 0;
	page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
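/*
 * Note: pages[] and *nr are filled in optimistically before the refcount
 * on the head page is taken; both failure paths above roll *nr back so
 * the caller never sees the stale entries.
 */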
static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page;
	int refs;

	if (!(pud_val(pud) & _PAGE_VALID))
		return 0;

	if (write && !pud_write(pud))
		return 0;

	refs = 0;
	page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write,
					  pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
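/*
 * Hedged sketch (illustrative, not part of this file):
 * __get_user_pages_fast() never takes mmap_sem and never sleeps, so it
 * can be called from contexts that cannot block; it simply reports how
 * many pages it managed to pin, and callers must cope with a short
 * count. example_peek_user_page() below is an assumed name.
 */
#if 0
static struct page *example_peek_user_page(unsigned long uaddr)
{
	struct page *page;

	if (__get_user_pages_fast(uaddr, 1, 0, &page) != 1)
		return NULL;	/* caller falls back to a sleeping path */
	return page;		/* caller must put_page() when done */
}
#endif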
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency.
	 * This needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * those calls are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on sparc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);

	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start,
			(end - start) >> PAGE_SHIFT, pages,
			write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values: if some
		 * pages were pinned on the fast path, report those even
		 * when the slow path fails outright.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
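/*
 * Hedged usage sketch (illustrative, not part of this file): a
 * hypothetical caller that pins a whole user buffer for writing, in the
 * style of the direct-IO users mentioned above. example_pin_user_buf()
 * is an assumed name; get_user_pages_fast() and put_page() are the real
 * interfaces, and treating a short pin as -EFAULT is caller policy.
 */
#if 0
static int example_pin_user_buf(unsigned long uaddr, int nr_pages,
				struct page **pages)
{
	int i, got;

	/* Returns the number of pages pinned, or -errno on early failure. */
	got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (got < 0)
		return got;
	if (got < nr_pages) {
		/* Partial pin: drop the references we did take. */
		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return -EFAULT;
	}
	return 0;
}
#endif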