/*
 * Lockless get_user_pages_fast for s390
 *
 * Copyright IBM Corp. 2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		/* The pte may have changed under us, re-check it. */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page, *tail;
	int refs;

	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
	mask = result | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	/* The pmd may have changed under us, drop the references again. */
	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	/* On s390 the upper page table levels may be folded into lower ones. */
	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		/*
		 * The pmd_trans_splitting() check below explains why
		 * pmdp_splitting_flush() has to serialize with
		 * smp_call_function() against our disabled IRQs, to stop
		 * this gup-fast code from running while we set the
		 * splitting bit in the pmd. Returning zero will take
		 * the slow path that will call wait_split_huge_page()
		 * if the pmd is still in splitting state.
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end <= start) || (end > TASK_SIZE))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
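
/*
 * Illustrative sketch only (not part of the original file): because
 * __get_user_pages_fast() never sleeps and never takes mmap_sem, it can
 * be called from atomic context. The caller must drop the reference with
 * put_page(). The function name sketch_peek_user_page() is hypothetical.
 */
static int __maybe_unused sketch_peek_user_page(unsigned long uaddr)
{
	struct page *page;

	/* Try to pin exactly one page for reading, without sleeping. */
	if (__get_user_pages_fast(uaddr & PAGE_MASK, 1, 0, &page) != 1)
		return -EFAULT;	/* caller must use a sleeping fallback */

	/* ... access the page, e.g. via kmap_atomic() ... */

	put_page(page);
	return 0;
}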

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(current, mm, start,
				      nr_pages - nr, write, 0, pages);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
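
/*
 * Illustrative sketch only (not part of the original file): the typical
 * caller pattern for get_user_pages_fast() - pin a user buffer, use it,
 * then release every pinned page. The function name
 * sketch_pin_user_buffer() is hypothetical.
 */
static int __maybe_unused sketch_pin_user_buffer(unsigned long start,
						 int nr_pages,
						 struct page **pages)
{
	int i, nr;

	/* May sleep: falls back to get_user_pages() under mmap_sem. */
	nr = get_user_pages_fast(start, nr_pages, 1, pages);
	if (nr < 0)
		return nr;	/* no pages were pinned, nr is -errno */

	/* ... read from / write to pages[0..nr-1] ... */

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
	return nr;
}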