arch/sparc/mm/gup.c
/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask, result;
        pte_t *ptep;

        if (tlb_type == hypervisor) {
                result = _PAGE_PRESENT_4V|_PAGE_P_4V;
                if (write)
                        result |= _PAGE_WRITE_4V;
        } else {
                result = _PAGE_PRESENT_4U|_PAGE_P_4U;
                if (write)
                        result |= _PAGE_WRITE_4U;
        }
        mask = result | _PAGE_SPECIAL;
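        /*
         * Note: _PAGE_SPECIAL is in the mask but not in the expected
         * result, so a pte_special() mapping (which has no struct page
         * to take a reference on) always fails the check below and
         * pushes the caller onto the slow path.
         */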
        ptep = pte_offset_kernel(&pmd, addr);
        do {
                struct page *page, *head;
                pte_t pte = *ptep;

                if ((pte_val(pte) & mask) != result)
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                /* The hugepage case is simplified on sparc64 because
                 * we encode the sub-page pfn offsets into the
                 * hugepage PTEs.  We could optimize this in the future
                 * to use page_cache_add_speculative() for the hugepage
                 * case.
                 */
                page = pte_page(pte);
                head = compound_head(page);
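                /*
                 * Take a speculative reference on the head page, then
                 * re-read the PTE: if it changed while the reference
                 * was being taken, the page may have been unmapped and
                 * freed behind us, so drop the reference and give up.
                 */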
                if (!page_cache_get_speculative(head))
                        return 0;
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(head);
                        return 0;
                }
                if (head != page)
                        get_huge_page_tail(page);

                pages[*nr] = page;
                (*nr)++;
        } while (ptep++, addr += PAGE_SIZE, addr != end);

        return 1;
}
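/*
 * The mid-level walkers below read each table entry once into a local
 * copy and pass that copy down, so a concurrent teardown cannot change
 * the entry out from under us halfway through the walk.
 */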
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd))
                        return 0;
                if (!gup_pte_range(pmd, addr, next, write, pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);

        return 1;
}
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                        return 0;
        } while (pudp++, addr = next, addr != end);

        return 1;
}
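/*
 * Note: sparc64 folds the pud level into the pgd (pgtable-nopud.h), so
 * pud_offset() above is effectively a no-op; the generic four-level
 * walker shape is kept for consistency with other architectures.
 */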
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables from being freed on sparc.
         *
         * So long as we atomically load page table pointers versus teardown,
         * we can follow the address down to the page and take a ref on it.
         */
        local_irq_disable();

        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);

        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;
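        /*
         * The slow path lives in its own block after the fast-path
         * return so that 'ret' only exists on the rarely taken path.
         */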
        {
                int ret;

slow:
                local_irq_enable();

                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);
                /*
                 * Have to be a bit careful with return values: if the
                 * fast path already pinned some pages, report those
                 * even when the slow path failed outright; otherwise
                 * pass the slow path's count or error straight through.
                 */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}
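
/*
 * Illustrative only (not part of the original file): a sketch of how a
 * typical caller pins a user buffer with get_user_pages_fast() before
 * working on it, using the signature this file defines. The buffer
 * handling and the do_io_on_page() helper are hypothetical, and error
 * handling is minimal; the block is #if 0'd out so it is never built.
 */
#if 0
static int example_pin_user_buffer(unsigned long uaddr, size_t len, int write)
{
        int i, nr, nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct page **pages;

        pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* Returns how many pages were actually pinned (possibly fewer). */
        nr = get_user_pages_fast(uaddr, nr_pages, write, pages);
        if (nr < 0) {
                kfree(pages);
                return nr;
        }

        for (i = 0; i < nr; i++) {
                /* do_io_on_page() is a stand-in for real per-page work. */
                do_io_on_page(pages[i]);
                put_page(pages[i]);     /* drop the reference gup took */
        }

        kfree(pages);
        return nr == nr_pages ? 0 : -EFAULT;
}
#endif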