/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>
/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we haven't
 * had to handle yet.
 */
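/*
 * Fault in a page on behalf of a coprocessor: @ea is the faulting
 * effective address and @dsisr the fault status reported by the unit;
 * *@flt receives the VM_FAULT_* result of handle_mm_fault().
 */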
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, unsigned *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;
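	/* The VMA lookup and the fault itself must run under mmap_sem. */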
	down_read(&mm->mmap_sem);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;
	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}
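	/* DSISR's store bit tells us whether the access was a write. */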
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above,
		 * and hash should get a NOHPTE fault instead of
		 * a PROTFAULT in case fixup is needed for things
		 * like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}
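	/*
	 * Resolve the fault; the VM_FAULT_* bits come back through *flt
	 * so the caller can do its own accounting on top of ours.
	 */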
	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
out_unlock:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
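/*
 * Illustrative only: a coprocessor driver's fault path (the cxl and
 * spufs drivers are the in-tree users) is expected to call the export
 * above roughly like this; the variable names here are hypothetical:
 *
 *	unsigned flt = 0;
 *	int ret = copro_handle_mm_fault(mm, dar, dsisr, &flt);
 *	if (ret)
 *		return ret;	// -EFAULT or -ENOMEM
 *	// on success, flt carries VM_FAULT_MAJOR etc. for accounting
 */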
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;
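	/* Pick page size, segment size and VSID source by MMU region. */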
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}

	/* Bad address */
	if (!vsid)
		return 1;
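	/* Build the VSID dword: shifted VSID, key, page-size and B-field bits. */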
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
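	/* The ESID dword is the effective segment number plus the valid bit. */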
	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
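/*
 * Invalidate coprocessor SLB entries on every unit that may cache
 * translations for @mm; cxl_slbia() is effectively a no-op when no
 * cxl adapter is in use.
 */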
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif

	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);