/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>
/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that we fortunately
 * have not had to handle.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, unsigned *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;
	down_read(&mm->mmap_sem);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;
	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * protfault should only happen due to us
		 * mapping a region readonly temporarily. PROT_NONE
		 * is also covered by the VMA check above.
		 */
		WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}
	ret = 0;
	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

out_unlock:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
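
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * coprocessor driver's fault path might use copro_handle_mm_fault().
 * The function name and surrounding context are hypothetical; the real
 * callers are the SPU and cxl fault handlers. Kept under #if 0 so it is
 * not built.
 */
#if 0
static int example_copro_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr)
{
	unsigned flt = 0;
	int ret;

	/* Resolve the faulting access against the owning task's mm. */
	ret = copro_handle_mm_fault(mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* -EFAULT/-ENOMEM: report failure to the unit */

	/*
	 * flt now holds the VM_FAULT_* result bits; the fault has been
	 * serviced and the coprocessor access can be restarted.
	 */
	return 0;
}
#endif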
/*
 * Build the SLB entry (ESID/VSID pair) a coprocessor needs in order to
 * map @ea in the context of @mm. Returns 0 on success, 1 on an invalid
 * address.
 */
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}

	/* Bad address */
	if (!vsid)
		return 1;
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
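
/*
 * Illustrative sketch, not part of the original file: a driver handling
 * a segment miss would build an SLB entry with copro_calculate_slb()
 * and program it into its unit. copro_load_slb() is a hypothetical
 * stand-in for the unit-specific MMIO write. Kept under #if 0.
 */
#if 0
static int example_segment_miss(struct mm_struct *mm, u64 ea)
{
	struct copro_slb slb;

	if (copro_calculate_slb(mm, ea, &slb))
		return -EFAULT;	/* invalid region or bad address */

	/* slb.esid has SLB_ESID_V set; slb.vsid encodes key, B and LP bits. */
	copro_load_slb(&slb);	/* hypothetical hardware write */
	return 0;
}
#endif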
/* Invalidate the SLB entries cached by all coprocessors for @mm. */
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
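
/*
 * Illustrative sketch, not part of the original file: any path that
 * changes segment attributes for an mm (for example, demoting its page
 * size) must drop coprocessor SLB entries built from the old values.
 * example_change_psize() is a hypothetical stand-in for such a path;
 * the real callers live in the hash MMU and slice code. Kept under #if 0.
 */
#if 0
static void example_change_psize(struct mm_struct *mm)
{
	/* ... update mm->context and the slice map here ... */

	/* Cached copro SLBs may now be stale; invalidate them everywhere. */
	copro_flush_all_slbs(mm);
}
#endif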