// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>

#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>

#include <misc/cxl-base.h>
/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that we haven't had
 * to handle fortunately.
 */
23 int copro_handle_mm_fault(struct mm_struct
*mm
, unsigned long ea
,
24 unsigned long dsisr
, vm_fault_t
*flt
)
26 struct vm_area_struct
*vma
;
27 unsigned long is_write
;
36 vma
= lock_mm_and_find_vma(mm
, ea
, NULL
);
41 is_write
= dsisr
& DSISR_ISSTORE
;
43 if (!(vma
->vm_flags
& VM_WRITE
))
46 if (!(vma
->vm_flags
& (VM_READ
| VM_EXEC
)))
49 * PROT_NONE is covered by the VMA check above.
50 * and hash should get a NOHPTE fault instead of
51 * a PROTFAULT in case fixup is needed for things
55 WARN_ON_ONCE(dsisr
& DSISR_PROTFAULT
);
59 *flt
= handle_mm_fault(vma
, ea
, is_write
? FAULT_FLAG_WRITE
: 0, NULL
);
61 /* The fault is fully completed (including releasing mmap lock) */
62 if (*flt
& VM_FAULT_COMPLETED
)
65 if (unlikely(*flt
& VM_FAULT_ERROR
)) {
66 if (*flt
& VM_FAULT_OOM
) {
69 } else if (*flt
& (VM_FAULT_SIGBUS
| VM_FAULT_SIGSEGV
)) {
80 EXPORT_SYMBOL_GPL(copro_handle_mm_fault
);
82 #ifdef CONFIG_PPC_64S_HASH_MMU
83 int copro_calculate_slb(struct mm_struct
*mm
, u64 ea
, struct copro_slb
*slb
)
88 switch (get_region_id(ea
)) {
90 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__
, ea
);
93 psize
= get_slice_psize(mm
, ea
);
94 ssize
= user_segment_size(ea
);
95 vsid
= get_user_vsid(&mm
->context
, ea
, ssize
);
96 vsidkey
= SLB_VSID_USER
;
98 case VMALLOC_REGION_ID
:
99 pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__
, ea
);
100 psize
= mmu_vmalloc_psize
;
101 ssize
= mmu_kernel_ssize
;
102 vsid
= get_kernel_vsid(ea
, mmu_kernel_ssize
);
103 vsidkey
= SLB_VSID_KERNEL
;
106 pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__
, ea
);
107 psize
= mmu_io_psize
;
108 ssize
= mmu_kernel_ssize
;
109 vsid
= get_kernel_vsid(ea
, mmu_kernel_ssize
);
110 vsidkey
= SLB_VSID_KERNEL
;
112 case LINEAR_MAP_REGION_ID
:
113 pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__
, ea
);
114 psize
= mmu_linear_psize
;
115 ssize
= mmu_kernel_ssize
;
116 vsid
= get_kernel_vsid(ea
, mmu_kernel_ssize
);
117 vsidkey
= SLB_VSID_KERNEL
;
120 pr_debug("%s: invalid region access at %016llx\n", __func__
, ea
);
127 vsid
= (vsid
<< slb_vsid_shift(ssize
)) | vsidkey
;
129 vsid
|= mmu_psize_defs
[psize
].sllp
|
130 ((ssize
== MMU_SEGSIZE_1T
) ? SLB_VSID_B_1T
: 0);
132 slb
->esid
= (ea
& (ssize
== MMU_SEGSIZE_1T
? ESID_MASK_1T
: ESID_MASK
)) | SLB_ESID_V
;
137 EXPORT_SYMBOL_GPL(copro_calculate_slb
);
/*
 * Invalidate the SLBs of every coprocessor attached to @mm: SPUs (when
 * CONFIG_SPU_BASE is built in) and cxl-attached AFUs.
 */
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);