WIP FPC-III support
[linux/fpc-iii.git] / arch / powerpc / kernel / hw_breakpoint_constraints.c
blob 867ee4aa026ad7800ff3bfbd10b397d831fd8dbc
1 // SPDX-License-Identifier: GPL-2.0+
2 #include <linux/kernel.h>
3 #include <linux/uaccess.h>
4 #include <linux/sched.h>
5 #include <asm/hw_breakpoint.h>
6 #include <asm/sstep.h>
7 #include <asm/cache.h>
9 static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
11 return ((info->address <= dar) && (dar - info->address < info->len));
14 static bool ea_user_range_overlaps(unsigned long ea, int size,
15 struct arch_hw_breakpoint *info)
17 return ((ea < info->address + info->len) &&
18 (ea + size > info->address));
21 static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
23 unsigned long hw_start_addr, hw_end_addr;
25 hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
26 hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
28 return ((hw_start_addr <= dar) && (hw_end_addr > dar));
31 static bool ea_hw_range_overlaps(unsigned long ea, int size,
32 struct arch_hw_breakpoint *info)
34 unsigned long hw_start_addr, hw_end_addr;
35 unsigned long align_size = HW_BREAKPOINT_SIZE;
38 * On p10 predecessors, quadword is handle differently then
39 * other instructions.
41 if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
42 align_size = HW_BREAKPOINT_SIZE_QUADWORD;
44 hw_start_addr = ALIGN_DOWN(info->address, align_size);
45 hw_end_addr = ALIGN(info->address + info->len, align_size);
47 return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
51 * If hw has multiple DAWR registers, we also need to check all
52 * dawrx constraint bits to confirm this is _really_ a valid event.
53 * If type is UNKNOWN, but privilege level matches, consider it as
54 * a positive match.
56 static bool check_dawrx_constraints(struct pt_regs *regs, int type,
57 struct arch_hw_breakpoint *info)
59 if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
60 return false;
63 * The Cache Management instructions other than dcbz never
64 * cause a match. i.e. if type is CACHEOP, the instruction
65 * is dcbz, and dcbz is treated as Store.
67 if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
68 return false;
70 if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
71 return false;
73 if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
74 return false;
76 return true;
/*
 * Return true if the event is valid wrt dawr configuration,
 * including extraneous exception. Otherwise return false.
 *
 * On a valid-but-extraneous hit (the hardware matched only because of
 * its coarser alignment, not because the user range was touched) the
 * HW_BRK_TYPE_EXTRANEOUS_IRQ flag is set in info->type as a side effect.
 */
bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			  unsigned long ea, int type, int size,
			  struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	/*
	 * Instruction fetch failed (instr == 0): fall back to checking
	 * the DAR alone, against the hardware range where available.
	 */
	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return true;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	/* Unrecognized instruction: no EA to compare, use the DAR. */
	if (type == UNKNOWN) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return dawrx_constraints;
	}

	/* EA touches the user-requested range: genuine hit. */
	if (ea_user_range_overlaps(ea, size, info))
		return dawrx_constraints;

	/* EA only touches the wider hardware range: extraneous hit. */
	if (ea_hw_range_overlaps(ea, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}
130 static int cache_op_size(void)
132 #ifdef __powerpc64__
133 return ppc64_caches.l1d.block_size;
134 #else
135 return L1_CACHE_BYTES;
136 #endif
139 void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
140 int *type, int *size, unsigned long *ea)
142 struct instruction_op op;
144 if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
145 return;
147 analyse_instr(&op, regs, *instr);
148 *type = GETTYPE(op.type);
149 *ea = op.ea;
150 #ifdef __powerpc64__
151 if (!(regs->msr & MSR_64BIT))
152 *ea &= 0xffffffffUL;
153 #endif
155 *size = GETSIZE(op.type);
156 if (*type == CACHEOP) {
157 *size = cache_op_size();
158 *ea &= ~(*size - 1);
159 } else if (*type == LOAD_VMX || *type == STORE_VMX) {
160 *ea &= ~(*size - 1);