1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <arch/encoding.h>
6 #include <console/console.h>
7 #include <commonlib/helpers.h>
9 #define GRANULE (1 << PMP_SHIFT)
12 * This structure is used to temporarily record PMP
13 * configuration information.
16 /* used to record the value of pmpcfg[i] */
19 * When generating a TOR type configuration,
20 * the previous entry needs to record the starting address.
21 * used to record the value of pmpaddr[i - 1]
23 uintptr_t previous_address
;
24 /* used to record the value of pmpaddr[i] */
28 /* This variable is used to record which entries have been used. */
29 static uintptr_t pmp_entry_used_mask
;
/*
 * The architectural spec says that up to 16 PMP entries are
 * available:
 * "Up to 16 PMP entries are supported. If any PMP entries are
 * implemented, then all PMP CSRs must be implemented,
 * but all PMP CSR fields are WARL and may be hardwired to zero."
 *
 * Returns the number of PMP entries this code manages (the
 * architectural maximum; unimplemented entries read back as zero).
 */
int pmp_entries_num(void)
{
	return 16;
}
42 /* helper function used to read pmpcfg[idx] */
43 static uintptr_t read_pmpcfg(int idx
)
45 #if __riscv_xlen == 32
46 int shift
= 8 * (idx
& 3);
49 return (read_csr(pmpcfg0
) >> shift
) & 0xff;
51 return (read_csr(pmpcfg1
) >> shift
) & 0xff;
53 return (read_csr(pmpcfg2
) >> shift
) & 0xff;
55 return (read_csr(pmpcfg3
) >> shift
) & 0xff;
57 #elif __riscv_xlen == 64
58 int shift
= 8 * (idx
& 7);
61 return (read_csr(pmpcfg0
) >> shift
) & 0xff;
63 return (read_csr(pmpcfg2
) >> shift
) & 0xff;
69 /* helper function used to write pmpcfg[idx] */
70 static void write_pmpcfg(int idx
, uintptr_t cfg
)
74 #if __riscv_xlen == 32
75 int shift
= 8 * (idx
& 3);
78 old
= read_csr(pmpcfg0
);
79 new = (old
& ~((uintptr_t)0xff << shift
))
80 | ((cfg
& 0xff) << shift
);
81 write_csr(pmpcfg0
, new);
84 old
= read_csr(pmpcfg1
);
85 new = (old
& ~((uintptr_t)0xff << shift
))
86 | ((cfg
& 0xff) << shift
);
87 write_csr(pmpcfg1
, new);
90 old
= read_csr(pmpcfg2
);
91 new = (old
& ~((uintptr_t)0xff << shift
))
92 | ((cfg
& 0xff) << shift
);
93 write_csr(pmpcfg2
, new);
96 old
= read_csr(pmpcfg3
);
97 new = (old
& ~((uintptr_t)0xff << shift
))
98 | ((cfg
& 0xff) << shift
);
99 write_csr(pmpcfg3
, new);
102 #elif __riscv_xlen == 64
103 int shift
= 8 * (idx
& 7);
106 old
= read_csr(pmpcfg0
);
107 new = (old
& ~((uintptr_t)0xff << shift
))
108 | ((cfg
& 0xff) << shift
);
109 write_csr(pmpcfg0
, new);
110 printk(BIOS_INFO
, "%s(%d, %lx) = %lx\n", __func__
, idx
, cfg
, read_csr(pmpcfg0
));
113 old
= read_csr(pmpcfg2
);
114 new = (old
& ~((uintptr_t)0xff << shift
))
115 | ((cfg
& 0xff) << shift
);
116 write_csr(pmpcfg2
, new);
117 printk(BIOS_INFO
, "%s(%d, %lx) = %lx\n", __func__
, idx
, cfg
, read_csr(pmpcfg2
));
121 if (read_pmpcfg(idx
) != cfg
) {
122 printk(BIOS_WARNING
, "%s: PMPcfg%d: Wrote %lx, read %lx\n", __func__
, idx
, cfg
, read_pmpcfg(idx
));
123 die("PMPcfg write failed");
127 /* helper function used to read pmpaddr[idx] */
128 static uintptr_t read_pmpaddr(int idx
)
132 return read_csr(pmpaddr0
);
134 return read_csr(pmpaddr1
);
136 return read_csr(pmpaddr2
);
138 return read_csr(pmpaddr3
);
140 return read_csr(pmpaddr4
);
142 return read_csr(pmpaddr5
);
144 return read_csr(pmpaddr6
);
146 return read_csr(pmpaddr7
);
148 return read_csr(pmpaddr8
);
150 return read_csr(pmpaddr9
);
152 return read_csr(pmpaddr10
);
154 return read_csr(pmpaddr11
);
156 return read_csr(pmpaddr12
);
158 return read_csr(pmpaddr13
);
160 return read_csr(pmpaddr14
);
162 return read_csr(pmpaddr15
);
167 /* helper function used to write pmpaddr[idx] */
168 static void write_pmpaddr(int idx
, uintptr_t val
)
172 write_csr(pmpaddr0
, val
);
175 write_csr(pmpaddr1
, val
);
178 write_csr(pmpaddr2
, val
);
181 write_csr(pmpaddr3
, val
);
184 write_csr(pmpaddr4
, val
);
187 write_csr(pmpaddr5
, val
);
190 write_csr(pmpaddr6
, val
);
193 write_csr(pmpaddr7
, val
);
196 write_csr(pmpaddr8
, val
);
199 write_csr(pmpaddr9
, val
);
202 write_csr(pmpaddr10
, val
);
205 write_csr(pmpaddr11
, val
);
208 write_csr(pmpaddr12
, val
);
211 write_csr(pmpaddr13
, val
);
214 write_csr(pmpaddr14
, val
);
217 write_csr(pmpaddr15
, val
);
221 printk(BIOS_INFO
, "%s(%d, %lx) = %lx\n", __func__
, idx
, val
, read_pmpaddr(idx
));
222 /* The PMP is not required to return what we wrote. On some SoC, many bits are cleared. */
223 if (read_pmpaddr(idx
) != val
) {
224 printk(BIOS_WARNING
, "%s: PMPaddr%d: Wrote %lx, read %lx\n", __func__
,
225 idx
, val
, read_pmpaddr(idx
));
229 /* Generate a PMP configuration for all memory */
230 static void generate_pmp_all(struct pmpcfg
*p
)
232 p
->cfg
= PMP_NAPOT
| PMP_R
| PMP_W
| PMP_X
;
233 p
->previous_address
= 0;
234 p
->address
= (uintptr_t) -1;
237 /* Generate a PMP configuration of type NA4/NAPOT */
238 static void generate_pmp_napot(struct pmpcfg
*p
, uintptr_t base
, uintptr_t size
, u8 flags
)
240 flags
= flags
& (PMP_R
| PMP_W
| PMP_X
| PMP_L
);
241 p
->cfg
= flags
| (size
> GRANULE
? PMP_NAPOT
: PMP_NA4
);
242 p
->previous_address
= 0;
243 p
->address
= (base
+ (size
/ 2 - 1));
246 /* Generate a PMP configuration of type TOR */
247 static void generate_pmp_range(struct pmpcfg
*p
, uintptr_t base
, uintptr_t size
, u8 flags
)
249 flags
= flags
& (PMP_R
| PMP_W
| PMP_X
| PMP_L
);
250 p
->cfg
= flags
| PMP_TOR
;
251 p
->previous_address
= base
;
252 p
->address
= (base
+ size
);
256 * Generate a PMP configuration.
257 * reminder: base and size are 34 bit numbers on RV32.
259 static int generate_pmp(struct pmpcfg
*p
, u64 base
, u64 size
, u8 flags
)
261 /* Convert the byte address and byte size to units of 32-bit words */
262 uintptr_t b
= (uintptr_t) base
>> PMP_SHIFT
, s
= (uintptr_t) size
>> PMP_SHIFT
;
263 #if __riscv_xlen == 32
264 /* verify that base + size fits in 34 bits */
265 if ((base
+ size
- 1) >> 34) {
266 printk(BIOS_EMERG
, "%s: base (%llx) + size (%llx) - 1 is more than 34 bits\n",
267 __func__
, base
, size
);
271 /* if base is -1, that means "match all" */
272 if (base
== (u64
)-1) {
274 } else if (IS_POWER_OF_2(size
) && (size
>= 4) && ((base
& (size
- 1)) == 0)) {
275 generate_pmp_napot(p
, b
, s
, flags
);
277 generate_pmp_range(p
, b
, s
, flags
);
283 * find empty PMP entry by type
284 * TOR type configuration requires two consecutive PMP entries,
285 * others requires one.
287 static int find_empty_pmp_entry(int is_range
)
289 int free_entries
= 0;
290 for (int i
= 0; i
< pmp_entries_num(); i
++) {
291 if (pmp_entry_used_mask
& (1 << i
))
295 if (is_range
&& (free_entries
== 2))
297 if (!is_range
&& (free_entries
== 1))
300 die("Too many PMP configurations, no free entries can be used!");
305 * mark PMP entry has be used
306 * this function need be used with find_entry_pmp_entry
308 * n = find_empty_pmp_entry(is_range)
309 * ... // PMP set operate
310 * mask_pmp_entry_used(n);
312 static void mask_pmp_entry_used(int idx
)
314 pmp_entry_used_mask
|= 1 << idx
;
317 /* prints the pmp regions by reading the PMP address and configuration registers */
318 void print_pmp_regions(void)
320 uintptr_t prev_pmpaddr
= 0;
324 for (int i
= 0; i
< pmp_entries_num(); i
++) {
325 uintptr_t pmpcfg
= read_pmpcfg(i
);
326 uintptr_t pmpaddr
= read_pmpaddr(i
);
327 if ((pmpcfg
& PMP_A
) == 0) {
328 continue; // PMP entry is disabled
329 } else if (pmpcfg
& PMP_NA4
) {
330 base
= pmpaddr
<< PMP_SHIFT
;
333 } else if (pmpcfg
& PMP_NAPOT
) {
334 unsigned int count_trailing_ones
= 0;
338 break; // we got a zero
339 count_trailing_ones
++;
342 size
= 8 << count_trailing_ones
;
343 base
= (pmpaddr
& ~((2 << count_trailing_ones
) - 1)) >> PMP_SHIFT
;
345 } else if (pmpcfg
& PMP_TOR
) {
347 size
= pmpaddr
- prev_pmpaddr
;
351 printk(BIOS_DEBUG
, "base: 0x%lx, size: 0x%lx, perm: %c%c%c, mode: %s, locked: %d\n",
353 (pmpcfg
& PMP_R
) ? 'r' : ' ',
354 (pmpcfg
& PMP_W
) ? 'w' : ' ',
355 (pmpcfg
& PMP_X
) ? 'x' : ' ',
357 (pmpcfg
& PMP_L
) ? 1 : 0);
359 prev_pmpaddr
= pmpaddr
;
363 /* reset PMP setting */
366 for (int i
= 0; i
< pmp_entries_num(); i
++) {
367 if (read_pmpcfg(i
) & PMP_L
)
368 die("Some PMP configurations are locked and cannot be reset!");
376 * Why are these u64 and not uintptr_t?
377 * because, per the spec:
378 * The Sv32 page-based virtual-memory scheme described in Section 4.3
379 * supports 34-bit physical addresses for RV32, so the PMP scheme must
380 * support addresses wider than XLEN for RV32.
381 * Yes, in RV32, these are 34-bit numbers.
382 * Rather than require every future user of these to remember that,
383 * this ABI is 64 bits.
384 * generate_pmp will check for out of range values.
386 void setup_pmp(u64 base
, u64 size
, u8 flags
)
391 if (generate_pmp(&p
, base
, size
, flags
))
394 is_range
= ((p
.cfg
& PMP_A
) == PMP_TOR
);
396 n
= find_empty_pmp_entry(is_range
);
399 * NOTE! you MUST write the cfg register first, or on (e.g.)
400 * the SiFive FU740, it will not take all the bits.
401 * This is different than QEMU. NASTY!
403 write_pmpcfg(n
, p
.cfg
);
405 write_pmpaddr(n
, p
.address
);
407 write_pmpaddr(n
- 1, p
.previous_address
);
409 mask_pmp_entry_used(n
);
411 mask_pmp_entry_used(n
- 1);
415 * close_pmp will "close" the pmp.
416 * This consists of adding the "match every address" entry.
417 * This should be the last pmp function that is called.
418 * Because we can not be certain that there is not some reason for it
419 * NOT to be last, we do not check -- perhaps, later, a check would
420 * make sense, but, for now, we do not check.
421 * If previous code has used up all pmp entries, print a warning
423 * The huge constant for the memory size may seem a bit odd here.
424 * Recall that PMP is to protect a *limited* number of M mode
425 * memory ranges from S and U modes. Therefore, the last range
426 * entry should cover all possible addresses, up to
427 * an architectural limit. It is entirely acceptable
428 * for it to cover memory that does not exist -- PMP
429 * protects M mode, nothing more.
430 * Think of this range as the final catch-all else
431 * in an if-then-else.
435 setup_pmp((u64
)-1, 0, PMP_R
|PMP_W
|PMP_X
);