/*
 * MMU hypercalls for the sPAPR (pseries) vHyp hypervisor that is used by TCG
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * SPDX-License-Identifier: MIT
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
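
/*
 * These hypercalls manage the guest's hashed page table (HPT): each HPTE is
 * a pair of 64-bit doublewords, and entries are grouped into 8-entry PTE
 * groups (PTEGs) selected by the hash of the virtual address.
 */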
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
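
    /*
     * WIMG bits in the second doubleword: Write-through, cache-Inhibited,
     * Memory-coherent, Guarded.  Mappings of guest RAM are expected to be
     * plain coherent memory (M only); anything else is treated as an I/O
     * mapping and must at least be cache-inhibited.
     */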
    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!ppc_hash64_valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;
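
    /*
     * Without H_EXACT we may use any free slot within the 8-entry PTE
     * group; with H_EXACT the caller demands the exact slot given by the
     * low three bits of the PTE index.
     */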
    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}
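
/*
 * Helper shared by H_REMOVE and H_BULK_REMOVE: validate and invalidate a
 * single HPTE, returning the old doublewords through *vp and *rp so the
 * caller can report the reference/change state back to the guest.
 */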
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn, target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!ppc_hash64_valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE       0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE       0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC         0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS      0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX       0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH  4
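
/*
 * H_BULK_REMOVE takes up to four (tsh, tsl) pairs in the argument buffer.
 * The high bits of each tsh word select the request type and flags while
 * the low bits hold the PTE index; on return the type field is rewritten
 * as a response carrying the per-entry completion code defined above.
 */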
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}
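
/*
 * H_PROTECT: update the protection bits (PP0/PP, no-execute and storage
 * keys) of an existing HPTE.  The entry is invalidated and the TLB flushed
 * before the updated second doubleword is written back.
 */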
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!ppc_hash64_valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
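
/*
 * H_READ: return one HPTE, or with H_READ_4 the aligned group of four
 * entries containing ptex, to the guest in the argument buffer.
 */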
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!ppc_hash64_valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}
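
/*
 * Backing for the H_RESIZE_HPT_PREPARE / H_RESIZE_HPT_COMMIT hypercalls:
 * the prepare phase allocates and zeroes the new hash table on a
 * background thread, the commit phase rehashes the bolted entries into it
 * and swaps it in.
 */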
struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}
static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    return NULL;
}
/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}
target_ulong vhyp_mmu_resize_hpt_prepare(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}
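
/*
 * Helpers for building the new hash table: each PTE group occupies
 * HASH_PTEG_SIZE_64 bytes, each HPTE within it HASH_PTE_SIZE_64 bytes,
 * with the second doubleword at offset HPTE64_DW1.
 */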
static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HPTE64_DW1, pte1);
}
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);
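
    /*
     * Recover the hash for this entry so it can be placed in the new
     * table.  The AVPN keeps the VSID and only the offset bits above bit
     * 23, so the missing low offset bits are reconstructed from the old
     * PTEG index (itself the old hash masked to the old table size).
     */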
    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_pte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}
static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}
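
/*
 * Commit a previously prepared resize: rehash the current table into the
 * pending one and, on success, make the new table the active HPT.
 */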
target_ulong vhyp_mmu_resize_hpt_commit(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong flags,
                                        target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}
static void hypercall_register_types(void)
{
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}

type_init(hypercall_register_types)