/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

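/*
 * Illustrative examples (added commentary, not part of the original code):
 * with the checks above, an absolute address such as 0x12345678 has
 * bits [31:28] != 0xf and is simply zero-extended as a memory address.
 * 0xf1000000 has bits [31:28] == 0xf and bits [27:24] != 0, so it is
 * treated as I/O space and sign-extended towards the top of the
 * physical address range, while 0xf0abcdef falls into the PDC hole and
 * is relocated by the "-1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4)" offset.
 */
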
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

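/*
 * Added commentary: each HPPATLBEntry embeds an IntervalTreeNode keyed on
 * [itree.start, itree.last], so looking up the single point [addr, addr]
 * returns an entry whose virtual range covers addr, regardless of the
 * (possibly multi-page) size with which the entry was inserted.
 */
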
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

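/*
 * Added commentary: the QEMU softmmu TLB is always flushed for the range
 * covered by the entry, but the architectural entry itself is only
 * unlinked from the interval tree and returned to the free list when it
 * is not a BTLB entry; BTLB entries live at the bottom of env->tlb[] and
 * are only cleared when force_flush_btlb is set.
 */
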
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

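/*
 * Added commentary: entries are taken from the free list when one is
 * available; otherwise a victim after the BTLB region is chosen in
 * round-robin order via env->tlb_last.  Flushing the victim pushes it
 * onto the free list, so the common pop at the end of the function
 * works for both paths.
 */
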
#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

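/*
 * Illustrative example (added commentary): bit 0 of the protection ID
 * register is the write-disable flag and bits [16:1] are compared with
 * the TLB entry's access_id.  With prot_id == 0x2469, the PID is
 * 0x2469 >> 1 == 0x1234 and WD is set, so a page whose access_id is
 * 0x1234 matches but is limited to PAGE_READ | PAGE_EXEC.
 */
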
static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

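/*
 * Added commentary: on pa2.0 each protection-ID control register is
 * 64 bits wide and holds two IDs, one per 32-bit half, so twice as many
 * IDs are consulted here as in the 32-bit variant above.
 */
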
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, MemOp mop, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress_align;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e WD). */
        prot &= access_prot;
    }

    /*
     * In reverse priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
    }
    if (unlikely(!ent->d)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
    }
    if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * Except when PSW_X is set, allow this single access to succeed.
             * The write bit will be invalidated for subsequent accesses.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress_align:
    if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
        ret = EXCP_UNALIGN;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

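/*
 * Added commentary: the function returns -1 on success or an EXCP_*
 * number on failure, and always fills *pphys and *pprot so that the
 * caller can still install a translation with reduced permissions.
 * A type of 0 denotes a non-architectural lookup (debugger, LPA) that
 * bypasses the access, D, B, P and T checks above.
 */
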
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

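/*
 * Added commentary: the ISR/IOR pair reported to the guest describes the
 * space and offset of the faulting access.  The low 32 bits of the
 * address land in IOR and the high 32 bits in ISR; on pa2.0 with
 * translation enabled, the two space bits taken from the base register
 * recorded in unwind_breg are merged into the top of IOR, matching the
 * "Interruption Parameter Registers" layout referenced above.
 */
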
G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                             MMUAccessType type, int mmu_idx,
                             MemOp memop, int size, bool probe, uintptr_t ra)
{
    CPUHPPAState *env = cpu_env(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                     &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, ra, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    memset(out, 0, sizeof(*out));
    out->phys_addr = phys;
    out->prot = prot;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    out->lg_page_size = TARGET_PAGE_BITS;

    return true;
}

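/*
 * Added commentary: the MemOp is forwarded to hppa_get_physical_address
 * so that an unaligned access is detected during translation and raised
 * through raise_exception_with_ior like any other MMU fault, with ISR
 * and IOR describing the offending address.
 */
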
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

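/*
 * Added commentary: PA 1.1 TLB insertion is a two-step protocol.  ITLBA
 * (above) records the virtual and physical page in env->tlb_partial with
 * entry_valid still clear; ITLBP (below) then supplies the access rights
 * and protection ID and makes the entry live.  The same bit layout is
 * reused by the PDC BTLB emulation further down.
 */
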
/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

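/*
 * Illustrative example (added commentary): the low four bits of r1 select
 * the page size as a power-of-four multiple of the base page.  With the
 * 4 KiB TARGET_PAGE_SIZE used here, (r1 & 0xf) == 0 inserts a 4 KiB
 * entry, == 1 a 16 KiB entry, == 2 a 64 KiB entry, and so on; va_b is
 * aligned down to that size before the entry is recorded.
 */
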
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

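/*
 * Illustrative example (added commentary): with a 4 KiB base page, a
 * range code of 2 in the bottom bits gives an extent of
 * TARGET_PAGE_SIZE << 4 == 64 KiB, so the flush covers the sixteen pages
 * starting at the page-aligned address.  A range code of 0 degenerates
 * to the single-page flush used by PA1.x.
 */
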
/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;