/*
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

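/*
 * As used by the flush routines below, H_BULK_REMOVE consumes up to
 * eight doublewords interpreted as (control, AVPN) pairs, i.e. at most
 * four HPTE removals per hcall: the even word carries HBR_REQUEST plus
 * the slot number (and HBR_AVPN when the AVPN in the following word
 * must match), and a partial batch is terminated with HBR_END.
 */
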
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);
	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

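/*
 * Under PAPR the hashed page table is owned by the hypervisor, so each
 * HPTE operation below is an hcall (H_ENTER, H_REMOVE, H_PROTECT, ...)
 * rather than a direct store to the table.
 */
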
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	flags = 0;

	/* Make pHyp happy: clear the memory coherence (M) bit for
	 * cache-inhibited, non-writethrough mappings */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~HPTE_R_M;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

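/*
 * Serialize tlbie-generating hcalls with this lock unless the MMU
 * supports lockless tlbie (MMU_FTR_LOCKLESS_TLBIE); bouncing the
 * hypervisor's own tlbie lock between many CPUs at once is expensive.
 */
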
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

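/*
 * Evict one HPTE from a full group.  H_REMOVE with the H_ANDCOND flag
 * only succeeds if (HPTE dword0 & avpn argument) == 0, so passing
 * (0x1UL << 4), which is the HPTE_V_BOLTED bit, guarantees a bolted
 * entry is never chosen as the victim.
 */
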
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

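/*
 * Clear the whole hash table (e.g. at kexec time).  Each HPTE is 16
 * bytes, so the table holds (size_bytes >> 4) entries; they are read
 * back four at a time, and only valid entries outside the VRMA (the
 * real mode area mapping, which must stay in place) are removed.
 */
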
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/* Reset exceptions to big endian */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 */
		if (rc)
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

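/*
 * A batch of 12 HPTEs encodes to 24 doublewords, i.e. three full
 * H_BULK_REMOVE calls of four (control, AVPN) pairs each, which is how
 * the "3 iterations" limit above is realised.
 */
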
static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

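/*
 * A hugepage PMD maps (1 << (PMD_SHIFT - shift)) base-size HPTEs, whose
 * validity and hash-slot indexes are packed into hpte_slot_array.  Walk
 * the valid entries, reconstruct each one's vpn and global slot number,
 * and invalidate them PPC64_HUGE_HPTE_BATCH at a time.
 */
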
static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
					     unsigned char *hpte_slot_array,
					     unsigned long addr, int psize)
{
	int ssize = 0, i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

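/*
 * "bulk_remove=off" on the kernel command line clears the firmware
 * feature bit, so the flush paths above fall back to one H_REMOVE per
 * HPTE (e.g. when debugging H_BULK_REMOVE behaviour).
 */
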
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all	= pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

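/*
 * Cooperative Memory Overcommitment (CMO) support: when memory is
 * shared between partitions, hinting freed pages to the hypervisor via
 * H_PAGE_INIT lets it reclaim them without having to page them out
 * first.
 */
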
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_TRACEPOINTS

/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif /* CONFIG_TRACEPOINTS */

/*
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	/* retbuf[2] packs the group number above the pool number */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	/* retbuf[3]: weights in the top two bytes, entitlement below */
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

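/*
 * H_GET_MPP_X is the extended counterpart of H_GET_MPP; as used here it
 * reports memory coalescing statistics (coalesced bytes plus pool
 * PURR/SPURR cycles).  retbuf is zero-initialised so any parameters the
 * firmware does not supply read back as 0.
 */
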
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];