/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 * Licensed under GPLv2.
 */
#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mmu.h>
#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>

/* Device-tree visible constants follow */
#define ISA_V2_07B      2070
#define ISA_V3_0B       3000

#define USABLE_PR               (1U << 0)
#define USABLE_OS               (1U << 1)
#define USABLE_HV               (1U << 2)

#define HV_SUPPORT_HFSCR        (1U << 0)
#define OS_SUPPORT_FSCR         (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE         0xffffffffU
#define OS_SUPPORT_NONE         0xffffffffU

struct dt_cpu_feature {
        const char *name;
        uint32_t isa;
        uint32_t usable_privilege;
        uint32_t hv_support;
        uint32_t os_support;
        uint32_t hfscr_bit_nr;
        uint32_t fscr_bit_nr;
        uint32_t hwcap_bit_nr;
        /* fdt parsing */
        unsigned long node;
        int enabled;
        int disabled;
};

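/*
 * Illustrative sketch of a feature node as parsed below. The property names
 * are the ones looked up in process_cpufeatures_node(); the values shown are
 * hypothetical and not taken from any real firmware:
 *
 *      random-number-generator {
 *              isa = <3000>;
 *              usable-privilege = <0x7>;       (USABLE_PR | USABLE_OS | USABLE_HV)
 *              hwcap-bit-nr = <53>;            (hypothetical bit number)
 *      };
 *
 * "hv-support"/"hfscr-bit-nr" and "os-support"/"fscr-bit-nr" are optional and
 * default to the *_NONE / -1 values above when absent.
 */
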
#define CPU_FTRS_BASE \
           (CPU_FTR_USE_TB | \
            CPU_FTR_LWSYNC | \
            CPU_FTR_FPU_UNAVAILABLE |\
            CPU_FTR_NODSISRALIGN |\
            CPU_FTR_NOEXECUTE |\
            CPU_FTR_COHERENT_ICACHE | \
            CPU_FTR_STCX_CHECKS_ADDRESS |\
            CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_DAWR | \
            CPU_FTR_ARCH_206 |\
            CPU_FTR_ARCH_207S)

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE        (PPC_FEATURE_32 | PPC_FEATURE_64 | \
                                 PPC_FEATURE_ARCH_2_06 |\
                                 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE       (PPC_FEATURE2_ARCH_2_07 | \
                                 PPC_FEATURE2_ISEL)

extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

static int hv_mode;

static struct {
        u64     lpcr;
        u64     hfscr;
        u64     fscr;
} system_registers;

static void (*init_pmu_registers)(void);

static void __restore_cpu_cpufeatures(void)
{
        /*
         * LPCR is restored by the power on engine already. It can be changed
         * after early init e.g., by radix enable, and we have no unified API
         * for saving and restoring such SPRs.
         *
         * This ->restore hook should really be removed from idle and register
         * restore moved directly into the idle restore code, because this code
         * doesn't know how idle is implemented or what it needs restored here.
         *
         * The best we can do to accommodate secondary boot and idle restore
         * for now is "or" LPCR with existing.
         */

        mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
        if (hv_mode)
                mtspr(SPRN_HFSCR, system_registers.hfscr);
        mtspr(SPRN_FSCR, system_registers.fscr);

        if (init_pmu_registers)
                init_pmu_registers();
}

static char dt_cpu_name[64];

static struct cpu_spec __initdata base_cpu_spec = {
        .cpu_name               = NULL,
        .cpu_features           = CPU_FTRS_BASE,
        .cpu_user_features      = COMMON_USER_BASE,
        .cpu_user_features2     = COMMON_USER2_BASE,
        .mmu_features           = 0,
        .icache_bsize           = 32, /* minimum block size, fixed by */
        .dcache_bsize           = 32, /* cache info init.             */
        .num_pmcs               = 0,
        .pmc_type               = PPC_PMC_DEFAULT,
        .oprofile_cpu_type      = NULL,
        .oprofile_type          = PPC_OPROFILE_INVALID,
        .cpu_setup              = NULL,
        .cpu_restore            = __restore_cpu_cpufeatures,
        .machine_check_early    = NULL,
        .platform               = NULL,
};

static void __init cpufeatures_setup_cpu(void)
{
        set_cur_cpu_spec(&base_cpu_spec);

        cur_cpu_spec->pvr_mask = -1;
        cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

        /* Initialize the base environment -- clear FSCR/HFSCR.  */
        hv_mode = !!(mfmsr() & MSR_HV);
        if (hv_mode) {
                /* CPU_FTR_HVMODE is used early in PACA setup */
                cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
                mtspr(SPRN_HFSCR, 0);
        }
        mtspr(SPRN_FSCR, 0);

        /*
         * LPCR does not get cleared, to match behaviour with secondaries
         * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
         * could clear LPCR too.
         */
}

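/*
 * Note: FSCR/HFSCR are cleared above and then built back up bit by bit as
 * individual features are enabled by the feat_enable*() callbacks below
 * (see the hfscr_bit_nr/fscr_bit_nr handling in feat_enable()).
 */
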
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
        if (f->hv_support == HV_SUPPORT_NONE) {
        } else if (f->hv_support & HV_SUPPORT_HFSCR) {
                u64 hfscr = mfspr(SPRN_HFSCR);
                hfscr |= 1UL << f->hfscr_bit_nr;
                mtspr(SPRN_HFSCR, hfscr);
        } else {
                /* Does not have a known recipe */
                return 0;
        }

        if (f->os_support == OS_SUPPORT_NONE) {
        } else if (f->os_support & OS_SUPPORT_FSCR) {
                u64 fscr = mfspr(SPRN_FSCR);
                fscr |= 1UL << f->fscr_bit_nr;
                mtspr(SPRN_FSCR, fscr);
        } else {
                /* Does not have a known recipe */
                return 0;
        }

        if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
                uint32_t word = f->hwcap_bit_nr / 32;
                uint32_t bit = f->hwcap_bit_nr % 32;

                if (word == 0)
                        cur_cpu_spec->cpu_user_features |= 1U << bit;
                else if (word == 1)
                        cur_cpu_spec->cpu_user_features2 |= 1U << bit;
                else
                        pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
        }

        return 1;
}

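/*
 * The hwcap-bit-nr mapping used above (and again in feat_enable()): word 0
 * (bit numbers 0-31) lands in cpu_user_features (AT_HWCAP) and word 1 (bit
 * numbers 32-63) in cpu_user_features2 (AT_HWCAP2). For example, a
 * hypothetical hwcap-bit-nr of 35 gives word = 1, bit = 3, i.e. bit 3 of
 * AT_HWCAP2.
 */
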
static int __init feat_enable(struct dt_cpu_feature *f)
{
        if (f->hv_support != HV_SUPPORT_NONE) {
                if (f->hfscr_bit_nr != -1) {
                        u64 hfscr = mfspr(SPRN_HFSCR);
                        hfscr |= 1UL << f->hfscr_bit_nr;
                        mtspr(SPRN_HFSCR, hfscr);
                }
        }

        if (f->os_support != OS_SUPPORT_NONE) {
                if (f->fscr_bit_nr != -1) {
                        u64 fscr = mfspr(SPRN_FSCR);
                        fscr |= 1UL << f->fscr_bit_nr;
                        mtspr(SPRN_FSCR, fscr);
                }
        }

        if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
                uint32_t word = f->hwcap_bit_nr / 32;
                uint32_t bit = f->hwcap_bit_nr % 32;

                if (word == 0)
                        cur_cpu_spec->cpu_user_features |= 1U << bit;
                else if (word == 1)
                        cur_cpu_spec->cpu_user_features2 |= 1U << bit;
                else
                        pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
        }

        return 1;
}

static int __init feat_disable(struct dt_cpu_feature *f)
{
        return 0;
}

static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
        u64 lpcr;

        if (!hv_mode) {
                pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
                return 0;
        }

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_LPES0; /* HV external interrupts */
        mtspr(SPRN_LPCR, lpcr);

        cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

        return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;

        return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;

        return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /* Set PECE wakeup modes for ISA 207 */
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_PECE0;
        lpcr |= LPCR_PECE1;
        lpcr |= LPCR_PECE2;
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

        return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /* Set PECE wakeup modes for ISAv3.0B */
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_PECE0;
        lpcr |= LPCR_PECE1;
        lpcr |= LPCR_PECE2;
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
        u64 lpcr;

        lpcr = mfspr(SPRN_LPCR);
        lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
        mtspr(SPRN_LPCR, lpcr);

        cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

        return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
        u64 lpcr;

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_ISL;
        mtspr(SPRN_LPCR, lpcr);

        cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

        return 1;
}

static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
        cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
        cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
        u64 lpcr;

        feat_enable(f);

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_DPFD;
        lpcr |= (4UL << LPCR_DPFD_SH);
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static void hfscr_pmu_enable(void)
{
        u64 hfscr = mfspr(SPRN_HFSCR);
        hfscr |= PPC_BIT(60);
        mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
        if (hv_mode) {
                mtspr(SPRN_MMCRC, 0);
                mtspr(SPRN_MMCRH, 0);
        }

        mtspr(SPRN_MMCRA, 0);
        mtspr(SPRN_MMCR0, 0);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
        mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
        cur_cpu_spec->platform = "power8";
        cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

        return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
        hfscr_pmu_enable();

        init_pmu_power8();
        init_pmu_registers = init_pmu_power8;

        cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
        if (pvr_version_is(PVR_POWER8E))
                cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

        cur_cpu_spec->num_pmcs          = 6;
        cur_cpu_spec->pmc_type          = PPC_PMC_IBM;
        cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";

        return 1;
}

static void init_pmu_power9(void)
{
        if (hv_mode)
                mtspr(SPRN_MMCRC, 0);

        mtspr(SPRN_MMCRA, 0);
        mtspr(SPRN_MMCR0, 0);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
        cur_cpu_spec->platform = "power9";
        cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

        return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
        hfscr_pmu_enable();

        init_pmu_power9();
        init_pmu_registers = init_pmu_power9;

        cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

        cur_cpu_spec->num_pmcs          = 6;
        cur_cpu_spec->pmc_type          = PPC_PMC_IBM;
        cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";

        return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        feat_enable(f);
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
        feat_enable(f);
        cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

        return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
        feat_enable(f);
        cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
        cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
        feat_enable(f);
        cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

        return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
        /*
         * PPC_FEATURE2_EBB is enabled in PMU init code because it has
         * historically been related to the PMU facility. This may have
         * to be decoupled if EBB becomes more generic. For now, follow
         * existing convention.
         */
        f->hwcap_bit_nr = -1;
        feat_enable(f);

        return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /* P9 has an HFSCR for privileged state */
        feat_enable(f);

        cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /*
         * POWER9 XIVE interrupts including in OPAL XICS compatibility
         * are always delivered as hypervisor virtualization interrupts (HVI).
         *
         * However LPES0 is not set here, in the chance that an EE does get
         * delivered to the host somehow, the EE handler would not expect it
         * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
         * happen if there is a bug in interrupt controller code, or IC is
         * misconfigured in systemsim.
         */
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_HVICE;     /* enable hvi interrupts */
        lpcr |= LPCR_HEIC;      /* disable ee interrupts when MSR_HV */
        lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
        cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

        return 1;
}

struct dt_cpu_feature_match {
        const char *name;
        int (*enable)(struct dt_cpu_feature *f);
        u64 cpu_ftr_bit_mask;
};

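/*
 * When a table entry's enable() hook succeeds, cpu_ftr_bit_mask (if non-zero)
 * is OR'ed into cur_cpu_spec->cpu_features by cpufeatures_process_feature(),
 * so simple features only need feat_enable() plus a CPU_FTR_* mask here.
 */
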
static struct dt_cpu_feature_match __initdata
                dt_cpu_feature_match_table[] = {
        {"hypervisor", feat_enable_hv, 0},
        {"big-endian", feat_enable, 0},
        {"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
        {"smt", feat_enable_smt, 0},
        {"interrupt-facilities", feat_enable, 0},
        {"timer-facilities", feat_enable, 0},
        {"timer-facilities-v3", feat_enable, 0},
        {"debug-facilities", feat_enable, 0},
        {"come-from-address-register", feat_enable, CPU_FTR_CFAR},
        {"branch-tracing", feat_enable, 0},
        {"floating-point", feat_enable_fp, 0},
        {"vector", feat_enable_vector, 0},
        {"vector-scalar", feat_enable_vsx, 0},
        {"vector-scalar-v3", feat_enable, 0},
        {"decimal-floating-point", feat_enable, 0},
        {"decimal-integer", feat_enable, 0},
        {"quadword-load-store", feat_enable, 0},
        {"vector-crypto", feat_enable, 0},
        {"mmu-hash", feat_enable_mmu_hash, 0},
        {"mmu-radix", feat_enable_mmu_radix, 0},
        {"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
        {"virtual-page-class-key-protection", feat_enable, 0},
        {"transactional-memory", feat_enable_tm, CPU_FTR_TM},
        {"transactional-memory-v3", feat_enable_tm, 0},
        {"idle-nap", feat_enable_idle_nap, 0},
        {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
        {"idle-stop", feat_enable_idle_stop, 0},
        {"machine-check-power8", feat_enable_mce_power8, 0},
        {"performance-monitor-power8", feat_enable_pmu_power8, 0},
        {"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
        {"event-based-branch", feat_enable_ebb, 0},
        {"target-address-register", feat_enable, 0},
        {"branch-history-rolling-buffer", feat_enable, 0},
        {"control-register", feat_enable, CPU_FTR_CTRL},
        {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
        {"no-execute", feat_enable, 0},
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
        {"coprocessor-icswx", feat_enable, 0},
        {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
        {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
        {"wait", feat_enable, 0},
        {"atomic-memory-operations", feat_enable, 0},
        {"branch-v3", feat_enable, 0},
        {"copy-paste", feat_enable, 0},
        {"decimal-floating-point-v3", feat_enable, 0},
        {"decimal-integer-v3", feat_enable, 0},
        {"fixed-point-v3", feat_enable, 0},
        {"floating-point-v3", feat_enable, 0},
        {"group-start-register", feat_enable, 0},
        {"pc-relative-addressing", feat_enable, 0},
        {"machine-check-power9", feat_enable_mce_power9, 0},
        {"performance-monitor-power9", feat_enable_pmu_power9, 0},
        {"event-based-branch-v3", feat_enable, 0},
        {"random-number-generator", feat_enable, 0},
        {"system-call-vectored", feat_disable, 0},
        {"trace-interrupt-v3", feat_enable, 0},
        {"vector-v3", feat_enable, 0},
        {"vector-binary128", feat_enable, 0},
        {"vector-binary16", feat_enable, 0},
        {"wait-v3", feat_enable, 0},
};

static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;

static int __init dt_cpu_ftrs_parse(char *str)
{
        if (!str)
                return 0;

        if (!strcmp(str, "off"))
                using_dt_cpu_ftrs = false;
        else if (!strcmp(str, "known"))
                enable_unknown = false;
        else
                return 1;

        return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

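/*
 * Command line usage, as parsed above: booting with "dt_cpu_ftrs=off" skips
 * this binding entirely (see also disabled_on_cmdline() below), while
 * "dt_cpu_ftrs=known" enables only features that have an explicit handler in
 * dt_cpu_feature_match_table[].
 */
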
static void __init cpufeatures_setup_start(u32 isa)
{
        pr_info("setup for ISA %d\n", isa);

        if (isa >= 3000) {
                cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
                cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
        }
}

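/*
 * Note: the 3000 cutoff above matches the ISA_V3_0B device-tree constant
 * defined near the top of this file (ISA_V2_07B = 2070, ISA_V3_0B = 3000).
 */
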
static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
        const struct dt_cpu_feature_match *m;
        bool known = false;
        int i;

        for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
                m = &dt_cpu_feature_match_table[i];
                if (!strcmp(f->name, m->name)) {
                        known = true;
                        if (m->enable(f))
                                break;

                        pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
                                f->name);
                        return false;
                }
        }

        if (!known && enable_unknown) {
                if (!feat_try_enable_unknown(f)) {
                        pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
                                f->name);
                        return false;
                }
        }

        if (m->cpu_ftr_bit_mask)
                cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;

        if (known)
                pr_debug("enabling: %s\n", f->name);
        else
                pr_debug("enabling: %s (unknown)\n", f->name);

        return true;
}

static __init void cpufeatures_cpu_quirks(void)
{
        int version = mfspr(SPRN_PVR);

        /*
         * Not all quirks can be derived from the cpufeatures device tree.
         */
        if ((version & 0xffffff00) == 0x004e0100)
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
        else if ((version & 0xffffefff) == 0x004e0201)
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}

static void __init cpufeatures_setup_finished(void)
{
        cpufeatures_cpu_quirks();

        if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
                pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
                cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
        }

        system_registers.lpcr = mfspr(SPRN_LPCR);
        system_registers.hfscr = mfspr(SPRN_HFSCR);
        system_registers.fscr = mfspr(SPRN_FSCR);

        pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}

static int __init disabled_on_cmdline(void)
{
        unsigned long root, chosen;
        const char *p;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND)
                return false;

        p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
        if (!p)
                return false;

        if (strstr(p, "dt_cpu_ftrs=off"))
                return true;

        return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
                                        int depth, void *data)
{
        if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
            && of_get_flat_dt_prop(node, "isa", NULL))
                return 1;

        return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
        return using_dt_cpu_ftrs;
}

bool __init dt_cpu_ftrs_init(void *fdt)
{
        using_dt_cpu_ftrs = false;

        /* Setup and verify the FDT, if it fails we just bail */
        if (!early_init_dt_verify(fdt))
                return false;

        if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
                return false;

        if (disabled_on_cmdline())
                return false;

        cpufeatures_setup_cpu();

        using_dt_cpu_ftrs = true;
        return true;
}

static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;

static int __init process_cpufeatures_node(unsigned long node,
                                           const char *uname, int i)
{
        const __be32 *prop;
        struct dt_cpu_feature *f;
        int len;

        f = &dt_cpu_features[i];
        memset(f, 0, sizeof(struct dt_cpu_feature));

        f->node = node;
        f->name = uname;

        prop = of_get_flat_dt_prop(node, "isa", &len);
        if (!prop) {
                pr_warn("%s: missing isa property\n", uname);
                return 0;
        }
        f->isa = be32_to_cpup(prop);

        prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
        if (!prop) {
                pr_warn("%s: missing usable-privilege property\n", uname);
                return 0;
        }
        f->usable_privilege = be32_to_cpup(prop);

        prop = of_get_flat_dt_prop(node, "hv-support", &len);
        if (prop)
                f->hv_support = be32_to_cpup(prop);
        else
                f->hv_support = HV_SUPPORT_NONE;

        prop = of_get_flat_dt_prop(node, "os-support", &len);
        if (prop)
                f->os_support = be32_to_cpup(prop);
        else
                f->os_support = OS_SUPPORT_NONE;

        prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
        if (prop)
                f->hfscr_bit_nr = be32_to_cpup(prop);
        else
                f->hfscr_bit_nr = -1;

        prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
        if (prop)
                f->fscr_bit_nr = be32_to_cpup(prop);
        else
                f->fscr_bit_nr = -1;

        prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
        if (prop)
                f->hwcap_bit_nr = be32_to_cpup(prop);
        else
                f->hwcap_bit_nr = -1;

        if (f->usable_privilege & USABLE_HV) {
                if (!(mfmsr() & MSR_HV)) {
                        pr_warn("%s: HV feature passed to guest\n", uname);
                        return 0;
                }

                if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
                        pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
                        return 0;
                }

                if (f->hv_support == HV_SUPPORT_HFSCR) {
                        if (f->hfscr_bit_nr == -1) {
                                pr_warn("%s: missing hfscr_bit_nr\n", uname);
                                return 0;
                        }
                }
        } else {
                if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
                        pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
                        return 0;
                }
        }

        if (f->usable_privilege & USABLE_OS) {
                if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
                        pr_warn("%s: unwanted fscr_bit_nr\n", uname);
                        return 0;
                }

                if (f->os_support == OS_SUPPORT_FSCR) {
                        if (f->fscr_bit_nr == -1) {
                                pr_warn("%s: missing fscr_bit_nr\n", uname);
                                return 0;
                        }
                }
        } else {
                if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
                        pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
                        return 0;
                }
        }

        if (!(f->usable_privilege & USABLE_PR)) {
                if (f->hwcap_bit_nr != -1) {
                        pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
                        return 0;
                }
        }

        /* Do all the independent features in the first pass */
        if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
                if (cpufeatures_process_feature(f))
                        f->enabled = 1;
                else
                        f->disabled = 1;
        }

        return 0;
}

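/*
 * Features without a "dependencies" property were enabled in the first pass
 * above; cpufeatures_deps_enable() below handles the rest by recursively
 * enabling each dependency (identified by phandle) before the feature itself.
 */
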
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
        const __be32 *prop;
        int len;
        int nr_deps;
        int i;

        if (f->enabled || f->disabled)
                return;

        prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
        if (!prop) {
                pr_warn("%s: missing dependencies property\n", f->name);
                return;
        }

        nr_deps = len / sizeof(int);

        for (i = 0; i < nr_deps; i++) {
                unsigned long phandle = be32_to_cpu(prop[i]);
                int j;

                for (j = 0; j < nr_dt_cpu_features; j++) {
                        struct dt_cpu_feature *d = &dt_cpu_features[j];

                        if (of_get_flat_dt_phandle(d->node) == phandle) {
                                cpufeatures_deps_enable(d);
                                if (d->disabled) {
                                        f->disabled = 1;
                                        return;
                                }
                        }
                }
        }

        if (cpufeatures_process_feature(f))
                f->enabled = 1;
        else
                f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
                                            const char *uname,
                                            void *data)
{
        int *count = data;

        process_cpufeatures_node(node, uname, *count);

        (*count)++;

        return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
                                             const char *uname,
                                             void *data)
{
        int *count = data;

        (*count)++;

        return 0;
}

static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
                                            *uname, int depth, void *data)
{
        const __be32 *prop;
        int count, i;
        u32 isa;

        /* We are scanning "ibm,powerpc-cpu-features" nodes only */
        if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
                return 0;

        prop = of_get_flat_dt_prop(node, "isa", NULL);
        if (!prop)
                /* We checked before, "can't happen" */
                return 0;

        isa = be32_to_cpup(prop);

        /* Count and allocate space for cpu features */
        of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
                                 &nr_dt_cpu_features);
        dt_cpu_features = __va(
                memblock_alloc(sizeof(struct dt_cpu_feature)*
                               nr_dt_cpu_features, PAGE_SIZE));

        cpufeatures_setup_start(isa);

        /* Scan nodes into dt_cpu_features and enable those without deps */
        count = 0;
        of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

        /* Recursive enable remaining features with dependencies */
        for (i = 0; i < nr_dt_cpu_features; i++) {
                struct dt_cpu_feature *f = &dt_cpu_features[i];

                cpufeatures_deps_enable(f);
        }

        prop = of_get_flat_dt_prop(node, "display-name", NULL);
        if (prop && strlen((char *)prop) != 0) {
                strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
                cur_cpu_spec->cpu_name = dt_cpu_name;
        }

        cpufeatures_setup_finished();

        memblock_free(__pa(dt_cpu_features),
                      sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

        return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
        if (!using_dt_cpu_ftrs)
                return;

        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}