/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <asm/processor.h>
#include <asm/apic.h>		/* for apic->phys_pkg_id() */

struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
};

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;

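	/*
	 * Each entry names a scattered feature flag, the CPUID output
	 * register (indexed via the CR_* enum) and bit that report it,
	 * and the CPUID leaf to query; a zeroed entry terminates the table.
	 */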
	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
		{ X86_FEATURE_IDA,   CR_EAX, 1, 0x00000006 },
		{ X86_FEATURE_ARAT,  CR_EAX, 2, 0x00000006 },
		{ X86_FEATURE_NPT,   CR_EDX, 0, 0x8000000a },
		{ X86_FEATURE_LBRV,  CR_EDX, 1, 0x8000000a },
		{ X86_FEATURE_SVML,  CR_EDX, 2, 0x8000000a },
		{ X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
		{ 0, 0, 0, 0 }
	};

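	/*
	 * Probe each entry: confirm its CPUID leaf is implemented, read
	 * the leaf, and set the capability bit if the feature is present.
	 */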
	for (cb = cpuid_bits; cb->feature; cb++) {

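		/*
		 * cb->level & 0xffff0000 is the base leaf of the range
		 * (0x00000000 standard, 0x80000000 extended); CPUID on the
		 * base leaf reports the highest leaf implemented in that
		 * range.  A maximum below cb->level, or outside the 64K
		 * window entirely, means the requested leaf does not exist.
		 */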
		/* Verify that the level is valid */
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;

		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
		      &regs[CR_ECX], &regs[CR_EDX]);

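		/* cb->reg selects one of the four outputs via the CR_* enum. */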
		if (regs[cb->reg] & (1 << cb->bit))
			set_cpu_cap(c, cb->feature);
	}
}

/* leaf 0xb SMT level */
#define SMT_LEVEL	0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE	0
#define SMT_TYPE	1
#define CORE_TYPE	2

#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)

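/*
 * CPUID leaf 0xb reports one topology level per sub-leaf: ECX[15:8] is
 * the level type (SMT, Core, or 0 for an invalid sub-leaf), EAX[4:0] is
 * how far the x2APIC ID must be shifted right to reach the next level,
 * and EBX[15:0] is the number of logical processors at this level.
 */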
/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;

	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * check if the cpuid leaf 0xb is actually implemented.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

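	/*
	 * At the SMT level, EBX gives the logical CPUs per core and EAX
	 * the width of the SMT ID field in the x2APIC ID; these also act
	 * as the fallback values if no Core-type sub-leaf is found below.
	 */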
	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

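	/*
	 * The walk stops at the first sub-leaf whose level type is
	 * invalid (0), which marks the end of the enumeration.
	 */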
	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;

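	/*
	 * core_plus_mask_width covers the SMT and core fields of the
	 * x2APIC ID; masking with ~(-1 << width) and shifting out the SMT
	 * bits leaves a mask that selects only the core-number bits.
	 */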
	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
						 & core_select_mask;
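	/*
	 * With the default APIC driver, phys_pkg_id() simply shifts the
	 * APIC ID right by the given width, so shifting out both the SMT
	 * and core fields yields the physical package id.
	 */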
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);

	c->x86_max_cores = (core_level_siblings / smp_num_siblings);

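	/*
	 * At the Core level, EBX counted logical processors per package,
	 * so dividing by the SMT siblings per core gives cores per package.
	 */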
	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
	       c->phys_proc_id);
	if (c->x86_max_cores > 1)
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
}