/*
 * Resource Director Technology(RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"

#define MBA_IS_LINEAR   0x4
#define MBA_MAX_MBPS    U32_MAX

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)

struct rdt_resource rdt_resources_all[] = {
        [RDT_RESOURCE_L3] =
        {
                .rid            = RDT_RESOURCE_L3,
                .domains        = domain_init(RDT_RESOURCE_L3),
                .msr_base       = IA32_L3_CBM_BASE,
                .msr_update     = cat_wrmsr,
                .parse_ctrlval  = parse_cbm,
                .format_str     = "%d=%0*x",
                .fflags         = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L3DATA] =
        {
                .rid            = RDT_RESOURCE_L3DATA,
                .domains        = domain_init(RDT_RESOURCE_L3DATA),
                .msr_base       = IA32_L3_CBM_BASE,
                .msr_update     = cat_wrmsr,
                .parse_ctrlval  = parse_cbm,
                .format_str     = "%d=%0*x",
                .fflags         = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L3CODE] =
        {
                .rid            = RDT_RESOURCE_L3CODE,
                .domains        = domain_init(RDT_RESOURCE_L3CODE),
                .msr_base       = IA32_L3_CBM_BASE,
                .msr_update     = cat_wrmsr,
                .parse_ctrlval  = parse_cbm,
                .format_str     = "%d=%0*x",
                .fflags         = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L2] =
        {
                .rid            = RDT_RESOURCE_L2,
                .domains        = domain_init(RDT_RESOURCE_L2),
                .msr_base       = IA32_L2_CBM_BASE,
                .msr_update     = cat_wrmsr,
                .parse_ctrlval  = parse_cbm,
                .format_str     = "%d=%0*x",
                .fflags         = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L2DATA] =
        {
                .rid            = RDT_RESOURCE_L2DATA,
                .domains        = domain_init(RDT_RESOURCE_L2DATA),
                .msr_base       = IA32_L2_CBM_BASE,
                .msr_update     = cat_wrmsr,
                .parse_ctrlval  = parse_cbm,
                .format_str     = "%d=%0*x",
                .fflags         = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L2CODE] =
        {
                .rid            = RDT_RESOURCE_L2CODE,
                .domains        = domain_init(RDT_RESOURCE_L2CODE),
                .msr_base       = IA32_L2_CBM_BASE,
                .msr_update     = cat_wrmsr,
                .parse_ctrlval  = parse_cbm,
                .format_str     = "%d=%0*x",
                .fflags         = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_MBA] =
        {
                .rid            = RDT_RESOURCE_MBA,
                .domains        = domain_init(RDT_RESOURCE_MBA),
                .msr_base       = IA32_MBA_THRTL_BASE,
                .msr_update     = mba_wrmsr,
                .parse_ctrlval  = parse_bw,
                .format_str     = "%d=%*u",
                .fflags         = RFTYPE_RES_MB,
        },
};

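/*
 * cbm_idx() - Map a CLOSID to the index of its cache bit mask MSR.
 * The per-resource cbm_idx_mult and cbm_idx_offset let resources that
 * share an MSR range (such as the CDP code/data variants of a cache
 * level) address only their own slots.
 */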
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
        return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *      Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *      Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *      Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *      Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *      Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *      Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
        u32 l, h, max_cbm = BIT_MASK(20) - 1;

        if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
                return;
        rdmsr(IA32_L3_CBM_BASE, l, h);

        /* If all the bits were set in MSR, return success */
        if (l != max_cbm)
                return;

        r->num_closid = 4;
        r->default_ctrl = max_cbm;
        r->cache.cbm_len = 20;
        r->cache.shareable_bits = 0xc0000;
        r->cache.min_cbm_bits = 2;
        r->alloc_capable = true;
        r->alloc_enabled = true;

        rdt_alloc_capable = true;
}

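/*
 * is_mba_sc() - Query whether the MBA software controller (bandwidth
 * expressed in MBps) is enabled. A NULL resource pointer is treated as
 * a query on the MBA resource itself.
 */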
bool is_mba_sc(struct rdt_resource *r)
{
        if (!r)
                return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;

        return r->membw.mba_sc;
}

/*
 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
 * exposed to user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
        /*
         * There are no Intel SKUs as of now to support non-linear delay.
         */
        pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
                boot_cpu_data.x86, boot_cpu_data.x86_model);

        return false;
}

static bool rdt_get_mem_config(struct rdt_resource *r)
{
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;

        cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
        r->num_closid = edx.split.cos_max + 1;
        r->membw.max_delay = eax.split.max_delay + 1;
        r->default_ctrl = MAX_MBA_BW;
        if (ecx & MBA_IS_LINEAR) {
                r->membw.delay_linear = true;
                r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
                r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
        } else {
                if (!rdt_get_mb_table(r))
                        return false;
        }

        r->alloc_capable = true;
        r->alloc_enabled = true;

        return true;
}

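/*
 * rdt_get_cache_alloc_cfg() - Read the cache allocation parameters of a
 * resource from CPUID leaf 0x10, sub-leaf @idx: number of CLOSIDs, cache
 * bit mask length, shareable bits and the default (all ones) control value.
 */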
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
        union cpuid_0x10_1_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;

        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
        r->num_closid = edx.split.cos_max + 1;
        r->cache.cbm_len = eax.split.cbm_len + 1;
        r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
        r->cache.shareable_bits = ebx & r->default_ctrl;
        r->data_width = (r->cache.cbm_len + 3) / 4;
        r->alloc_capable = true;
        r->alloc_enabled = true;
}

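/*
 * rdt_get_cdp_config() - Derive the code/data resource parameters from the
 * unified cache resource at @level. CDP splits the CLOSID space in half;
 * the remaining cache parameters are inherited from the parent level.
 */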
static void rdt_get_cdp_config(int level, int type)
{
        struct rdt_resource *r_l = &rdt_resources_all[level];
        struct rdt_resource *r = &rdt_resources_all[type];

        r->num_closid = r_l->num_closid / 2;
        r->cache.cbm_len = r_l->cache.cbm_len;
        r->default_ctrl = r_l->default_ctrl;
        r->cache.shareable_bits = r_l->cache.shareable_bits;
        r->data_width = (r->cache.cbm_len + 3) / 4;
        r->alloc_capable = true;
        /*
         * By default, CDP is disabled. CDP can be enabled by mount parameter
         * "cdp" during resctrl file system mount time.
         */
        r->alloc_enabled = false;
}

static void rdt_get_cdp_l3_config(void)
{
        rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
        rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
        rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
        rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}

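/*
 * get_cache_id() - Return the id of the cache at @level that @cpu shares,
 * or -1 if no matching cacheinfo leaf is found.
 */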
static int get_cache_id(int cpu, int level)
{
        struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
        int i;

        for (i = 0; i < ci->num_leaves; i++) {
                if (ci->info_list[i].level == level)
                        return ci->info_list[i].id;
        }

        return -1;
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
        if (r->membw.delay_linear)
                return MAX_MBA_BW - bw;

        pr_warn_once("Non Linear delay-bw map not supported but queried\n");
        return r->default_ctrl;
}

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
        unsigned int i;

        /* Write the delay values for mba. */
        for (i = m->low; i < m->high; i++)
                wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

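/*
 * Write the cache bit masks for CLOSIDs [m->low, m->high) of this domain
 * into the corresponding cache bit mask MSRs.
 */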
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
        unsigned int i;

        for (i = m->low; i < m->high; i++)
                wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
        struct rdt_domain *d;

        list_for_each_entry(d, &r->domains, list) {
                /* Find the domain that contains this CPU */
                if (cpumask_test_cpu(cpu, &d->cpu_mask))
                        return d;
        }

        return NULL;
}

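/*
 * rdt_ctrl_update() - Apply control value updates to the domain that
 * contains the executing CPU. Warns once if the CPU does not belong to
 * any domain of the resource.
 */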
void rdt_ctrl_update(void *arg)
{
        struct msr_param *m = arg;
        struct rdt_resource *r = m->res;
        int cpu = smp_processor_id();
        struct rdt_domain *d;

        d = get_domain_from_cpu(cpu, r);
        if (d) {
                r->msr_update(d, m, r);
                return;
        }
        pr_warn_once("cpu %d not found in any domain for resource %s\n",
                     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
                                   struct list_head **pos)
{
        struct rdt_domain *d;
        struct list_head *l;

        if (id < 0)
                return ERR_PTR(-ENODEV);

        list_for_each(l, &r->domains) {
                d = list_entry(l, struct rdt_domain, list);
                /* When id is found, return its domain. */
                if (id == d->id)
                        return d;
                /* Stop searching when finding id's position in sorted list. */
                if (id < d->id)
                        break;
        }

        if (pos)
                *pos = l;

        return NULL;
}

void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
{
        int i;

        /*
         * Initialize the Control MSRs to having no control.
         * For Cache Allocation: Set all bits in cbm
         * For Memory Allocation: Set b/w requested to 100%
         * and the bandwidth in MBps to U32_MAX
         */
        for (i = 0; i < r->num_closid; i++, dc++, dm++) {
                *dc = r->default_ctrl;
                *dm = MBA_MAX_MBPS;
        }
}

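/*
 * domain_setup_ctrlval() - Allocate the per-CLOSID control value arrays of
 * a new domain, fill them with the default values and push them out to the
 * hardware via the resource's msr_update() helper.
 */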
static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
        struct msr_param m;
        u32 *dc, *dm;

        dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
        if (!dc)
                return -ENOMEM;

        dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
        if (!dm) {
                kfree(dc);
                return -ENOMEM;
        }

        d->ctrl_val = dc;
        d->mbps_val = dm;
        setup_default_ctrlval(r, dc, dm);

        m.low = 0;
        m.high = r->num_closid;
        r->msr_update(d, &m, r);
        return 0;
}

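/*
 * domain_setup_mon_state() - Allocate the monitoring state (limbo bitmap
 * and MBM counters) of a new domain and arm the overflow handler when MBM
 * is enabled.
 */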
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
        size_t tsize;

        if (is_llc_occupancy_enabled()) {
                d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
                if (!d->rmid_busy_llc)
                        return -ENOMEM;
                INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
        }
        if (is_mbm_total_enabled()) {
                tsize = sizeof(*d->mbm_total);
                d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
                if (!d->mbm_total) {
                        bitmap_free(d->rmid_busy_llc);
                        return -ENOMEM;
                }
        }
        if (is_mbm_local_enabled()) {
                tsize = sizeof(*d->mbm_local);
                d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
                if (!d->mbm_local) {
                        bitmap_free(d->rmid_busy_llc);
                        kfree(d->mbm_total);
                        return -ENOMEM;
                }
        }

        if (is_mbm_enabled()) {
                INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
                mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
        }

        return 0;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
        int id = get_cache_id(cpu, r->cache_level);
        struct list_head *add_pos = NULL;
        struct rdt_domain *d;

        d = rdt_find_domain(r, id, &add_pos);
        if (IS_ERR(d)) {
                pr_warn("Couldn't find cache id for cpu %d\n", cpu);
                return;
        }

        if (d) {
                cpumask_set_cpu(cpu, &d->cpu_mask);
                return;
        }

        d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
        if (!d)
                return;

        d->id = id;
        cpumask_set_cpu(cpu, &d->cpu_mask);

        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
                kfree(d);
                return;
        }

        if (r->mon_capable && domain_setup_mon_state(r, d)) {
                kfree(d);
                return;
        }

        list_add_tail(&d->list, add_pos);

        /*
         * If resctrl is mounted, add
         * per domain monitor data directories.
         */
        if (static_branch_unlikely(&rdt_mon_enable_key))
                mkdir_mondata_subdir_allrdtgrp(r, d);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
        int id = get_cache_id(cpu, r->cache_level);
        struct rdt_domain *d;

        d = rdt_find_domain(r, id, NULL);
        if (IS_ERR_OR_NULL(d)) {
                pr_warn("Couldn't find cache id for cpu %d\n", cpu);
                return;
        }

        cpumask_clear_cpu(cpu, &d->cpu_mask);
        if (cpumask_empty(&d->cpu_mask)) {
                /*
                 * If resctrl is mounted, remove all the
                 * per domain monitor data directories.
                 */
                if (static_branch_unlikely(&rdt_mon_enable_key))
                        rmdir_mondata_subdir_allrdtgrp(r, d->id);
                list_del(&d->list);
                if (is_mbm_enabled())
                        cancel_delayed_work(&d->mbm_over);
                if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
                        /*
                         * When a package is going down, forcefully
                         * decrement rmid->ebusy. There is no way to know
                         * that the L3 was flushed and hence may lead to
                         * incorrect counts in rare scenarios, but leaving
                         * the RMID as busy creates RMID leaks if the
                         * package never comes back.
                         */
                        __check_limbo(d, true);
                        cancel_delayed_work(&d->cqm_limbo);
                }

                /*
                 * rdt_domain "d" is going to be freed below, so clear
                 * its pointer from pseudo_lock_region struct.
                 */
                if (d->plr)
                        d->plr->d = NULL;

                kfree(d->ctrl_val);
                kfree(d->mbps_val);
                bitmap_free(d->rmid_busy_llc);
                kfree(d->mbm_total);
                kfree(d->mbm_local);
                kfree(d);
                return;
        }

        if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
                if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
                        cancel_delayed_work(&d->mbm_over);
                        mbm_setup_overflow_handler(d, 0);
                }
                if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
                    has_busy_rmid(r, d)) {
                        cancel_delayed_work(&d->cqm_limbo);
                        cqm_setup_limbo_handler(d, 0);
                }
        }
}

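/*
 * Reset the cached CLOSID/RMID of a CPU and write the default (zero)
 * values to its IA32_PQR_ASSOC MSR when the CPU goes on- or offline.
 */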
static void clear_closid_rmid(int cpu)
{
        struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

        state->default_closid = 0;
        state->default_rmid = 0;
        state->cur_closid = 0;
        state->cur_rmid = 0;
        wrmsr(IA32_PQR_ASSOC, 0, 0);
}

static int intel_rdt_online_cpu(unsigned int cpu)
{
        struct rdt_resource *r;

        mutex_lock(&rdtgroup_mutex);
        for_each_capable_rdt_resource(r)
                domain_add_cpu(cpu, r);
        /* The cpu is set in default rdtgroup after online. */
        cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
        clear_closid_rmid(cpu);
        mutex_unlock(&rdtgroup_mutex);

        return 0;
}

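/*
 * Remove @cpu from the cpumasks of the monitor child groups of @r. A CPU
 * can be in at most one child group, so stop after the first match.
 */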
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
        struct rdtgroup *cr;

        list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
                if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
                        break;
                }
        }
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;

        mutex_lock(&rdtgroup_mutex);
        for_each_capable_rdt_resource(r)
                domain_remove_cpu(cpu, r);
        list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
                if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
                        clear_childcpus(rdtgrp, cpu);
                        break;
                }
        }
        clear_closid_rmid(cpu);
        mutex_unlock(&rdtgroup_mutex);

        return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
        struct rdt_resource *r;
        int cl;

        for_each_alloc_capable_rdt_resource(r) {
                cl = strlen(r->name);
                if (cl > max_name_width)
                        max_name_width = cl;

                if (r->data_width > max_data_width)
                        max_data_width = r->data_width;
        }
}

enum {
        RDT_FLAG_CMT,
        RDT_FLAG_MBM_TOTAL,
        RDT_FLAG_MBM_LOCAL,
        RDT_FLAG_L3_CAT,
        RDT_FLAG_L3_CDP,
        RDT_FLAG_L2_CAT,
        RDT_FLAG_L2_CDP,
        RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)      \
[idx] = {                       \
        .name = n,              \
        .flag = f               \
}

struct rdt_options {
        char    *name;
        int     flag;
        bool    force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
        RDT_OPT(RDT_FLAG_CMT,       "cmt",      X86_FEATURE_CQM_OCCUP_LLC),
        RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
        RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
        RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",    X86_FEATURE_CAT_L3),
        RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",    X86_FEATURE_CDP_L3),
        RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",    X86_FEATURE_CAT_L2),
        RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",    X86_FEATURE_CDP_L2),
        RDT_OPT(RDT_FLAG_MBA,       "mba",      X86_FEATURE_MBA),
};

#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)

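/*
 * Parse the "rdt=" boot option: a comma-separated list of the feature
 * names above, each optionally prefixed with '!' to force the feature
 * off instead of on.
 */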
static int __init set_rdt_options(char *str)
{
        struct rdt_options *o;
        bool force_off;
        char *tok;

        if (*str == '=')
                str++;
        while ((tok = strsep(&str, ",")) != NULL) {
                force_off = *tok == '!';
                if (force_off)
                        tok++;
                for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
                        if (strcmp(tok, o->name) == 0) {
                                if (force_off)
                                        o->force_off = true;
                                else
                                        o->force_on = true;
                                break;
                        }
                }
        }
        return 1;
}
__setup("rdt", set_rdt_options);

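/*
 * rdt_cpu_has() - Like boot_cpu_has(), but honours any force-off/force-on
 * overrides requested via the "rdt=" boot option.
 */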
static bool __init rdt_cpu_has(int flag)
{
        bool ret = boot_cpu_has(flag);
        struct rdt_options *o;

        if (!ret)
                return ret;

        for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
                if (flag == o->flag) {
                        if (o->force_off)
                                ret = false;
                        if (o->force_on)
                                ret = true;
                        break;
                }
        }
        return ret;
}

static __init bool get_rdt_alloc_resources(void)
{
        bool ret = false;

        if (rdt_alloc_capable)
                return true;

        if (!boot_cpu_has(X86_FEATURE_RDT_A))
                return false;

        if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
                rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
                if (rdt_cpu_has(X86_FEATURE_CDP_L3))
                        rdt_get_cdp_l3_config();
                ret = true;
        }
        if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
                /* CPUID 0x10.2 fields are same format as 0x10.1 */
                rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
                if (rdt_cpu_has(X86_FEATURE_CDP_L2))
                        rdt_get_cdp_l2_config();
                ret = true;
        }

        if (rdt_cpu_has(X86_FEATURE_MBA)) {
                if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
                        ret = true;
        }
        return ret;
}

static __init bool get_rdt_mon_resources(void)
{
        if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
                rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
                rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
                rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

        if (!rdt_mon_features)
                return false;

        return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}

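/*
 * Model specific quirks: Haswell servers lack CPUID enumeration for CAT
 * and are probed directly; Skylake servers get several RDT features
 * force-disabled depending on the stepping.
 */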
static __init void rdt_quirks(void)
{
        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_HASWELL_X:
                if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
                        cache_alloc_hsw_probe();
                break;
        case INTEL_FAM6_SKYLAKE_X:
                if (boot_cpu_data.x86_stepping <= 4)
                        set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
                else
                        set_rdt_options("!l3cat");
        }
}

static __init bool get_rdt_resources(void)
{
        rdt_quirks();
        rdt_alloc_capable = get_rdt_alloc_resources();
        rdt_mon_capable = get_rdt_mon_resources();

        return (rdt_mon_capable || rdt_alloc_capable);
}

static enum cpuhp_state rdt_online;

static int __init intel_rdt_late_init(void)
{
        struct rdt_resource *r;
        int state, ret;

        if (!get_rdt_resources())
                return -ENODEV;

        rdt_init_padding();

        state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                  "x86/rdt/cat:online:",
                                  intel_rdt_online_cpu, intel_rdt_offline_cpu);
        if (state < 0)
                return state;

        ret = rdtgroup_init();
        if (ret) {
                cpuhp_remove_state(state);
                return ret;
        }
        rdt_online = state;

        for_each_alloc_capable_rdt_resource(r)
                pr_info("Intel RDT %s allocation detected\n", r->name);

        for_each_mon_capable_rdt_resource(r)
                pr_info("Intel RDT %s monitoring detected\n", r->name);

        return 0;
}

late_initcall(intel_rdt_late_init);

static void __exit intel_rdt_exit(void)
{
        cpuhp_remove_state(rdt_online);
        rdtgroup_exit();
}

__exitcall(intel_rdt_exit);