/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"

#define MBA_IS_LINEAR		0x4
#define MBA_MAX_MBPS		U32_MAX
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);
/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;
/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid		= RDT_RESOURCE_L3,
		.domains	= domain_init(RDT_RESOURCE_L3),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid		= RDT_RESOURCE_L3DATA,
		.domains	= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid		= RDT_RESOURCE_L3CODE,
		.domains	= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid		= RDT_RESOURCE_L2,
		.domains	= domain_init(RDT_RESOURCE_L2),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2DATA] =
	{
		.rid		= RDT_RESOURCE_L2DATA,
		.domains	= domain_init(RDT_RESOURCE_L2DATA),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2CODE] =
	{
		.rid		= RDT_RESOURCE_L2CODE,
		.domains	= domain_init(RDT_RESOURCE_L2CODE),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid		= RDT_RESOURCE_MBA,
		.domains	= domain_init(RDT_RESOURCE_MBA),
		.msr_base	= IA32_MBA_THRTL_BASE,
		.msr_update	= mba_wrmsr,
		.parse_ctrlval	= parse_bw,
		.format_str	= "%d=%*u",
		.fflags		= RFTYPE_RES_MB,
	},
};
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}
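
/*
 * Illustration (added for clarity, values hypothetical): with CDP enabled the
 * data and code capacity masks interleave in the CBM MSR space, so cbm_idx()
 * scales the CLOSID by cbm_idx_mult and adds cbm_idx_offset. For example, a
 * multiplier of 2 and an offset of 1 would map CLOSID 3 to MSR index
 * 3 * 2 + 1 = 7.
 */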
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658  v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
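/*
 * Note (added for clarity): wrmsr_safe() is used for the probe write so that
 * a #GP from a part without CAT support is caught and the probe bails out
 * quietly instead of crashing the boot.
 */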
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
		return;
	rdmsr(IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}
bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;

	return r->membw.mba_sc;
}
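
/*
 * Note (added for clarity): "mba_sc" refers to the MBA software controller
 * mode, in which user space expresses memory bandwidth in MBps instead of the
 * hardware's percentage-based delay values.
 */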
/*
 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
 * exposed to user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}
static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}
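
/*
 * Worked example (added for clarity, values hypothetical): with a linear
 * delay scale and a CPUID-reported max_delay of 90, both min_bw and bw_gran
 * come out as 100 - 90 = 10, i.e. bandwidth can be requested in 10% steps
 * down to 10%.
 */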
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}
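
/*
 * Note (added for clarity): data_width is the number of hex digits needed to
 * print a capacity bitmask, e.g. a 20-bit CBM needs (20 + 3) / 4 = 5 digits.
 */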
static void rdt_get_cdp_config(int level, int type)
{
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l->num_closid / 2;
	r->cache.cbm_len = r_l->cache.cbm_len;
	r->default_ctrl = r_l->default_ctrl;
	r->cache.shareable_bits = r_l->cache.shareable_bits;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}
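
/*
 * Example (added for clarity): CDP splits each CLOSID into a data/code mask
 * pair, so a cache that exposes 16 CLOSIDs without CDP provides 8 with it.
 */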
static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}
/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}
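
/*
 * Worked example (added for clarity, assuming MAX_MBA_BW is 100): with a
 * linear delay scale a request for 80% bandwidth is written to the throttle
 * MSR as a delay value of 100 - 80 = 20.
 */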
static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}
void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}
/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}
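
/*
 * Note (added for clarity): callers that only need an exact match pass
 * pos == NULL (as domain_remove_cpu() does); domain_add_cpu() passes &add_pos
 * so a new domain can be linked in at the right place in the sorted list.
 */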
void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
{
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 * and the bandwidth in MBps to U32_MAX
	 */
	for (i = 0; i < r->num_closid; i++, dc++, dm++) {
		*dc = r->default_ctrl;
		*dm = MBA_MAX_MBPS;
	}
}
static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc, *dm;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
	if (!dm) {
		kfree(dc);
		return -ENOMEM;
	}

	d->ctrl_val = dc;
	d->mbps_val = dm;
	setup_default_ctrlval(r, dc, dm);

	m.low = 0;
	m.high = r->num_closid;
	r->msr_update(d, &m, r);
	return 0;
}
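
/*
 * Note (added for clarity): the msr_update() call above pushes the freshly
 * initialized default control values for CLOSIDs [0, num_closid) into the
 * hardware MSRs of the new domain.
 */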
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
					   sizeof(unsigned long),
					   GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			kfree(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			kfree(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);

	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */
		if (static_branch_unlikely(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		list_del(&d->list);
		if (is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		kfree(d->ctrl_val);
		kfree(d->mbps_val);
		kfree(d->rmid_busy_llc);
		kfree(d->mbm_total);
		kfree(d->mbm_local);
		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}
static void clear_closid_rmid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}
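
/*
 * Note (added for clarity): writing 0/0 to IA32_PQR_ASSOC clears both the
 * RMID and the CLOSID for this CPU, so it runs with the default
 * (unrestricted) allocation and monitoring context after the hotplug
 * transition.
 */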
static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
			break;
		}
	}
}
static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
/*
 * Choose a width for the resource name and resource data based on the
 * resource that has widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}
#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};
static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
};

#define NUM_RDT_OPTIONS	ARRAY_SIZE(rdt_options)
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);
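
/*
 * Usage example (added for clarity): booting with "rdt=!l3cat,cmt" forces L3
 * CAT detection off and forces CMT detection on, overriding what CPUID
 * reports; a '!' prefix selects force-off, a bare token selects force-on.
 */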
static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}
static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (rdt_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}
	return ret;
}
static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}
static __init void rdt_quirks(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
	}
}
static __init bool get_rdt_resources(void)
{
	rdt_quirks();
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}
static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_alloc_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("Intel RDT %s monitoring detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);