/*
 * Resource Director Technology (RDT)
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the cqm.c based on perf but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "intel_rdt.h"
#define MSR_IA32_QM_CTR         0x0c8e
#define MSR_IA32_QM_EVTSEL      0x0c8d
struct rmid_entry {
        u32                     rmid;
        int                     busy;
        struct list_head        list;
};
/**
 * @rmid_free_lru - A least recently used list of free RMIDs.
 *      These RMIDs are guaranteed to have an occupancy less than the
 *      threshold occupancy.
 */
static LIST_HEAD(rmid_free_lru);
/**
 * @rmid_limbo_count - count of currently unused but (potentially)
 *      dirty RMIDs.
 *      This counts RMIDs that no one is currently using but that
 *      may have an occupancy value > intel_cqm_threshold. User can change
 *      the threshold occupancy value.
 */
unsigned int rmid_limbo_count;
/**
 * @rmid_entry - The entry in the limbo and free lists.
 */
static struct rmid_entry        *rmid_ptrs;
/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;
/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;
/*
 * This is the threshold cache occupancy at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int intel_cqm_threshold;
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
        struct rmid_entry *entry;

        entry = &rmid_ptrs[rmid];
        WARN_ON(entry->rmid != rmid);

        return entry;
}
static u64 __rmid_read(u32 rmid, u32 eventid)
{
        u64 val;

        /*
         * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
         * with a valid event code for supported resource type and the bits
         * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
         * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
         * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
         * are error bits.
         */
        wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
        rdmsrl(MSR_IA32_QM_CTR, val);

        return val;
}
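/*
 * __rmid_read() returns the raw IA32_QM_CTR value. Consumers that need a
 * valid count (see __mon_event_count() below) check the RMID_VAL_ERROR and
 * RMID_VAL_UNAVAIL bits before trusting it; rmid_dirty() below simply
 * compares the value against the threshold.
 */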
static bool rmid_dirty(struct rmid_entry *entry)
{
        u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);

        return val >= intel_cqm_threshold;
}
/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
        struct rmid_entry *entry;
        struct rdt_resource *r;
        u32 crmid = 1, nrmid;

        r = &rdt_resources_all[RDT_RESOURCE_L3];

        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
         * are marked as busy for occupancy < threshold. If the occupancy
         * is less than the threshold decrement the busy counter of the
         * RMID and move it to the free list when the counter reaches 0.
         */
        for (;;) {
                nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
                if (nrmid >= r->num_rmid)
                        break;

                entry = __rmid_entry(nrmid);
                if (force_free || !rmid_dirty(entry)) {
                        clear_bit(entry->rmid, d->rmid_busy_llc);
                        if (!--entry->busy) {
                                rmid_limbo_count--;
                                list_add_tail(&entry->list, &rmid_free_lru);
                        }
                }
                crmid = nrmid + 1;
        }
}
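/*
 * find_first_bit() returns the bitmap size when no bit is set, so the test
 * below is true while at least one RMID in the domain is still draining.
 */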
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
{
        return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
}
/*
 * As of now the RMIDs allocation is global.
 * However we keep track of which packages the RMIDs
 * are used on to optimize the limbo list management.
 */
int alloc_rmid(void)
{
        struct rmid_entry *entry;

        lockdep_assert_held(&rdtgroup_mutex);

        if (list_empty(&rmid_free_lru))
                return rmid_limbo_count ? -EBUSY : -ENOSPC;

        entry = list_first_entry(&rmid_free_lru,
                                 struct rmid_entry, list);
        list_del(&entry->list);

        return entry->rmid;
}
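/*
 * A freed RMID cannot be handed out again immediately when LLC occupancy
 * monitoring is enabled: cache lines may still be tagged with it, so its
 * reported occupancy can exceed intel_cqm_threshold. add_rmid_to_limbo()
 * marks such an RMID busy in every domain where it still looks dirty and
 * leaves it to the limbo worker to recycle once the occupancy drains.
 */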
static void add_rmid_to_limbo(struct rmid_entry *entry)
{
        struct rdt_resource *r;
        struct rdt_domain *d;
        int cpu;
        u64 val;

        r = &rdt_resources_all[RDT_RESOURCE_L3];

        entry->busy = 0;
        cpu = get_cpu();
        list_for_each_entry(d, &r->domains, list) {
                if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
                        val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
                        if (val <= intel_cqm_threshold)
                                continue;
                }

                /*
                 * For the first limbo RMID in the domain,
                 * set up the limbo worker.
                 */
                if (!has_busy_rmid(r, d))
                        cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
                set_bit(entry->rmid, d->rmid_busy_llc);
                entry->busy++;
        }
        put_cpu();

        if (entry->busy)
                rmid_limbo_count++;
        else
                list_add_tail(&entry->list, &rmid_free_lru);
}
void free_rmid(u32 rmid)
{
        struct rmid_entry *entry;

        lockdep_assert_held(&rdtgroup_mutex);

        entry = __rmid_entry(rmid);

        if (is_llc_occupancy_enabled())
                add_rmid_to_limbo(entry);
        else
                list_add_tail(&entry->list, &rmid_free_lru);
}
static int __mon_event_count(u32 rmid, struct rmid_read *rr)
{
        u64 chunks, shift, tval;
        struct mbm_state *m;

        tval = __rmid_read(rmid, rr->evtid);
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
                rr->val = tval;
                return -EINVAL;
        }
        switch (rr->evtid) {
        case QOS_L3_OCCUP_EVENT_ID:
                rr->val += tval;
                return 0;
        case QOS_L3_MBM_TOTAL_EVENT_ID:
                m = &rr->d->mbm_total[rmid];
                break;
        case QOS_L3_MBM_LOCAL_EVENT_ID:
                m = &rr->d->mbm_local[rmid];
                break;
        default:
                /*
                 * Code would never reach here because
                 * an invalid event id would fail the __rmid_read.
                 */
                return -EINVAL;
        }
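        /*
         * The hardware chunk counter is only MBM_CNTR_WIDTH bits wide.
         * Shifting both samples up to bit 63 before subtracting lets the
         * unsigned arithmetic wrap naturally, so the delta stays correct
         * even when the hardware counter rolled over between two reads.
         */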
        shift = 64 - MBM_CNTR_WIDTH;
        chunks = (tval << shift) - (m->prev_msr << shift);
        chunks >>= shift;
        m->prev_msr = tval;
        m->chunks += chunks;

        rr->val += m->chunks;
        return 0;
}
/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
        struct rdtgroup *rdtgrp, *entry;
        struct rmid_read *rr = info;
        struct list_head *head;

        rdtgrp = rr->rgrp;

        if (__mon_event_count(rdtgrp->mon.rmid, rr))
                return;

        /*
         * For Ctrl groups read data from child monitor groups.
         */
        head = &rdtgrp->mon.crdtgrp_list;

        if (rdtgrp->type == RDTCTRL_GROUP) {
                list_for_each_entry(entry, head, mon.crdtgrp_list) {
                        if (__mon_event_count(entry->mon.rmid, rr))
                                return;
                }
        }
}
static void mbm_update(struct rdt_domain *d, int rmid)
{
        struct rmid_read rr;

        rr.d = d;

        /*
         * This is protected from concurrent reads from user space
         * as both the user and this overflow worker hold the global mutex.
         */
        if (is_mbm_total_enabled()) {
                rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
                __mon_event_count(rmid, &rr);
        }
        if (is_mbm_local_enabled()) {
                rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
                __mon_event_count(rmid, &rr);
        }
}
/*
 * Handler to scan the limbo list and move RMIDs whose occupancy is below
 * the threshold occupancy to the free list.
 */
void cqm_handle_limbo(struct work_struct *work)
{
        unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
        int cpu = smp_processor_id();
        struct rdt_resource *r;
        struct rdt_domain *d;

        mutex_lock(&rdtgroup_mutex);

        r = &rdt_resources_all[RDT_RESOURCE_L3];
        d = get_domain_from_cpu(cpu, r);

        if (!d) {
                pr_warn_once("Failure to get domain for limbo worker\n");
                goto out_unlock;
        }

        __check_limbo(d, false);

        if (has_busy_rmid(r, d))
                schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);

out_unlock:
        mutex_unlock(&rdtgroup_mutex);
}
void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
        unsigned long delay = msecs_to_jiffies(delay_ms);
        struct rdt_resource *r;
        int cpu;

        r = &rdt_resources_all[RDT_RESOURCE_L3];

        cpu = cpumask_any(&dom->cpu_mask);
        dom->cqm_work_cpu = cpu;

        schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}
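/*
 * MBM counters are only MBM_CNTR_WIDTH bits wide in hardware, so every RMID
 * has to be sampled at least once per MBM_OVERFLOW_INTERVAL. The overflow
 * worker below walks all control and monitor groups so the software chunk
 * counts never miss a hardware wrap.
 */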
void mbm_handle_overflow(struct work_struct *work)
{
        unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
        struct rdtgroup *prgrp, *crgrp;
        int cpu = smp_processor_id();
        struct list_head *head;
        struct rdt_domain *d;

        mutex_lock(&rdtgroup_mutex);

        if (!static_branch_likely(&rdt_enable_key))
                goto out_unlock;

        d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
        if (!d)
                goto out_unlock;

        list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
                mbm_update(d, prgrp->mon.rmid);

                head = &prgrp->mon.crdtgrp_list;
                list_for_each_entry(crgrp, head, mon.crdtgrp_list)
                        mbm_update(d, crgrp->mon.rmid);
        }

        schedule_delayed_work_on(cpu, &d->mbm_over, delay);

out_unlock:
        mutex_unlock(&rdtgroup_mutex);
}
void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;

        if (!static_branch_likely(&rdt_enable_key))
                return;

        cpu = cpumask_any(&dom->cpu_mask);
        dom->mbm_work_cpu = cpu;
        schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}
static int dom_data_init(struct rdt_resource *r)
{
        struct rmid_entry *entry = NULL;
        int i, nr_rmids;

        nr_rmids = r->num_rmid;
        rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
        if (!rmid_ptrs)
                return -ENOMEM;

        for (i = 0; i < nr_rmids; i++) {
                entry = &rmid_ptrs[i];
                INIT_LIST_HEAD(&entry->list);

                entry->rmid = i;
                list_add_tail(&entry->list, &rmid_free_lru);
        }

        /*
         * RMID 0 is special and is always allocated. It's used for all
         * tasks that are not monitored.
         */
        entry = __rmid_entry(0);
        list_del(&entry->list);

        return 0;
}
static struct mon_evt llc_occupancy_event = {
        .name           = "llc_occupancy",
        .evtid          = QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
        .name           = "mbm_total_bytes",
        .evtid          = QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
        .name           = "mbm_local_bytes",
        .evtid          = QOS_L3_MBM_LOCAL_EVENT_ID,
};
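/*
 * For each mon_evt above, .name is the human-readable event name and .evtid
 * is the hardware event code that __rmid_read() programs into
 * IA32_QM_EVTSEL when the event is read.
 */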
/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of the RDT_RESOURCE_L3 resource
 * because, as per the SDM, the total and local memory bandwidth events
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
        INIT_LIST_HEAD(&r->evt_list);

        if (is_llc_occupancy_enabled())
                list_add_tail(&llc_occupancy_event.list, &r->evt_list);
        if (is_mbm_total_enabled())
                list_add_tail(&mbm_total_event.list, &r->evt_list);
        if (is_mbm_local_enabled())
                list_add_tail(&mbm_local_event.list, &r->evt_list);
}
int rdt_get_mon_l3_config(struct rdt_resource *r)
{
        int ret;

        r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
        r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;

        /*
         * A reasonable upper limit on the max threshold is the number
         * of lines tagged per RMID if all RMIDs have the same number of
         * lines tagged in the LLC.
         *
         * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
         */
        intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;

        /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
        intel_cqm_threshold /= r->mon_scale;
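        /*
         * Worked example using the numbers from the comment above: with a
         * 35MB LLC (x86_cache_size = 35 * 1024 KB) and 56 RMIDs, the first
         * step gives 35 * 1024 * 1024 / 56 = 655360 bytes (~640 KB) per
         * RMID, i.e. ~1.8% of the LLC. The second step converts bytes into
         * hardware counter units; for a hypothetical upscale factor of 64
         * bytes that would be 655360 / 64 = 10240 units.
         */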
        ret = dom_data_init(r);
        if (ret)
                return ret;

        l3_mon_evt_init(r);

        r->mon_capable = true;
        r->mon_enabled = true;

        return 0;
}