arch/x86/kernel/cpu/resctrl/pseudo_lock.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Resource Director Technology (RDT)
 *
 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Reinette Chatre <reinette.chatre@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/mman.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include <asm/perf_event.h>

#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

/*
 * The bits needed to disable hardware prefetching vary based on the
 * platform. During initialization we will discover which bits to use.
 */
static u64 prefetch_disable_bits;

/*
 * Major number assigned to and shared by all devices exposing
 * pseudo-locked regions.
 */
static unsigned int pseudo_lock_major;
static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);

static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
{
	const struct rdtgroup *rdtgrp;

	rdtgrp = dev_get_drvdata(dev);
	if (mode)
		*mode = 0600;
	return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
}

static const struct class pseudo_lock_class = {
	.name = "pseudo_lock",
	.devnode = pseudo_lock_devnode,
};

/**
 * get_prefetch_disable_bits - prefetch disable bits of supported platforms
 * @void: It takes no parameters.
 *
 * Capture the list of platforms that have been validated to support
 * pseudo-locking. This includes testing to ensure pseudo-locked regions
 * with low cache miss rates can be created under a variety of load conditions
 * as well as that these pseudo-locked regions can maintain their low cache
 * miss rates under a variety of load conditions for significant lengths of time.
 *
 * After a platform has been validated to support pseudo-locking its
 * hardware prefetch disable bits are included here as they are documented
 * in the SDM.
 *
 * When adding a platform here also add support for its cache events to
 * measure_cycles_perf_fn()
 *
 * Return:
 * If platform is supported, the bits to disable hardware prefetchers, 0
 * if platform is not supported.
 */
static u64 get_prefetch_disable_bits(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return 0;

	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL_X:
		/*
		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
		 * as:
		 * 0    L2 Hardware Prefetcher Disable (R/W)
		 * 1    L2 Adjacent Cache Line Prefetcher Disable (R/W)
		 * 2    DCU Hardware Prefetcher Disable (R/W)
		 * 3    DCU IP Prefetcher Disable (R/W)
		 * 63:4 Reserved
		 */
		return 0xF;
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		/*
		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
		 * as:
		 * 0    L2 Hardware Prefetcher Disable (R/W)
		 * 1    Reserved
		 * 2    DCU Hardware Prefetcher Disable (R/W)
		 * 63:3 Reserved
		 */
		return 0x5;
	}

	return 0;
}

/**
 * pseudo_lock_minor_get - Obtain available minor number
 * @minor: Pointer to where new minor number will be stored
 *
 * A bitmask is used to track available minor numbers. Here the next free
 * minor number is marked as unavailable and returned.
 *
 * Return: 0 on success, <0 on failure.
 */
static int pseudo_lock_minor_get(unsigned int *minor)
{
	unsigned long first_bit;

	first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);

	if (first_bit == MINORBITS)
		return -ENOSPC;

	__clear_bit(first_bit, &pseudo_lock_minor_avail);
	*minor = first_bit;

	return 0;
}

/**
 * pseudo_lock_minor_release - Return minor number to available
 * @minor: The minor number made available
 */
static void pseudo_lock_minor_release(unsigned int minor)
{
	__set_bit(minor, &pseudo_lock_minor_avail);
}

/**
 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
 * @minor: The minor number of the device representing pseudo-locked region
 *
 * When the character device is accessed we need to determine which
 * pseudo-locked region it belongs to. This is done by matching the minor
 * number of the device to the pseudo-locked region to which it belongs.
 *
 * Minor numbers are assigned at the time a pseudo-locked region is associated
 * with a cache instance.
 *
 * Return: On success return pointer to resource group owning the pseudo-locked
 * region, NULL on failure.
 */
static struct rdtgroup *region_find_by_minor(unsigned int minor)
{
	struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
			rdtgrp_match = rdtgrp;
			break;
		}
	}
	return rdtgrp_match;
}

/**
 * struct pseudo_lock_pm_req - A power management QoS request list entry
 * @list:	Entry within the @pm_reqs list for a pseudo-locked region
 * @req:	PM QoS request
 */
struct pseudo_lock_pm_req {
	struct list_head list;
	struct dev_pm_qos_request req;
};

static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
{
	struct pseudo_lock_pm_req *pm_req, *next;

	list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
		dev_pm_qos_remove_request(&pm_req->req);
		list_del(&pm_req->list);
		kfree(pm_req);
	}
}

/**
 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
 * @plr: Pseudo-locked region
 *
 * To prevent the cache from being affected by power management, entering
 * C6 has to be avoided. This is accomplished by requesting a latency
 * requirement lower than the lowest C6 exit latency of all supported
 * platforms as found in the cpuidle state tables in the intel_idle driver.
 * At this time it is possible to do so with a single latency requirement
 * for all supported platforms.
 *
 * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
 * the ACPI latencies need to be considered while keeping in mind that C2
 * may be set to map to deeper sleep states. In this case the latency
 * requirement needs to prevent entering C2 also.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
	struct pseudo_lock_pm_req *pm_req;
	int cpu;
	int ret;

	for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
		pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
		if (!pm_req) {
			rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
			ret = -ENOMEM;
			goto out_err;
		}
		ret = dev_pm_qos_add_request(get_cpu_device(cpu),
					     &pm_req->req,
					     DEV_PM_QOS_RESUME_LATENCY,
					     30);
		if (ret < 0) {
			rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
					    cpu);
			kfree(pm_req);
			ret = -1;
			goto out_err;
		}
		list_add(&pm_req->list, &plr->pm_reqs);
	}

	return 0;

out_err:
	pseudo_lock_cstates_relax(plr);
	return ret;
}

/**
 * pseudo_lock_region_clear - Reset pseudo-lock region data
 * @plr: pseudo-lock region
 *
 * All content of the pseudo-locked region is reset - any memory allocated
 * is freed.
 *
 * Return: void
 */
static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
{
	plr->size = 0;
	plr->line_size = 0;
	kfree(plr->kmem);
	plr->kmem = NULL;
	plr->s = NULL;
	if (plr->d)
		plr->d->plr = NULL;
	plr->d = NULL;
	plr->cbm = 0;
	plr->debugfs_dir = NULL;
}

/**
 * pseudo_lock_region_init - Initialize pseudo-lock region information
 * @plr: pseudo-lock region
 *
 * Called after user provided a schemata to be pseudo-locked. From the
 * schemata the &struct pseudo_lock_region is on entry already initialized
 * with the resource, domain, and capacity bitmask. Here the information
 * required for pseudo-locking is deduced from this data and &struct
 * pseudo_lock_region initialized further. This information includes:
 * - size in bytes of the region to be pseudo-locked
 * - cache line size to know the stride with which data needs to be accessed
 *   to be pseudo-locked
 * - a cpu associated with the cache instance on which the pseudo-locking
 *   flow can be executed
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
{
	enum resctrl_scope scope = plr->s->res->ctrl_scope;
	struct cacheinfo *ci;
	int ret;

	if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
		return -ENODEV;

	/* Pick the first cpu we find that is associated with the cache. */
	plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);

	if (!cpu_online(plr->cpu)) {
		rdt_last_cmd_printf("CPU %u associated with cache not online\n",
				    plr->cpu);
		ret = -ENODEV;
		goto out_region;
	}

	ci = get_cpu_cacheinfo_level(plr->cpu, scope);
	if (ci) {
		plr->line_size = ci->coherency_line_size;
		plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
		return 0;
	}

	ret = -1;
	rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
	pseudo_lock_region_clear(plr);
	return ret;
}

/**
 * pseudo_lock_init - Initialize a pseudo-lock region
 * @rdtgrp: resource group to which new pseudo-locked region will belong
 *
 * A pseudo-locked region is associated with a resource group. When this
 * association is created the pseudo-locked region is initialized. The
 * details of the pseudo-locked region are not known at this time so only
 * allocation is done and association established.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_init(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr;

	plr = kzalloc(sizeof(*plr), GFP_KERNEL);
	if (!plr)
		return -ENOMEM;

	init_waitqueue_head(&plr->lock_thread_wq);
	INIT_LIST_HEAD(&plr->pm_reqs);
	rdtgrp->plr = plr;
	return 0;
}

/**
 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
 * @plr: pseudo-lock region
 *
 * Initialize the details required to set up the pseudo-locked region and
 * allocate the contiguous memory that will be pseudo-locked to the cache.
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
{
	int ret;

	ret = pseudo_lock_region_init(plr);
	if (ret < 0)
		return ret;

	/*
	 * We do not yet support contiguous regions larger than
	 * KMALLOC_MAX_SIZE.
	 */
	if (plr->size > KMALLOC_MAX_SIZE) {
		rdt_last_cmd_puts("Requested region exceeds maximum size\n");
		ret = -E2BIG;
		goto out_region;
	}

	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
	if (!plr->kmem) {
		rdt_last_cmd_puts("Unable to allocate memory\n");
		ret = -ENOMEM;
		goto out_region;
	}

	ret = 0;
	goto out;
out_region:
	pseudo_lock_region_clear(plr);
out:
	return ret;
}

/**
 * pseudo_lock_free - Free a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-locked region belonged
 *
 * The pseudo-locked region's resources have already been released, or not
 * yet created at this point. Now it can be freed and disassociated from the
 * resource group.
 *
 * Return: void
 */
static void pseudo_lock_free(struct rdtgroup *rdtgrp)
{
	pseudo_lock_region_clear(rdtgrp->plr);
	kfree(rdtgrp->plr);
	rdtgrp->plr = NULL;
}

/**
 * pseudo_lock_fn - Load kernel memory into cache
 * @_rdtgrp: resource group to which pseudo-lock region belongs
 *
 * This is the core pseudo-locking flow.
 *
 * First we ensure that the kernel memory cannot be found in the cache.
 * Then, while taking care that there will be as little interference as
 * possible, the memory to be loaded is accessed while the core is running
 * with class of service set to the bitmask of the pseudo-locked region.
 * After this is complete no future CAT allocations will be allowed to
 * overlap with this bitmask.
 *
 * Local register variables are utilized to ensure that the memory region
 * to be locked is the only memory access made during the critical locking
 * loop.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
static int pseudo_lock_fn(void *_rdtgrp)
{
	struct rdtgroup *rdtgrp = _rdtgrp;
	struct pseudo_lock_region *plr = rdtgrp->plr;
	u32 rmid_p, closid_p;
	unsigned long i;
	u64 saved_msr;
#ifdef CONFIG_KASAN
	/*
	 * The registers used for local register variables are also used
	 * when KASAN is active. When KASAN is active we use a regular
	 * variable to ensure we always use a valid pointer, but the cost
	 * is that this variable will enter the cache through evicting the
	 * memory we are trying to lock into the cache. Thus expect lower
	 * pseudo-locking success rate when KASAN is active.
	 */
	unsigned int line_size;
	unsigned int size;
	void *mem_r;
#else
	register unsigned int line_size asm("esi");
	register unsigned int size asm("edi");
	register void *mem_r asm(_ASM_BX);
#endif /* CONFIG_KASAN */

	/*
	 * Make sure none of the allocated memory is cached. If it is we
	 * will get a cache hit in below loop from outside of pseudo-locked
	 * region.
	 * wbinvd (as opposed to clflush/clflushopt) is required to
	 * increase likelihood that allocated cache portion will be filled
	 * with associated memory.
	 */
	native_wbinvd();

	/*
	 * Always called with interrupts enabled. By disabling interrupts
	 * ensure that we will not be preempted during this critical section.
	 */
	local_irq_disable();

	/*
	 * Call wrmsr and rdmsr as directly as possible to avoid tracing
	 * clobbering local register variables or affecting cache accesses.
	 *
	 * Disable the hardware prefetcher so that when the end of the memory
	 * being pseudo-locked is reached the hardware will not read beyond
	 * the buffer and evict pseudo-locked memory read earlier from the
	 * cache.
	 */
	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
	__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
	closid_p = this_cpu_read(pqr_state.cur_closid);
	rmid_p = this_cpu_read(pqr_state.cur_rmid);
	mem_r = plr->kmem;
	size = plr->size;
	line_size = plr->line_size;
	/*
	 * Critical section begin: start by writing the closid associated
	 * with the capacity bitmask of the cache region being
	 * pseudo-locked followed by reading of kernel memory to load it
	 * into the cache.
	 */
	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);

	/*
	 * Cache was flushed earlier. Now access kernel memory to read it
	 * into cache region associated with just activated plr->closid.
	 * Loop over data twice:
	 * - In first loop the cache region is shared with the page walker
	 *   as it populates the paging structure caches (including TLB).
	 * - In the second loop the paging structure caches are used and
	 *   cache region is populated with the memory being referenced.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
	}
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
	}
	/*
	 * Critical section end: restore closid with capacity bitmask that
	 * does not overlap with pseudo-locked region.
	 */
	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);

	/* Re-enable the hardware prefetcher(s) */
	wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
	local_irq_enable();

	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

/**
 * rdtgroup_monitor_in_progress - Test if monitoring in progress
 * @rdtgrp: resource group being queried
 *
 * Return: 1 if monitor groups have been created for this resource
 * group, 0 otherwise.
 */
static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
{
	return !list_empty(&rdtgrp->mon.crdtgrp_list);
}

/**
 * rdtgroup_locksetup_user_restrict - Restrict user access to group
 * @rdtgrp: resource group needing access restricted
 *
 * A resource group used for cache pseudo-locking cannot have cpus or tasks
 * assigned to it. This is communicated to the user by restricting access
 * to all the files that can be used to make such changes.
 *
 * Permissions restored with rdtgroup_locksetup_user_restore()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restriction of access an attempt will be made to restore permissions but
 * the state of the mode of these files will be uncertain when a failure
 * occurs.
 */
static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
{
	int ret;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
	if (ret)
		return ret;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
	if (ret)
		goto err_tasks;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
	if (ret)
		goto err_cpus;

	if (resctrl_arch_mon_capable()) {
		ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
		if (ret)
			goto err_cpus_list;
	}

	ret = 0;
	goto out;

err_cpus_list:
	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
err_cpus:
	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
err_tasks:
	rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
out:
	return ret;
}

/**
 * rdtgroup_locksetup_user_restore - Restore user access to group
 * @rdtgrp: resource group needing access restored
 *
 * Restore all file access previously removed using
 * rdtgroup_locksetup_user_restrict()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restoration of access an attempt will be made to restrict permissions
 * again but the state of the mode of these files will be uncertain when
 * a failure occurs.
 */
static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
{
	int ret;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
	if (ret)
		return ret;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
	if (ret)
		goto err_tasks;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
	if (ret)
		goto err_cpus;

	if (resctrl_arch_mon_capable()) {
		ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
		if (ret)
			goto err_cpus_list;
	}

	ret = 0;
	goto out;

err_cpus_list:
	rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
err_cpus:
	rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
err_tasks:
	rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
out:
	return ret;
}

/**
 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
 * @rdtgrp: resource group requested to enter locksetup mode
 *
 * A resource group enters locksetup mode to reflect that it would be used
 * to represent a pseudo-locked region and is in the process of being set
 * up to do so. A resource group used for a pseudo-locked region would
 * lose the closid associated with it so we cannot allow it to have any
 * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
 * future. Monitoring of a pseudo-locked region is not allowed either.
 *
 * The above and more restrictions on a pseudo-locked region are checked
 * for and enforced before the resource group enters the locksetup mode.
 *
 * Returns: 0 if the resource group successfully entered locksetup mode, <0
 * on failure. On failure the last_cmd_status buffer is updated with text to
 * communicate details of failure to the user.
 */
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
	int ret;

	/*
	 * The default resource group can neither be removed nor lose the
	 * default closid associated with it.
	 */
	if (rdtgrp == &rdtgroup_default) {
		rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
		return -EINVAL;
	}

	/*
	 * Cache Pseudo-locking not supported when CDP is enabled.
	 *
	 * Some things to consider if you would like to enable this
	 * support (using L3 CDP as example):
	 * - When CDP is enabled two separate resources are exposed,
	 *   L3DATA and L3CODE, but they are actually on the same cache.
	 *   The implication for pseudo-locking is that if a
	 *   pseudo-locked region is created on a domain of one
	 *   resource (eg. L3CODE), then a pseudo-locked region cannot
	 *   be created on that same domain of the other resource
	 *   (eg. L3DATA). This is because the creation of a
	 *   pseudo-locked region involves a call to wbinvd that will
	 *   affect all cache allocations on a particular domain.
	 * - Considering the previous, it may be possible to only
	 *   expose one of the CDP resources to pseudo-locking and
	 *   hide the other. For example, we could consider only
	 *   exposing L3DATA and, since the L3 cache is unified, it is
	 *   still possible to place instructions there and execute them.
	 * - If only one region is exposed to pseudo-locking we should
	 *   still keep in mind that availability of a portion of cache
	 *   for pseudo-locking should take into account both resources.
	 *   Similarly, if a pseudo-locked region is created in one
	 *   resource, the portion of cache used by it should be made
	 *   unavailable to all future allocations from both resources.
	 */
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
	    resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
		rdt_last_cmd_puts("CDP enabled\n");
		return -EINVAL;
	}

	/*
	 * Not knowing the bits to disable prefetching implies that this
	 * platform does not support Cache Pseudo-Locking.
	 */
	prefetch_disable_bits = get_prefetch_disable_bits();
	if (prefetch_disable_bits == 0) {
		rdt_last_cmd_puts("Pseudo-locking not supported\n");
		return -EINVAL;
	}

	if (rdtgroup_monitor_in_progress(rdtgrp)) {
		rdt_last_cmd_puts("Monitoring in progress\n");
		return -EINVAL;
	}

	if (rdtgroup_tasks_assigned(rdtgrp)) {
		rdt_last_cmd_puts("Tasks assigned to resource group\n");
		return -EINVAL;
	}

	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
		rdt_last_cmd_puts("CPUs assigned to resource group\n");
		return -EINVAL;
	}

	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
		return -EIO;
	}

	ret = pseudo_lock_init(rdtgrp);
	if (ret) {
		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
		goto out_release;
	}

	/*
	 * If this system is capable of monitoring, a rmid would have been
	 * allocated when the control group was created. This is not needed
	 * anymore when this group would be used for pseudo-locking. This
	 * is safe to call on platforms not capable of monitoring.
	 */
	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);

	ret = 0;
	goto out;

out_release:
	rdtgroup_locksetup_user_restore(rdtgrp);
out:
	return ret;
}

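/*
 * As a rough sketch only (resource group name, mount points and schemata
 * below are examples, not requirements), the locksetup flow above is
 * typically driven from user space like this:
 *
 *	# mkdir /sys/fs/resctrl/newlock
 *	# echo pseudo-locksetup > /sys/fs/resctrl/newlock/mode
 *	# echo "L2:1=0x3" > /sys/fs/resctrl/newlock/schemata
 *
 * Writing a schemata to a group in pseudo-locksetup mode is what
 * eventually triggers rdtgroup_pseudo_lock_create() further below.
 */
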
/**
 * rdtgroup_locksetup_exit - Resource group exits locksetup mode
 * @rdtgrp: resource group
 *
 * When a resource group exits locksetup mode the earlier restrictions are
 * lifted.
 *
 * Return: 0 on success, <0 on failure
 */
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
	int ret;

	if (resctrl_arch_mon_capable()) {
		ret = alloc_rmid(rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			return ret;
		}
		rdtgrp->mon.rmid = ret;
	}

	ret = rdtgroup_locksetup_user_restore(rdtgrp);
	if (ret) {
		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
		return ret;
	}

	pseudo_lock_free(rdtgrp);
	return 0;
}

/**
 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
 * @d: RDT domain
 * @cbm: CBM to test
 *
 * @d represents a cache instance and @cbm a capacity bitmask that is
 * considered for it. Determine if @cbm overlaps with any existing
 * pseudo-locked region on @d.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: true if @cbm overlaps with pseudo-locked region on @d, false
 * otherwise.
 */
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
	unsigned int cbm_len;
	unsigned long cbm_b;

	if (d->plr) {
		cbm_len = d->plr->s->res->cache.cbm_len;
		cbm_b = d->plr->cbm;
		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
			return true;
	}
	return false;
}

/**
 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
 * @d: RDT domain under test
 *
 * The setup of a pseudo-locked region affects all cache instances within
 * the hierarchy of the region. It is thus essential to know if any
 * pseudo-locked regions exist within a cache hierarchy to prevent any
 * attempts to create new pseudo-locked regions in the same hierarchy.
 *
 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
 *         if it is not possible to test due to memory allocation issue,
 *         false otherwise.
 */
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
	struct rdt_ctrl_domain *d_i;
	cpumask_var_t cpu_with_psl;
	struct rdt_resource *r;
	bool ret = false;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
		return true;

	/*
	 * First determine which cpus have pseudo-locked regions
	 * associated with them.
	 */
	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) {
			if (d_i->plr)
				cpumask_or(cpu_with_psl, cpu_with_psl,
					   &d_i->hdr.cpu_mask);
		}
	}

	/*
	 * Next test if new pseudo-locked region would intersect with
	 * existing region.
	 */
	if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
		ret = true;

	free_cpumask_var(cpu_with_psl);
	return ret;
}

/**
 * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
 * @_plr: pseudo-lock region to measure
 *
 * There is no deterministic way to test if a memory region is cached. One
 * way is to measure how long it takes to read the memory; the speed of
 * access is a good way to learn how close to the cpu the data was. Even
 * more, if the prefetcher is disabled and the memory is read at a stride
 * of half the cache line, then a cache miss will be easy to spot since the
 * read of the first half would be significantly slower than the read of
 * the second half.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
static int measure_cycles_lat_fn(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	u32 saved_low, saved_high;
	unsigned long i;
	u64 start, end;
	void *mem_r;

	local_irq_disable();
	/*
	 * Disable hardware prefetchers.
	 */
	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
	mem_r = READ_ONCE(plr->kmem);
	/*
	 * Dummy execute of the time measurement to load the needed
	 * instructions into the L1 instruction cache.
	 */
	start = rdtsc_ordered();
	for (i = 0; i < plr->size; i += 32) {
		start = rdtsc_ordered();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
		end = rdtsc_ordered();
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}
	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

/*
 * Create a perf_event_attr for the hit and miss perf events that will
 * be used during the performance measurement. A perf_event maintains
 * a pointer to its perf_event_attr so a unique attribute structure is
 * created for each perf_event.
 *
 * The actual configuration of the event is set right before use in order
 * to use the X86_CONFIG macro.
 */
static struct perf_event_attr perf_miss_attr = {
	.type = PERF_TYPE_RAW,
	.size = sizeof(struct perf_event_attr),
	.pinned = 1,
	.disabled = 0,
	.exclude_user = 1,
};

static struct perf_event_attr perf_hit_attr = {
	.type = PERF_TYPE_RAW,
	.size = sizeof(struct perf_event_attr),
	.pinned = 1,
	.disabled = 0,
	.exclude_user = 1,
};

struct residency_counts {
	u64 miss_before, hits_before;
	u64 miss_after, hits_after;
};

static int measure_residency_fn(struct perf_event_attr *miss_attr,
				struct perf_event_attr *hit_attr,
				struct pseudo_lock_region *plr,
				struct residency_counts *counts)
{
	u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
	struct perf_event *miss_event, *hit_event;
	int hit_pmcnum, miss_pmcnum;
	u32 saved_low, saved_high;
	unsigned int line_size;
	unsigned int size;
	unsigned long i;
	void *mem_r;
	u64 tmp;

	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
						      NULL, NULL, NULL);
	if (IS_ERR(miss_event))
		goto out;

	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
						     NULL, NULL, NULL);
	if (IS_ERR(hit_event))
		goto out_miss;

	local_irq_disable();
	/*
	 * Check any possible error state of events used by performing
	 * one local read.
	 */
	if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}
	if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}

	/*
	 * Disable hardware prefetchers.
	 */
	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

	/* Initialize rest of local variables */
	/*
	 * Performance event has been validated right before this with
	 * interrupts disabled - it is thus safe to read the counter index.
	 */
	miss_pmcnum = x86_perf_rdpmc_index(miss_event);
	hit_pmcnum = x86_perf_rdpmc_index(hit_event);
	line_size = READ_ONCE(plr->line_size);
	mem_r = READ_ONCE(plr->kmem);
	size = READ_ONCE(plr->size);
	/*
	 * Read counter variables twice - first to load the instructions
	 * used in L1 cache, second to capture accurate value that does not
	 * include cache misses incurred because of instruction loads.
	 */
	rdpmcl(hit_pmcnum, hits_before);
	rdpmcl(miss_pmcnum, miss_before);
	/*
	 * From SDM: back-to-back fast reads are not guaranteed
	 * to be monotonic.
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	rdpmcl(hit_pmcnum, hits_before);
	rdpmcl(miss_pmcnum, miss_before);
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
	}
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	rdpmcl(hit_pmcnum, hits_after);
	rdpmcl(miss_pmcnum, miss_after);
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	/* Re-enable hardware prefetchers */
	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
out_hit:
	perf_event_release_kernel(hit_event);
out_miss:
	perf_event_release_kernel(miss_event);
out:
	/*
	 * All counts will be zero on failure.
	 */
	counts->miss_before = miss_before;
	counts->hits_before = hits_before;
	counts->miss_after = miss_after;
	counts->hits_after = hits_after;
	return 0;
}

static int measure_l2_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts = {0};

	/*
	 * Non-architectural event for the Goldmont Microarchitecture
	 * from Intel x86 Architecture Software Developer Manual (SDM):
	 * MEM_LOAD_UOPS_RETIRED D1H (event number)
	 * Umask values:
	 *     L2_HIT   02H
	 *     L2_MISS  10H
	 */
	switch (boot_cpu_data.x86_vfm) {
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
						   .umask = 0x10);
		perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
						  .umask = 0x2);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
	/*
	 * If a failure prevented the measurements from succeeding
	 * tracepoints will still be written and all counts will be zero.
	 */
	trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
			     counts.miss_after - counts.miss_before);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

static int measure_l3_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts = {0};

	/*
	 * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
	 * has two "no fix" errata associated with it: BDM35 and BDM100. On
	 * this platform the following events are used instead:
	 * LONGEST_LAT_CACHE 2EH (Documented in SDM)
	 *       REFERENCE 4FH
	 *       MISS      41H
	 */

	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL_X:
		/* On BDW the hit event counts references, not hits */
		perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
						  .umask = 0x4f);
		perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
						   .umask = 0x41);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
	/*
	 * If a failure prevented the measurements from succeeding
	 * tracepoints will still be written and all counts will be zero.
	 */

	counts.miss_after -= counts.miss_before;
	if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_X) {
		/*
		 * On BDW references and misses are counted, need to adjust.
		 * Sometimes the "hits" counter is a bit more than the
		 * references, for example, x references but x + 1 hits.
		 * To not report invalid hit values in this case we treat
		 * that as misses equal to references.
		 */
		/* First compute the number of cache references measured */
		counts.hits_after -= counts.hits_before;
		/* Next convert references to cache hits */
		counts.hits_after -= min(counts.miss_after, counts.hits_after);
	} else {
		counts.hits_after -= counts.hits_before;
	}

	trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

/**
 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
 * @rdtgrp: Resource group to which the pseudo-locked region belongs.
 * @sel: Selector of which measurement to perform on a pseudo-locked region.
 *
 * The measurement of latency to access a pseudo-locked region should be
 * done from a cpu that is associated with that pseudo-locked region.
 * Determine which cpu is associated with this region, start a thread on
 * that cpu to perform the measurement, and wait for that thread to complete.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;
	struct task_struct *thread;
	unsigned int cpu;
	int ret = -1;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
		goto out;
	}

	if (!plr->d) {
		ret = -ENODEV;
		goto out;
	}

	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->hdr.cpu_mask);
	if (!cpu_online(cpu)) {
		ret = -ENODEV;
		goto out;
	}

	plr->cpu = cpu;

	if (sel == 1)
		thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
						cpu_to_node(cpu),
						"pseudo_lock_measure/%u",
						cpu);
	else if (sel == 2)
		thread = kthread_create_on_node(measure_l2_residency, plr,
						cpu_to_node(cpu),
						"pseudo_lock_measure/%u",
						cpu);
	else if (sel == 3)
		thread = kthread_create_on_node(measure_l3_residency, plr,
						cpu_to_node(cpu),
						"pseudo_lock_measure/%u",
						cpu);
	else
		goto out;

	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		goto out;
	}
	kthread_bind(thread, cpu);
	wake_up_process(thread);

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0)
		goto out;

	ret = 0;

out:
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

static ssize_t pseudo_lock_measure_trigger(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct rdtgroup *rdtgrp = file->private_data;
	size_t buf_size;
	char buf[32];
	int ret;
	int sel;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	ret = kstrtoint(buf, 10, &sel);
	if (ret == 0) {
		if (sel != 1 && sel != 2 && sel != 3)
			return -EINVAL;
		ret = debugfs_file_get(file->f_path.dentry);
		if (ret)
			return ret;
		ret = pseudo_lock_measure_cycles(rdtgrp, sel);
		if (ret == 0)
			ret = count;
		debugfs_file_put(file->f_path.dentry);
	}

	return ret;
}

static const struct file_operations pseudo_measure_fops = {
	.write = pseudo_lock_measure_trigger,
	.open = simple_open,
	.llseek = default_llseek,
};

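/*
 * A sketch of how the debugfs trigger above is typically exercised (the
 * debugfs mount point and group name below are examples only): writing a
 * selector of 1, 2 or 3 starts the corresponding measurement thread and
 * the results are emitted as tracepoints:
 *
 *	# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * Selector 1 reports per-read latency (pseudo_lock_mem_latency), selectors
 * 2 and 3 report L2/L3 hit and miss counts (pseudo_lock_l2/pseudo_lock_l3).
 */
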
/**
 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-lock region belongs
 *
 * Called when a resource group in the pseudo-locksetup mode receives a
 * valid schemata that should be pseudo-locked. Since the resource group is
 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
 * allocated and initialized with the essential information. If a failure
 * occurs the resource group remains in the pseudo-locksetup mode with the
 * &struct pseudo_lock_region associated with it, but cleared from all
 * information and ready for the user to re-attempt pseudo-locking by
 * writing the schemata again.
 *
 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
 * on failure. Descriptive error will be written to last_cmd_status buffer.
 */
int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;
	struct task_struct *thread;
	unsigned int new_minor;
	struct device *dev;
	int ret;

	ret = pseudo_lock_region_alloc(plr);
	if (ret < 0)
		return ret;

	ret = pseudo_lock_cstates_constrain(plr);
	if (ret < 0) {
		ret = -EINVAL;
		goto out_region;
	}

	plr->thread_done = 0;

	thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
					cpu_to_node(plr->cpu),
					"pseudo_lock/%u", plr->cpu);
	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
		goto out_cstates;
	}

	kthread_bind(thread, plr->cpu);
	wake_up_process(thread);

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0) {
		/*
		 * If the thread does not get on the CPU for whatever
		 * reason and the process which sets up the region is
		 * interrupted then this will leave the thread in runnable
		 * state and once it gets on the CPU it will dereference
		 * the cleared, but not freed, plr struct resulting in an
		 * empty pseudo-locking loop.
		 */
		rdt_last_cmd_puts("Locking thread interrupted\n");
		goto out_cstates;
	}

	ret = pseudo_lock_minor_get(&new_minor);
	if (ret < 0) {
		rdt_last_cmd_puts("Unable to obtain a new minor number\n");
		goto out_cstates;
	}

	/*
	 * Unlock access but do not release the reference. The
	 * pseudo-locked region will still be here on return.
	 *
	 * The mutex has to be released temporarily to avoid a potential
	 * deadlock with the mm->mmap_lock which is obtained in the
	 * device_create() and debugfs_create_dir() callpath below as well as
	 * before the mmap() callback is called.
	 */
	mutex_unlock(&rdtgroup_mutex);

	if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
		plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
						      debugfs_resctrl);
		if (!IS_ERR_OR_NULL(plr->debugfs_dir))
			debugfs_create_file("pseudo_lock_measure", 0200,
					    plr->debugfs_dir, rdtgrp,
					    &pseudo_measure_fops);
	}

	dev = device_create(&pseudo_lock_class, NULL,
			    MKDEV(pseudo_lock_major, new_minor),
			    rdtgrp, "%s", rdtgrp->kn->name);

	mutex_lock(&rdtgroup_mutex);

	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		rdt_last_cmd_printf("Failed to create character device: %d\n",
				    ret);
		goto out_debugfs;
	}

	/* We released the mutex - check if group was removed while we did so */
	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
		goto out_device;
	}

	plr->minor = new_minor;

	rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
	closid_free(rdtgrp->closid);
	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);

	ret = 0;
	goto out;

out_device:
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
out_debugfs:
	debugfs_remove_recursive(plr->debugfs_dir);
	pseudo_lock_minor_release(new_minor);
out_cstates:
	pseudo_lock_cstates_relax(plr);
out_region:
	pseudo_lock_region_clear(plr);
out:
	return ret;
}

/**
 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
 * @rdtgrp: resource group to which the pseudo-locked region belongs
 *
 * The removal of a pseudo-locked region can be initiated when the resource
 * group is removed via a "rmdir" from user space or the
 * unmount of the resctrl filesystem. On removal the resource group does
 * not go back to pseudo-locksetup mode before it is removed, instead it is
 * removed directly. There is thus asymmetry with the creation where the
 * &struct pseudo_lock_region is removed here while it was not created in
 * rdtgroup_pseudo_lock_create().
 *
 * Return: void
 */
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * Default group cannot be a pseudo-locked region so we can
		 * free closid here.
		 */
		closid_free(rdtgrp->closid);
		goto free;
	}

	pseudo_lock_cstates_relax(plr);
	debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
	pseudo_lock_minor_release(plr->minor);

free:
	pseudo_lock_free(rdtgrp);
}

static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
{
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);

	rdtgrp = region_find_by_minor(iminor(inode));
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	filp->private_data = rdtgrp;
	atomic_inc(&rdtgrp->waitcount);
	/* Perform a non-seekable open - llseek is not supported */
	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
{
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);
	rdtgrp = filp->private_data;
	WARN_ON(!rdtgrp);
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}
	filp->private_data = NULL;
	atomic_dec(&rdtgrp->waitcount);
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
{
	/* Not supported */
	return -EINVAL;
}

static const struct vm_operations_struct pseudo_mmap_ops = {
	.mremap = pseudo_lock_dev_mremap,
};

static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct pseudo_lock_region *plr;
	struct rdtgroup *rdtgrp;
	unsigned long physical;
	unsigned long psize;

	mutex_lock(&rdtgroup_mutex);

	rdtgrp = filp->private_data;
	WARN_ON(!rdtgrp);
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	plr = rdtgrp->plr;

	if (!plr->d) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	/*
	 * Task is required to run with affinity to the cpus associated
	 * with the pseudo-locked region. If this is not the case the task
	 * may be scheduled elsewhere and invalidate entries in the
	 * pseudo-locked region.
	 */
	if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EINVAL;
	}

	physical = __pa(plr->kmem) >> PAGE_SHIFT;
	psize = plr->size - off;

	if (off > plr->size) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENOSPC;
	}

	/*
	 * Ensure changes are carried directly to the memory being mapped,
	 * do not allow copy-on-write mapping.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EINVAL;
	}

	if (vsize > psize) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENOSPC;
	}

	memset(plr->kmem + off, 0, vsize);

	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
			    vsize, vma->vm_page_prot)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EAGAIN;
	}
	vma->vm_ops = &pseudo_mmap_ops;
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

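/*
 * A minimal user space sketch of how the mmap interface above is meant to
 * be used (device name and region size below are examples only): the task
 * first restricts its CPU affinity to the CPUs of the pseudo-locked
 * region's cache instance, then maps the character device MAP_SHARED:
 *
 *	int fd = open("/dev/pseudo_lock/newlock", O_RDWR);
 *	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * A copy-on-write (private) mapping, or a caller whose affinity extends
 * beyond the region's CPUs, is rejected with -EINVAL above.
 */
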
static const struct file_operations pseudo_lock_dev_fops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = NULL,
	.open = pseudo_lock_dev_open,
	.release = pseudo_lock_dev_release,
	.mmap = pseudo_lock_dev_mmap,
};

int rdt_pseudo_lock_init(void)
{
	int ret;

	ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
	if (ret < 0)
		return ret;

	pseudo_lock_major = ret;

	ret = class_register(&pseudo_lock_class);
	if (ret) {
		unregister_chrdev(pseudo_lock_major, "pseudo_lock");
		return ret;
	}

	return 0;
}

void rdt_pseudo_lock_release(void)
{
	class_unregister(&pseudo_lock_class);
	unregister_chrdev(pseudo_lock_major, "pseudo_lock");
	pseudo_lock_major = 0;
}