arch/x86/kernel/cpu/resctrl/ctrlmondata.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/tick.h>

#include "internal.h"
/*
 * Check whether an MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded up to the next
 * control step available on the hardware.
 */
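/*
 * Illustrative example (not part of the original source): with
 * membw.min_bw == 10, membw.bw_gran == 10 and default_ctrl == 100,
 * a write of "23" is accepted and staged as roundup(23, 10) == 30,
 * while "5" and "110" are rejected as out of range.
 */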
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_ctrl_domain *d)
{
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}
/*
 * Check whether a cache bit mask is valid.
 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
 *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
 *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
 *
 * Haswell does not support a non-contiguous 1s value and additionally
 * requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
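/*
 * Illustrative example (not part of the original source): with
 * cbm_len == 11 and no sparse-bitmask support, "3f0" is a valid
 * contiguous mask, while "5a" is rejected because its set bits are
 * not consecutive; with min_cbm_bits == 2, the single-bit mask "10"
 * is also rejected.
 */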
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmasks &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}
/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_ctrl_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}
/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal and must match one of
 * the "id"s for this resource.
 */
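/*
 * For example (illustrative, not from the original source), the line
 * "0=7ff;1=3f0" stages mask 0x7ff for domain 0 and 0x3f0 for domain 1.
 */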
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	struct rdt_ctrl_domain *d;
	char *dom = NULL, *id;
	unsigned long dom_id;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		if (d->hdr.id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * We are in pseudo-locking setup mode and
				 * have just parsed a valid CBM that should
				 * be pseudo-locked. Only one locked region
				 * per resource group and domain is allowed,
				 * so just do the required initialization
				 * for a single region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
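/*
 * Illustrative example (not part of the original source): with CDP
 * enabled, the hardware control array interleaves code/data entries,
 * so closid 1 maps to index 2 for CDP_DATA and index 3 for CDP_CODE;
 * with CDP disabled (CDP_NONE) the index is the closid itself.
 */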
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.res = r;
	msr_param.dom = d;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(&msr_param);

	return 0;
}
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_ctrl_domain *hw_dom;
	struct msr_param msr_param;
	struct rdt_ctrl_domain *d;
	enum resctrl_conf_type t;
	u32 idx;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		hw_dom = resctrl_to_arch_ctrl_dom(d);
		msr_param.res = NULL;
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
				continue;
			hw_dom->ctrl_val[idx] = cfg->new_ctrl;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
				msr_param.dom = d;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
		if (msr_param.res)
			smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
	}

	return 0;
}
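/*
 * Illustrative example (not part of the original source): if the staged
 * updates for one domain touch indices 2 and 5, the loop above merges
 * them into msr_param.low == 2, msr_param.high == 6, so a single IPI
 * via rdt_ctrl_update() covers the index range [2, 6).
 */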
static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}
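/*
 * Illustrative example (not part of the original source): user space
 * writes the schemata file as one "resource:id=val;id=val" line per
 * resource, terminated by a newline, e.g.:
 *
 *	L3:0=7ff;1=3f0
 *	MB:0=50;1=100
 *
 * rdtgroup_schemata_write() below parses each line, stages the values,
 * then applies them per resource.
 */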
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to a pseudo-locked region are allowed. It has to be
	 * removed and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	rdt_staged_configs_clear();

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
	u32 idx = get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
}
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_ctrl_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
		if (sep)
			seq_puts(s, ";");

		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, r->format_str, dom->hdr.id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}
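/*
 * Illustrative example (not part of the original source): for an L3
 * resource with two cache domains, show_doms() emits a line such as
 * "L3:0=7ff;1=3f0", formatting each "id=value" pair with r->format_str.
 */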
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->hdr.id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
static int smp_mon_event_count(void *arg)
{
	mon_event_count(arg);

	return 0;
}
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
		    cpumask_t *cpumask, int evtid, int first)
{
	int cpu;

	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/*
	 * Set up the parameters to pass to mon_event_count() to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->first = first;
	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	if (IS_ERR(rr->arch_mon_ctx)) {
		rr->err = -EINVAL;
		return;
	}

	cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);

	/*
	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but all the
	 * CPUs may be nohz_full, in which case pick a CPU to IPI anyway.
	 * MPAM's resctrl_arch_rmid_read() is unable to read the counters
	 * on some platforms if it is called in IRQ context.
	 */
	if (tick_nohz_full_cpu(cpu))
		smp_call_function_any(cpumask, mon_event_count, rr, 1);
	else
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);

	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
}
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rdt_domain_hdr *hdr;
	struct rmid_read rr = {0};
	struct rdt_mon_domain *d;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;
	r = &rdt_resources_all[resid].r_resctrl;

	if (md.u.sum) {
		/*
		 * This file requires summing across all domains that share
		 * the L3 cache id that was provided in the "domid" field of
		 * the mon_data_bits union. Search all domains in the resource
		 * for one that matches this cache id.
		 */
		list_for_each_entry(d, &r->mon_domains, hdr.list) {
			if (d->ci->id == domid) {
				rr.ci = d->ci;
				mon_event_read(&rr, r, NULL, rdtgrp,
					       &d->ci->shared_cpu_map, evtid, false);
				goto checkresult;
			}
		}
		ret = -ENOENT;
		goto out;
	} else {
		/*
		 * This file provides data from a single domain. Search
		 * the resource to find the domain with "domid".
		 */
		hdr = rdt_find_domain(&r->mon_domains, domid, NULL);
		if (!hdr || WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN)) {
			ret = -ENOENT;
			goto out;
		}
		d = container_of(hdr, struct rdt_mon_domain, hdr);
		mon_event_read(&rr, r, d, rdtgrp, &d->hdr.cpu_mask, evtid, false);
	}

checkresult:

	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}