// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
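/*
 * For orientation only, a rough sketch of how userspace might invoke this
 * syscall directly (the raw syscall(2) form is an assumption here; a libc may
 * provide its own wrapper):
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	// NULL mask + zero cpusetsize means "all online CPUs", flags must be 0.
 *	if (!syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) &&
 *	    (pair.value & RISCV_HWPROBE_IMA_V))
 *		;	// the vector extension is usable on every online hart
 */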
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		} else if (id != cpu_id) {
			/*
			 * If there's a mismatch for the given set, return -1
			 * in the value.
			 */
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;
	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)								\
	do {									\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
			pair->value |= RISCV_HWPROBE_EXT_##ext;			\
		else								\
			missing |= RISCV_HWPROBE_EXT_##ext;			\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZIHINTPAUSE);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}
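/*
 * Because any hart that lacks an extension sets the corresponding bit in
 * "missing", the reported RISCV_HWPROBE_KEY_IMA_EXT_0 value is effectively the
 * intersection over the requested CPU set. A hedged userspace sketch that
 * relies on this (use_zba() is a hypothetical helper):
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	if (!syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) &&
 *	    (pair.value & RISCV_HWPROBE_EXT_ZBA))
 *		use_zba();	// Zba is present on every CPU in the set
 */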
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif
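/*
 * A sketch of the typical consumer of this key: a userspace library selecting
 * a misalignment-tolerant routine only when every CPU in the set reports fast
 * accesses (memcpy_unaligned_ok() is a hypothetical flag setter):
 *
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
 *	};
 *
 *	if (!syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) &&
 *	    pair.value == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST)
 *		memcpy_unaligned_ok();
 */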
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Report whether it's supported or not, even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to the user.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;

	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
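/*
 * The forward-compatibility rule above, seen from userspace: probing a key the
 * running kernel does not know about still returns success, so callers should
 * check the returned key field rather than the syscall's return value
 * (SOME_FUTURE_KEY is a placeholder, not a real key):
 *
 *	struct riscv_hwprobe pair = { .key = SOME_FUTURE_KEY };
 *
 *	if (!syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) && pair.key == -1)
 *		;	// key unrecognized by this kernel, pair.value is 0
 */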
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
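/*
 * A hedged sketch of the explicit CPU-mask form handled above, using the glibc
 * cpu_set_t macros (an assumption; any suitably sized bitmask works): probe
 * only CPUs 0 and 1 instead of every online hart.
 *
 *	cpu_set_t set;
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(set),
 *		(unsigned long *)&set, 0);
 */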
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}
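/*
 * Sketch of the RISCV_HWPROBE_WHICH_CPUS flow implemented above: userspace
 * fills in both the key and the wanted value, and the kernel narrows the
 * provided mask to the CPUs whose answer matches. For example, "which online
 * CPUs have fast misaligned scalar accesses?":
 *
 *	cpu_set_t set;
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
 *		.value = RISCV_HWPROBE_MISALIGNED_SCALAR_FAST,
 *	};
 *
 *	CPU_ZERO(&set);		// an empty mask starts from all online CPUs
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(set),
 *		(unsigned long *)&set, RISCV_HWPROBE_WHICH_CPUS);
 *	// on return, "set" holds only the CPUs that satisfied the pair
 */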
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}
#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_time_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;

	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);
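/*
 * Rough sketch, under the assumptions above, of how the vDSO side is expected
 * to consume this data (pseudocode, not the actual vDSO source): answer from
 * the precomputed "all CPUs" table when that is safe, otherwise fall back to
 * the real syscall.
 *
 *	if (flags == 0 && (all_cpus_requested || avd->homogeneous_cpus)) {
 *		for (i = 0; i < pair_count; i++)
 *			pairs[i].value = avd->all_cpu_hwprobe_values[pairs[i].key];
 *		return 0;
 *	}
 *	return riscv_hwprobe_syscall(pairs, pair_count, cpusetsize, cpus, flags);
 */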
#endif /* CONFIG_MMU */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}