// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#define KVM_ARM_SMCCC_STD_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES			\
	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)

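/*
 * Service the SMCCC-based PTP hypercall: return a consistent snapshot of
 * the host wall-clock time and the guest-visible counter value so that a
 * guest can discipline its clock against the host.
 */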
static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
	struct system_time_snapshot systime_snapshot;
	u64 cycles = ~0UL;
	u32 feature;

	/*
	 * system time and counter value must be captured at the same
	 * time to keep consistency and precision.
	 */
	ktime_get_snapshot(&systime_snapshot);

	/*
	 * This is only valid if the current clocksource is the
	 * architected counter, as this is the only one the guest
	 * can see.
	 */
	if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
		return;

	/*
	 * The guest selects one of the two reference counters
	 * (virtual or physical) with the first argument of the SMCCC
	 * call. In case the identifier is not supported, error out.
	 */
	feature = smccc_get_arg1(vcpu);
	switch (feature) {
	case KVM_PTP_VIRT_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
		break;
	case KVM_PTP_PHYS_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
		break;
	default:
		return;
	}

	/*
	 * This relies on the top bit of val[0] never being set for
	 * valid values of system time, because that is *really* far
	 * in the future (about 292 years from 1970, and at that stage
	 * nobody will give a damn about it).
	 */
	val[0] = upper_32_bits(systime_snapshot.real);
	val[1] = lower_32_bits(systime_snapshot.real);
	val[2] = upper_32_bits(cycles);
	val[3] = lower_32_bits(cycles);
}

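/*
 * Returns true for SMCCC functions that KVM services by default, i.e.
 * those that are not gated by the bitmapped pseudo-firmware feature
 * registers (SMCCC version/feature discovery and the PSCI ranges).
 */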
static bool kvm_smccc_default_allowed(u32 func_id)
{
	switch (func_id) {
	/*
	 * List of function-ids that are not gated with the bitmapped
	 * feature firmware registers, and are to be allowed for
	 * servicing the call by default.
	 */
	case ARM_SMCCC_VERSION_FUNC_ID:
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		return true;
	default:
		/* PSCI 0.2 and up is in the 0:0x1f range */
		if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
		    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
			return true;

		/*
		 * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
		 * its own function-id base and range
		 */
		if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
			return true;

		return false;
	}
}

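/*
 * Check whether @func_id is enabled in the VM's bitmapped pseudo-firmware
 * feature registers (standard, standard-hypervisor and vendor-hypervisor
 * bitmaps), which userspace may have restricted.
 */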
static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

	switch (func_id) {
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
				&smccc_feat->std_bmap);
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				&smccc_feat->std_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
				&smccc_feat->vendor_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
				&smccc_feat->vendor_hyp_bmap);
	default:
		return false;
	}
}

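/*
 * Function-ID ranges reserved for the Arm Architecture service owner
 * (SMC32 and SMC64). These are always handled by KVM so that a userspace
 * filter cannot misrepresent Spectre mitigation state to the guest.
 */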
#define SMC32_ARCH_RANGE_BEGIN	ARM_SMCCC_VERSION_FUNC_ID
#define SMC32_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_32,		\
						   0, ARM_SMCCC_FUNC_MASK)

#define SMC64_ARCH_RANGE_BEGIN	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_64,		\
						   0, 0)
#define SMC64_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_64,		\
						   0, ARM_SMCCC_FUNC_MASK)

static int kvm_smccc_filter_insert_reserved(struct kvm *kvm)
{
	int r;

	/*
	 * Prevent userspace from handling any SMCCC calls in the architecture
	 * range, avoiding the risk of misrepresenting Spectre mitigation status
	 * to the guest.
	 */
	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_destroy;

	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_destroy;

	return 0;
out_destroy:
	mtree_destroy(&kvm->arch.smccc_filter);
	return r;
}

static bool kvm_smccc_filter_configured(struct kvm *kvm)
{
	return !mtree_empty(&kvm->arch.smccc_filter);
}

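/*
 * Install a userspace-supplied filter range. Roughly (illustrative sketch,
 * not taken from this file), userspace configures this before any vCPU has
 * run via the KVM_ARM_VM_SMCCC_CTRL / KVM_ARM_VM_SMCCC_FILTER VM attribute:
 *
 *	struct kvm_smccc_filter filter = {
 *		.base		= ARM_SMCCC_TRNG_VERSION,
 *		.nr_functions	= 1,
 *		.action		= KVM_SMCCC_FILTER_FWD_TO_USER,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VM_SMCCC_CTRL,
 *		.attr	= KVM_ARM_VM_SMCCC_FILTER,
 *		.addr	= (__u64)&filter,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */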
static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	struct kvm_smccc_filter filter;
	u32 start, end;
	int r;

	if (copy_from_user(&filter, uaddr, sizeof(filter)))
		return -EFAULT;

	if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
		return -EINVAL;

	start = filter.base;
	end = start + filter.nr_functions - 1;

	if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm)) {
		r = -EBUSY;
		goto out_unlock;
	}

	if (!kvm_smccc_filter_configured(kvm)) {
		r = kvm_smccc_filter_insert_reserved(kvm);
		if (WARN_ON_ONCE(r))
			goto out_unlock;
	}

	r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
			       xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return r;
}

static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
{
	unsigned long idx = func_id;
	void *val;

	if (!kvm_smccc_filter_configured(kvm))
		return KVM_SMCCC_FILTER_HANDLE;

	/*
	 * But where's the error handling, you say?
	 *
	 * mt_find() returns NULL if no entry was found, which just so happens
	 * to match KVM_SMCCC_FILTER_HANDLE.
	 */
	val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
	return xa_to_value(val);
}

static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
{
	/*
	 * Intervening actions in the SMCCC filter take precedence over the
	 * pseudo-firmware register bitmaps.
	 */
	u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
	if (action != KVM_SMCCC_FILTER_HANDLE)
		return action;

	if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
	    kvm_smccc_default_allowed(func_id))
		return KVM_SMCCC_FILTER_HANDLE;

	return KVM_SMCCC_FILTER_DENY;
}

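/*
 * Build a KVM_EXIT_HYPERCALL exit so that a filtered call is forwarded to
 * userspace, recording the function ID and whether the guest used an SMC
 * (and a 16-bit instruction) in run->hypercall.
 */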
static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
{
	u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
	struct kvm_run *run = vcpu->run;
	u64 flags = 0;

	if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
		flags |= KVM_HYPERCALL_EXIT_SMC;

	if (!kvm_vcpu_trap_il_is32bit(vcpu))
		flags |= KVM_HYPERCALL_EXIT_16BIT;

	run->exit_reason = KVM_EXIT_HYPERCALL;
	run->hypercall = (typeof(run->hypercall)) {
		.nr	= func_id,
		.flags	= flags,
	};
}

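/*
 * Central dispatcher for guest SMCCC calls: apply the filter action, then
 * either service the call in KVM, hand it to the TRNG/PSCI handlers, or
 * forward it to userspace. Returns 1 to resume the guest, 0 to exit to
 * userspace.
 */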
int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	u32 func_id = smccc_get_function(vcpu);
	u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
	u32 feature;
	u8 action;
	gpa_t gpa;

	action = kvm_smccc_get_action(vcpu, func_id);
	switch (action) {
	case KVM_SMCCC_FILTER_HANDLE:
		break;
	case KVM_SMCCC_FILTER_DENY:
		goto out;
	case KVM_SMCCC_FILTER_FWD_TO_USER:
		kvm_prepare_hypercall_exit(vcpu, func_id);
		return 0;
	default:
		WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
		goto out;
	}

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (arm64_get_spectre_v2_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (arm64_get_spectre_v4_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				/*
				 * SSBS everywhere: Indicate no firmware
				 * support, as the SSBS support will be
				 * indicated to the guest and the default is
				 * safe.
				 *
				 * Otherwise, expose a permanent mitigation
				 * to the guest, and hide SSBS so that the
				 * guest stays protected.
				 */
				if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
					break;
				fallthrough;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_3:
			switch (arm64_get_spectre_bhb_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				     &smccc_feat->std_hyp_bmap))
				val[0] = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val[0] = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != INVALID_GPA)
			val[0] = gpa;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = smccc_feat->vendor_hyp_bmap;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		kvm_ptp_get_time(vcpu, val);
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

out:
	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return 1;
}

static const u64 kvm_arm_fw_reg_ids[] = {
	KVM_REG_ARM_PSCI_VERSION,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
	KVM_REG_ARM_STD_BMAP,
	KVM_REG_ARM_STD_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP,
};

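/*
 * Set up the per-VM SMCCC state: enable all bitmapped pseudo-firmware
 * features by default and initialise the (empty) SMCCC filter maple tree.
 */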
void kvm_arm_init_hypercalls(struct kvm *kvm)
{
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

	smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
	smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
	smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;

	mt_init(&kvm->arch.smccc_filter);
}

void kvm_arm_teardown_hypercalls(struct kvm *kvm)
{
	mtree_destroy(&kvm->arch.smccc_filter);
}

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
		if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
			return -EFAULT;
	}

	return 0;
}

#define KVM_REG_FEATURE_LEVEL_MASK	GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As for the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is there at
			 * all times.
			 */
			if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}

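/*
 * KVM_GET_ONE_REG handler for the firmware pseudo-registers: report the
 * PSCI version, the per-workaround mitigation level, or the current
 * feature bitmaps to userspace.
 */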
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		val = get_kernel_wa_level(vcpu, reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_STD_BMAP:
		val = READ_ONCE(smccc_feat->std_bmap);
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		val = READ_ONCE(smccc_feat->std_hyp_bmap);
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

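/*
 * Update one of the bitmapped feature pseudo-registers: the new value must
 * be a subset of the supported feature bits and can no longer be changed
 * once the VM has run.
 */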
static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
	int ret = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
	unsigned long *fw_reg_bmap, fw_reg_features;

	switch (reg_id) {
	case KVM_REG_ARM_STD_BMAP:
		fw_reg_bmap = &smccc_feat->std_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->std_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
		break;
	default:
		return -ENOENT;
	}

	/* Check for unsupported bit */
	if (val & ~fw_reg_features)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
		ret = -EBUSY;
		goto out;
	}

	WRITE_ONCE(*fw_reg_bmap, val);
out:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

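/*
 * KVM_SET_ONE_REG handler for the firmware pseudo-registers, typically used
 * when restoring a migrated VM. A workaround register may not claim a higher
 * mitigation level than the one the host actually provides.
 */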
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (KVM_REG_SIZE(reg->id) != sizeof(val))
		return -ENOENT;
	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
		case KVM_ARM_PSCI_1_2:
		case KVM_ARM_PSCI_1_3:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(vcpu, reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(vcpu, reg->id) < wa_level)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_STD_BMAP:
	case KVM_REG_ARM_STD_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
	default:
		return -ENOENT;
	}

	return -EINVAL;
}

int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return 0;
	default:
		return -ENXIO;
	}
}

int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	void __user *uaddr = (void __user *)attr->addr;

	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return kvm_smccc_set_filter(kvm, uaddr);
	default:
		return -ENXIO;
	}
}