// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
 *
 * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <linux/tsm.h>
#include <crypto/gcm.h>
#include <linux/psp-sev.h>
#include <linux/sockptr.h>
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>

#include <asm/svm.h>
#include <asm/sev.h>
#define DEVICE_NAME	"sev-guest"

#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)

#define SVSM_MAX_RETRIES		3
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	struct snp_msg_desc *msg_desc;

	union {
		struct snp_report_req report;
		struct snp_derived_key_req derived_key;
		struct snp_ext_report_req ext_report;
	} req;
};
/*
 * The VMPCK ID represents the key used by the SNP guest to communicate with the
 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
 * used will be the key associated with the VMPL at which the guest is running.
 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
 * allows for using one of the remaining VMPCKs.
 */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);
static bool is_vmpck_empty(struct snp_msg_desc *mdesc)
{
	char zero_key[VMPCK_KEY_LEN] = {0};

	if (mdesc->vmpck)
		return !memcmp(mdesc->vmpck, zero_key, VMPCK_KEY_LEN);

	return true;
}
/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
 *
 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
 * encrypt the requests. The IV for this scheme is the sequence number. GCM
 * cannot tolerate IV reuse.
 *
 * The ASP FW v1.51 only increments the sequence numbers on a successful
 * guest<->ASP back and forth and only accepts messages at its exact sequence
 * number.
 *
 * So if the sequence number were to be reused the encryption scheme is
 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
 * will reject the request.
 */
static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
{
	pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
		 vmpck_id);
	memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
	mdesc->vmpck = NULL;
}
static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
{
	u64 count;

	lockdep_assert_held(&snp_cmd_mutex);

	/* Read the current message sequence counter from secrets pages */
	count = *mdesc->os_area_msg_seqno;

	return count + 1;
}
/* Return a non-zero value on success */
static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
{
	u64 count = __snp_get_msg_seqno(mdesc);

	/*
	 * The message sequence counter for the SNP guest request is a 64-bit
	 * value, but version 2 of the GHCB specification defines 32-bit storage
	 * for it. If the counter exceeds the 32-bit value then return zero.
	 * The caller should check the return value, but if the caller happens to
	 * not check the value and use it, then the firmware treats zero as an
	 * invalid number and will fail the message request.
	 */
	if (count >= UINT_MAX) {
		pr_err("request message sequence counter overflow\n");
		return 0;
	}

	return count;
}
static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*mdesc->os_area_msg_seqno += 2;
}
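
/*
 * Taken together with __snp_get_msg_seqno() above, the net effect is that
 * guest requests use the odd sequence numbers (1, 3, 5, ...) while the
 * ASP's responses use the even ones (2, 4, 6, ...), and each side's
 * sequence number doubles as the AES-GCM IV for that message.
 */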
static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;

	return container_of(dev, struct snp_guest_dev, misc);
}
static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
{
	struct aesgcm_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) {
		pr_err("Crypto context initialization failed\n");
		kfree(ctx);
		return NULL;
	}

	return ctx;
}
static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
{
	struct snp_guest_msg *resp_msg = &mdesc->secret_response;
	struct snp_guest_msg *req_msg = &mdesc->secret_request;
	struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
	struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
	struct aesgcm_ctx *ctx = mdesc->ctx;
	u8 iv[GCM_AES_IV_SIZE] = {};

	pr_debug("response [seqno %lld type %d version %d sz %d]\n",
		 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
		 resp_msg_hdr->msg_sz);

	/* Copy response from shared memory to encrypted memory. */
	memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));

	/* Verify that the sequence counter is incremented by 1 */
	if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
		return -EBADMSG;

	/* Verify response message type and version number. */
	if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
	    resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
		return -EBADMSG;

	/*
	 * If the message size is greater than our buffer length then return
	 * an error.
	 */
	if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
		return -EBADMSG;

	/* Decrypt the payload */
	memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
	if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
			    &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
		return -EBADMSG;

	return 0;
}
static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
{
	struct snp_guest_msg *msg = &mdesc->secret_request;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct aesgcm_ctx *ctx = mdesc->ctx;
	u8 iv[GCM_AES_IV_SIZE] = {};

	memset(msg, 0, sizeof(*msg));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = req->msg_type;
	hdr->msg_version = req->msg_version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = req->vmpck_id;
	hdr->msg_sz = req->req_sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	pr_debug("request [seqno %lld type %d version %d sz %d]\n",
		 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
		return -EBADMSG;

	memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
	aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
		       AAD_LEN, iv, hdr->authtag);

	return 0;
}
static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(req, &mdesc->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = mdesc->input.data_npages;
		req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(mdesc);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	if (override_npages)
		mdesc->input.data_npages = override_npages;

	return rc;
}
static int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
				  struct snp_guest_request_ioctl *rio)
{
	u64 seqno;
	int rc;

	guard(mutex)(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(mdesc)) {
		pr_err_ratelimited("VMPCK is disabled\n");
		return -ENOTTY;
	}

	/* Get the message sequence number and verify that it is non-zero */
	seqno = snp_get_msg_seqno(mdesc);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(mdesc->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in mdesc->secret_request. */
	rc = enc_payload(mdesc, seqno, req);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(mdesc->request, &mdesc->secret_request,
	       sizeof(mdesc->secret_request));

	rc = __handle_guest_request(mdesc, req, rio);
	if (rc) {
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			 rc, rio->exitinfo2);

		snp_disable_vmpck(mdesc);
		return rc;
	}

	rc = verify_and_dec_payload(mdesc, req);
	if (rc) {
		pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(mdesc);
		return rc;
	}

	return 0;
}
struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_report_req *report_req = &snp_dev->req.report;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int rc, resp_len;

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = report_req;
	req.req_sz = sizeof(*report_req);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
		rc = -EFAULT;

e_free:
	kfree(report_resp);
	return rc;
}
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
	struct snp_derived_key_resp derived_key_resp = {0};
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_guest_req req = {};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
			   sizeof(*derived_key_req)))
		return -EFAULT;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_KEY_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = derived_key_req;
	req.req_sz = sizeof(*derived_key_req);
	req.resp_buf = buf;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		return rc;

	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
			 sizeof(derived_key_resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
	return rc;
}
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)
{
	struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!report_req->certs_len || !report_req->certs_address)
		goto cmd;

	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
		return -EINVAL;

	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
		if (!access_ok(certs_address.user, report_req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If the host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(mdesc->certs_data, 0, report_req->certs_len);
	npages = report_req->certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	mdesc->input.data_npages = npages;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = &report_req->data;
	req.req_sz = sizeof(report_req->data);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST;

	ret = snp_send_guest_request(mdesc, &req, arg);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
		ret = -EFAULT;

e_free:
	kfree(report_resp);
	return ret;
}
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int ret;

	if (!buf)
		return;

	ret = set_memory_encrypted((unsigned long)buf, npages);
	if (ret) {
		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		return;
	}

	__free_pages(virt_to_page(buf), get_order(sz));
}
static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
	if (!page)
		return NULL;

	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
	if (ret) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
		__free_pages(page, get_order(sz));
		return NULL;
	}

	return page_address(page);
}
static const struct file_operations snp_guest_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= snp_guest_ioctl,
};
static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
{
	u8 *key = NULL;

	switch (id) {
	case 0:
		*seqno = &secrets->os_area.msg_seqno_0;
		key = secrets->vmpck0;
		break;
	case 1:
		*seqno = &secrets->os_area.msg_seqno_1;
		key = secrets->vmpck1;
		break;
	case 2:
		*seqno = &secrets->os_area.msg_seqno_2;
		key = secrets->vmpck2;
		break;
	case 3:
		*seqno = &secrets->os_area.msg_seqno_3;
		key = secrets->vmpck3;
		break;
	default:
		break;
	}

	return key;
}
struct snp_msg_report_resp_hdr {
	u32 status;
	u32 report_size;
	u8 rsvd[24];
};

struct snp_msg_cert_entry {
	guid_t guid;
	u32 offset;
	u32 length;
};
static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
	unsigned int rep_len, man_len, certs_len;
	struct tsm_desc *desc = &report->desc;
	struct svsm_attest_call ac = {};
	unsigned int retry_count;
	void *rep, *man, *certs;
	struct svsm_call call;
	unsigned int size;
	bool try_again;
	void *buffer;
	u64 call_id;
	int ret;

	/*
	 * Allocate pages for the request:
	 * - Report blob (4K)
	 * - Manifest blob (4K)
	 * - Certificate blob (16K)
	 *
	 * Above addresses must be 4K aligned
	 */
	rep_len = SZ_4K;
	man_len = SZ_4K;
	certs_len = SEV_FW_BLOB_MAX_SIZE;

	if (guid_is_null(&desc->service_guid)) {
		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
	} else {
		export_guid(ac.service_guid, &desc->service_guid);
		ac.service_manifest_ver = desc->service_manifest_version;

		call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
	}

	retry_count = 0;

retry:
	memset(&call, 0, sizeof(call));

	size = rep_len + man_len + certs_len;
	buffer = alloc_pages_exact(size, __GFP_ZERO);
	if (!buffer)
		return -ENOMEM;

	rep = buffer;
	ac.report_buf.pa = __pa(rep);
	ac.report_buf.len = rep_len;

	man = rep + rep_len;
	ac.manifest_buf.pa = __pa(man);
	ac.manifest_buf.len = man_len;

	certs = man + man_len;
	ac.certificates_buf.pa = __pa(certs);
	ac.certificates_buf.len = certs_len;

	ac.nonce.pa = __pa(desc->inblob);
	ac.nonce.len = desc->inblob_len;

	ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
	if (ret) {
		free_pages_exact(buffer, size);

		switch (call.rax_out) {
		case SVSM_ERR_INVALID_PARAMETER:
			try_again = false;

			if (ac.report_buf.len > rep_len) {
				rep_len = PAGE_ALIGN(ac.report_buf.len);
				try_again = true;
			}

			if (ac.manifest_buf.len > man_len) {
				man_len = PAGE_ALIGN(ac.manifest_buf.len);
				try_again = true;
			}

			if (ac.certificates_buf.len > certs_len) {
				certs_len = PAGE_ALIGN(ac.certificates_buf.len);
				try_again = true;
			}

			/* If one of the buffers wasn't large enough, retry the request */
			if (try_again && retry_count < SVSM_MAX_RETRIES) {
				retry_count++;
				goto retry;
			}

			return -EINVAL;
		default:
			pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
					   ret, call.rax_out);
			return -EINVAL;
		}
	}

	/*
	 * Allocate all the blob memory buffers at once so that the cleanup is
	 * done for errors that occur after the first allocation (i.e. before
	 * using no_free_ptr()).
	 */
	rep_len = ac.report_buf.len;
	void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);

	man_len = ac.manifest_buf.len;
	void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);

	certs_len = ac.certificates_buf.len;
	void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;

	if (!rbuf || !mbuf || (certs_len && !cbuf)) {
		free_pages_exact(buffer, size);
		return -ENOMEM;
	}

	memcpy(rbuf, rep, rep_len);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = rep_len;

	memcpy(mbuf, man, man_len);
	report->manifestblob = no_free_ptr(mbuf);
	report->manifestblob_len = man_len;

	if (cbuf) {
		memcpy(cbuf, certs, certs_len);
		report->auxblob = no_free_ptr(cbuf);
		report->auxblob_len = certs_len;
	}

	free_pages_exact(buffer, size);

	return 0;
}
static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	if (desc->service_provider) {
		if (strcmp(desc->service_provider, "svsm"))
			return -EINVAL;

		return sev_svsm_report_new(report, data);
	}

	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 0x1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;

		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}
static bool sev_report_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_GENERATION:
	case TSM_REPORT_PROVIDER:
	case TSM_REPORT_PRIVLEVEL:
	case TSM_REPORT_PRIVLEVEL_FLOOR:
		return true;
	case TSM_REPORT_SERVICE_PROVIDER:
	case TSM_REPORT_SERVICE_GUID:
	case TSM_REPORT_SERVICE_MANIFEST_VER:
		return snp_vmpl;
	}

	return false;
}
static bool sev_report_bin_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_INBLOB:
	case TSM_REPORT_OUTBLOB:
	case TSM_REPORT_AUXBLOB:
		return true;
	case TSM_REPORT_MANIFESTBLOB:
		return snp_vmpl;
	}

	return false;
}
static struct tsm_ops sev_tsm_ops = {
	.name			= KBUILD_MODNAME,
	.report_new		= sev_report_new,
	.report_attr_visible	= sev_report_attr_visible,
	.report_bin_attr_visible = sev_report_bin_attr_visible,
};
static void unregister_sev_tsm(void *data)
{
	tsm_unregister(&sev_tsm_ops);
}
static int __init sev_guest_probe(struct platform_device *pdev)
{
	struct sev_guest_platform_data *data;
	struct snp_secrets_page *secrets;
	struct device *dev = &pdev->dev;
	struct snp_guest_dev *snp_dev;
	struct snp_msg_desc *mdesc;
	struct miscdevice *misc;
	void __iomem *mapping;
	int ret;

	BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!dev->platform_data)
		return -ENODEV;

	data = (struct sev_guest_platform_data *)dev->platform_data;
	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
	if (!mapping)
		return -ENODEV;

	secrets = (__force void *)mapping;

	ret = -ENOMEM;
	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
	if (!snp_dev)
		goto e_unmap;

	mdesc = devm_kzalloc(&pdev->dev, sizeof(struct snp_msg_desc), GFP_KERNEL);
	if (!mdesc)
		goto e_unmap;

	/* Adjust the default VMPCK key based on the executing VMPL level */
	if (vmpck_id == -1)
		vmpck_id = snp_vmpl;

	ret = -EINVAL;
	mdesc->vmpck = get_vmpck(vmpck_id, secrets, &mdesc->os_area_msg_seqno);
	if (!mdesc->vmpck) {
		dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id);
		goto e_unmap;
	}

	/* Verify that VMPCK is not zero. */
	if (is_vmpck_empty(mdesc)) {
		dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id);
		goto e_unmap;
	}

	platform_set_drvdata(pdev, snp_dev);
	snp_dev->dev = dev;
	mdesc->secrets = secrets;

	/* Allocate the shared page used for the request and response message. */
	mdesc->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!mdesc->request)
		goto e_unmap;

	mdesc->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!mdesc->response)
		goto e_free_request;

	mdesc->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
	if (!mdesc->certs_data)
		goto e_free_response;

	ret = -EIO;
	mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN);
	if (!mdesc->ctx)
		goto e_free_cert_data;

	misc = &snp_dev->misc;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->name = DEVICE_NAME;
	misc->fops = &snp_guest_fops;

	/* Initialize the input addresses for guest request */
	mdesc->input.req_gpa = __pa(mdesc->request);
	mdesc->input.resp_gpa = __pa(mdesc->response);
	mdesc->input.data_gpa = __pa(mdesc->certs_data);

	/* Set the privlevel_floor attribute based on the vmpck_id */
	sev_tsm_ops.privlevel_floor = vmpck_id;

	ret = tsm_register(&sev_tsm_ops, snp_dev);
	if (ret)
		goto e_free_cert_data;

	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
	if (ret)
		goto e_free_cert_data;

	ret = misc_register(misc);
	if (ret)
		goto e_free_cert_data;

	snp_dev->msg_desc = mdesc;
	dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id);
	return 0;

e_free_cert_data:
	free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
e_free_response:
	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
e_free_request:
	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
e_unmap:
	iounmap(mapping);
	return ret;
}
static void __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;

	free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
	free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
	free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
	misc_deregister(&snp_dev->misc);
}
/*
 * This driver is meant to be a common SEV guest interface driver and to
 * support any SEV guest API. As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 *
 * sev_guest_remove() lives in .exit.text. For drivers registered via
 * module_platform_driver_probe() this is ok because they cannot get unbound
 * at runtime. So mark the driver struct with __refdata to prevent modpost
 * triggering a section mismatch warning.
 */
static struct platform_driver sev_guest_driver __refdata = {
	.remove		= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};

module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");