/* drivers/char/tpm/xen-tpmfront.c */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Implementation of the Xen vTPM device frontend
5 * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
6 */
7 #include <linux/errno.h>
8 #include <linux/err.h>
9 #include <linux/interrupt.h>
10 #include <linux/freezer.h>
11 #include <xen/xen.h>
12 #include <xen/events.h>
13 #include <xen/interface/io/tpmif.h>
14 #include <xen/grant_table.h>
15 #include <xen/xenbus.h>
16 #include <xen/page.h>
17 #include "tpm.h"
18 #include <xen/platform_pci.h>
/* Per-device state for one Xen vTPM frontend instance. */
struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	/* Shared ring page, granted to the backend domain. */
	struct vtpm_shared_page *shr;

	unsigned int evtchn;	/* event channel used to kick/receive from backend */
	int ring_ref;		/* grant reference for @shr; 0 means "not granted" */
	domid_t backend_id;	/* backend domain id */
	int irq;		/* irq bound to @evtchn; 0 means "not bound" */
	wait_queue_head_t read_queue;	/* woken by tpmif_interrupt() on completion */
};
/*
 * TIS-style status bits synthesized by vtpm_status() from the shared-page
 * state; consumed through tpm_vtpm.req_complete_mask/val/req_canceled.
 */
enum status_bits {
	VTPM_STATUS_RUNNING = 0x1,
	VTPM_STATUS_IDLE = 0x2,
	VTPM_STATUS_RESULT = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};
40 static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
41 bool check_cancel, bool *canceled)
43 u8 status = chip->ops->status(chip);
45 *canceled = false;
46 if ((status & mask) == mask)
47 return true;
48 if (check_cancel && chip->ops->req_canceled(chip, status)) {
49 *canceled = true;
50 return true;
52 return false;
/*
 * Wait until the chip status has every bit of @mask set, for at most
 * @timeout jiffies.
 *
 * With TPM_CHIP_FLAG_IRQ the caller sleeps on @queue and is woken by the
 * event-channel interrupt; otherwise the status is polled every
 * TPM_TIMEOUT ms until the deadline.
 *
 * Returns 0 on success, -ECANCELED if @check_cancel is set and the
 * request was canceled, -ETIME on timeout.
 */
static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
		unsigned long timeout, wait_queue_head_t *queue,
		bool check_cancel)
{
	unsigned long stop;
	long rc;
	u8 status;
	bool canceled = false;

	/* check current status */
	status = chip->ops->status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
		/* Recompute the remaining budget; may have gone negative. */
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
			wait_for_tpm_stat_cond(chip, mask, check_cancel,
					       &canceled),
			timeout);
		if (rc > 0) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		/*
		 * A freezer-induced signal is not a real error: clear the
		 * pending flag and retry with whatever time is left.
		 */
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* Polling fallback for chips without interrupt support. */
		do {
			tpm_msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
100 static u8 vtpm_status(struct tpm_chip *chip)
102 struct tpm_private *priv = dev_get_drvdata(&chip->dev);
103 switch (priv->shr->state) {
104 case VTPM_STATE_IDLE:
105 return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
106 case VTPM_STATE_FINISH:
107 return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
108 case VTPM_STATE_SUBMIT:
109 case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
110 return VTPM_STATUS_RUNNING;
111 default:
112 return 0;
116 static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
118 return status & VTPM_STATUS_CANCELED;
/*
 * Ask the backend to cancel the in-flight command: publish the CANCEL
 * state in the shared page, then kick the event channel.
 */
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);

	priv->shr->state = VTPM_STATE_CANCEL;
	/* The state write must be visible before the backend is notified. */
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}
129 static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
131 return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
/*
 * Copy @count bytes of command @buf into the shared page, submit it to
 * the backend and wait for completion.
 *
 * Returns 0 on success, -EINVAL if the command does not fit in the
 * shared page, or -ETIME if the backend did not become idle in time
 * (a cancel is requested before returning in that case).
 */
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	/* Reject commands whose payload cannot fit in the shared page. */
	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	/* Payload and length must be in place before the state flips. */
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	/* Publish the submission before kicking the backend. */
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	/* Per-ordinal timeout: some TPM commands legitimately take long. */
	ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&priv->read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return 0;
}
/*
 * Copy the response out of the shared page into @buf (at most @count
 * bytes).  Returns the number of bytes copied, -ECANCELED if the
 * command was canceled, -ETIME on timeout, or -EIO on a bogus offset.
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	/*
	 * NOTE(review): length is sampled before the RESULT wait below —
	 * this relies on vtpm_send() having already waited for completion;
	 * confirm against the protocol before reordering.
	 */
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	/* Clamp to what fits in the page ... */
	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	/* ... and to what fits in the caller's buffer. */
	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}
/* TPM chip operations backed by the Xen shared-page vTPM protocol. */
static const struct tpm_class_ops tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	/* A command is complete when the page is idle with a result ready. */
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
};
217 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
219 struct tpm_private *priv = dev_id;
221 switch (priv->shr->state) {
222 case VTPM_STATE_IDLE:
223 case VTPM_STATE_FINISH:
224 wake_up_interruptible(&priv->read_queue);
225 break;
226 case VTPM_STATE_SUBMIT:
227 case VTPM_STATE_CANCEL:
228 default:
229 break;
231 return IRQ_HANDLED;
234 static int setup_chip(struct device *dev, struct tpm_private *priv)
236 struct tpm_chip *chip;
238 chip = tpmm_chip_alloc(dev, &tpm_vtpm);
239 if (IS_ERR(chip))
240 return PTR_ERR(chip);
242 init_waitqueue_head(&priv->read_queue);
244 priv->chip = chip;
245 dev_set_drvdata(&chip->dev, priv);
247 return 0;
/*
 * Allocate and grant the shared page, set up the event channel/irq, and
 * advertise everything to the backend through a xenbus transaction.
 *
 * caller must clean up in case of errors (via ring_free()).
 */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;

	priv->ring_ref = gref;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	/* Only protocol v2 is supported; backend_changed() enforces this. */
	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	/* -EAGAIN means the transaction raced another writer: retry. */
	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}
/*
 * Tear down everything setup_ring() created.  Tolerates a partially
 * initialized @priv (and is a no-op on NULL); frees @priv itself.
 */
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	/*
	 * If the page was granted, revoke the backend's access (handing
	 * the page back through gnttab); otherwise free the bare page.
	 */
	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->irq)
		unbind_from_irqhandler(priv->irq, priv);

	kfree(priv);
}
346 static int tpmfront_probe(struct xenbus_device *dev,
347 const struct xenbus_device_id *id)
349 struct tpm_private *priv;
350 int rv;
352 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
353 if (!priv) {
354 xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
355 return -ENOMEM;
358 rv = setup_chip(&dev->dev, priv);
359 if (rv) {
360 kfree(priv);
361 return rv;
364 rv = setup_ring(dev, priv);
365 if (rv) {
366 ring_free(priv);
367 return rv;
370 tpm_get_timeouts(priv->chip);
372 return tpm_chip_register(priv->chip);
/*
 * xenbus remove: unregister the chip before releasing the ring so no
 * command can be in flight while the shared page is torn down.
 */
static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);

	tpm_chip_unregister(chip);
	ring_free(priv);
	/* priv is freed now; drop the stale pointer. */
	dev_set_drvdata(&chip->dev, NULL);
	return 0;
}
/* Resume after suspend/migration by rebuilding the frontend from scratch. */
static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}
/*
 * React to backend xenbus state changes: connect once the backend
 * advertises protocol v2, tear down when the backend closes.
 */
static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		/* Already connected: nothing to do. */
		if (dev->state == XenbusStateConnected)
			break;

		/* This frontend only speaks protocol v2 (see setup_ring()). */
		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
					  0)) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}
/* xenbus ids handled by this frontend; the empty string terminates. */
static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");
/* xenbus driver glue: lifecycle callbacks for the vtpm frontend. */
static struct xenbus_driver tpmfront_driver = {
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
};
434 static int __init xen_tpmfront_init(void)
436 if (!xen_domain())
437 return -ENODEV;
439 if (!xen_has_pv_devices())
440 return -ENODEV;
442 return xenbus_register_frontend(&tpmfront_driver);
444 module_init(xen_tpmfront_init);
/* Module exit: undo xen_tpmfront_init()'s frontend registration. */
static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");