/* drivers/char/tpm/tpm_tis.c */
/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"
enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

#define TPM_ACCESS(l)		(0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)	(0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)	(0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)	(0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)	(0x0014 | ((l) << 12))
#define TPM_STS(l)		(0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)	(0x0024 | ((l) << 12))

#define TPM_DID_VID(l)		(0x0F00 | ((l) << 12))
#define TPM_RID(l)		(0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
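
/*
 * Check whether the PnP device is Intel's integrated TPM (iTPM) by
 * looking for the INTC0102 ACPI hardware ID, so that the
 * STS_DATA_EXPECT workaround can be enabled automatically.
 */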
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif
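
/*
 * Return the locality number (and cache it in chip->vendor.locality) if
 * locality 'l' is currently active and the access register contents are
 * valid, -1 otherwise.
 */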
static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}
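
/*
 * Drop the active locality 'l', either unconditionally (force) or only
 * when another locality has a request pending.
 */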
static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}
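
/*
 * Request use of locality 'l' and wait (interrupt-driven when an irq is
 * available, otherwise by polling) until it becomes active or timeout_a
 * expires.  Returns the locality on success, -1 on timeout.
 */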
static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* wait for burstcount */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		}
		while (time_before(jiffies, stop));
	}
	return -1;
}
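
/* Read the 8-bit status register of the currently active locality. */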
static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
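
/*
 * Read the 16-bit burst count from the status register; it tells how
 * many bytes can pass through the data FIFO without the TPM inserting
 * wait states.  Polls until non-zero or timeout_d expires.
 */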
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}
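
/*
 * Drain up to 'count' response bytes from the data FIFO, one burst at a
 * time, for as long as the TPM reports valid, available data.
 */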
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;
	while (size < count &&
	       wait_for_tpm_stat(chip,
				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				 chip->vendor.timeout_c,
				 &chip->vendor.read_queue)
	       == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.
							    locality));
	}
	return size;
}
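
/*
 * Receive a complete command response: read the 10-byte header first,
 * take the total length from its paramsize field, then read the rest
 * and verify that no stray data is left in the FIFO.
 */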
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	if ((size =
	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
		dev_err(chip->dev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	if ((size +=
	     recv_data(chip, &buf[TPM_HEADER_SIZE],
		       expected - TPM_HEADER_SIZE)) < expected) {
		dev_err(chip->dev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->dev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_tpm_stat
		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
		     &chip->vendor.int_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
				  &chip->vendor.int_queue);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	return 0;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}
/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc;
	u32 ordinal;

	rc = tpm_tis_send_data(chip, buf, len);
	if (rc < 0)
		return rc;

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
		if (wait_for_tpm_stat
		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		     tpm_calc_ordinal_duration(chip, ordinal),
		     &chip->vendor.read_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}
/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
	int rc = 0;
	u8 cmd_getticks[] = {
		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0xf1
	};
	size_t len = sizeof(cmd_getticks);
	int rem_itpm = itpm;

	itpm = 0;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0)
		goto out;

	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	itpm = 1;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0) {
		dev_info(chip->dev, "Detected an iTPM.\n");
		rc = 1;
	} else
		rc = -EFAULT;

out:
	itpm = rem_itpm;
	tpm_tis_ready(chip);
	/* some TPMs need a break here otherwise they will not work
	 * correctly on the immediately subsequent command */
	msleep(chip->vendor.timeout_b);
	release_locality(chip, chip->vendor.locality, 0);

	return rc;
}
static const struct file_operations tis_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *tis_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
	.attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = TPM_STS_COMMAND_READY,
	.attr_group = &tis_attr_grp,
	.miscdev = {
		    .fops = &tis_ops,},
};
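
/*
 * Interrupt handler used only while probing for a working irq line: it
 * records the irq that actually fired in vendor.probed_irq and acks it.
 */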
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.probed_irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}
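
/*
 * Main interrupt handler: wake readers on data-available, re-detect the
 * active locality on locality-change, wake waiters on the other status
 * interrupts, then ack everything in the interrupt status register.
 */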
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
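
/*
 * Common initialization: map the TIS MMIO window, claim locality 0,
 * report the device/vendor IDs, run the iTPM probe, read the interface
 * capabilities, fetch timeouts, run the selftest and, when enabled,
 * find and install a working interrupt.
 */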
static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i, irq_s, irq_e, probe;
	struct tpm_chip *chip;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (!itpm) {
		/* itpm is a bool, so keep the (possibly negative) probe
		   result in a separate int before converting it */
		probe = probe_itpm(chip);
		if (probe < 0) {
			rc = -ENODEV;
			goto out_err;
		}
		itpm = !!probe;
	}

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* get the timeouts before testing for irqs */
	if (tpm_get_timeouts(chip)) {
		dev_err(dev, "Could not get TPM timeouts and durations\n");
		rc = -ENODEV;
		goto out_err;
	}

	if (tpm_do_selftest(chip)) {
		dev_err(dev, "TPM self test failed\n");
		rc = -ENODEV;
		goto out_err;
	}

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		irq_s =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));
		if (irq_s) {
			irq_e = irq_s;
		} else {
			irq_s = 3;
			irq_e = 15;
		}

		for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				 TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq
			    (i, tis_int_probe, IRQF_SHARED,
			     chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			chip->vendor.probed_irq = 0;

			/* Generate Interrupts */
			tpm_gen_interrupt(chip);

			chip->vendor.irq = chip->vendor.probed_irq;

			/* free_irq will call into tis_int_probe;
			   clear all irqs we haven't seen while doing
			   tpm_gen_interrupt */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq
		    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	INIT_LIST_HEAD(&chip->vendor.list);
	spin_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	spin_unlock(&tis_lock);

	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	/* reenable interrupts that device may have lost or
	   BIOS/firmware may have disabled */
	iowrite8(chip->vendor.irq, chip->vendor.iobase +
		 TPM_INT_VECTOR(chip->vendor.locality));

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}
#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
				      const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = 0;

	if (is_itpm(pnp_dev))
		itpm = 1;

	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);
	int ret;

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	ret = tpm_pm_resume(&dev->dev);
	if (!ret)
		tpm_do_selftest(chip);

	return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.suspend = tpm_tis_pnp_suspend,
	.resume = tpm_tis_pnp_resume,
	.remove = tpm_tis_pnp_remove,
};

#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	return tpm_pm_resume(&dev->dev);
}

static struct platform_driver tis_drv = {
	.driver = {
		.name = "tpm_tis",
		.owner = THIS_MODULE,
	},
	.suspend = tpm_tis_suspend,
	.resume = tpm_tis_resume,
};
static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
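
/*
 * Typical usage (illustrative): "modprobe tpm_tis force=1 interrupts=0"
 * probes the default TIS window at 0xFED40000 directly, bypassing the
 * ACPI/PnP entry, with interrupt-driven operation disabled.
 */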
static int __init init_tis(void)
{
	int rc;
#ifdef CONFIG_PNP
	if (!force)
		return pnp_register_driver(&tis_pnp_driver);
#endif

	rc = platform_driver_register(&tis_drv);
	if (rc < 0)
		return rc;
	if (IS_ERR(pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0)))
		return PTR_ERR(pdev);
	if ((rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&tis_drv);
	}
	return rc;
}
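
/*
 * Module exit: for every registered chip, unregister it, mask the global
 * interrupt enable, release the locality, free the irq and unmap the
 * MMIO window, then unregister the PnP or platform driver.
 */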
static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;
	spin_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.
						  locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	spin_unlock(&tis_lock);
#ifdef CONFIG_PNP
	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
		return;
	}
#endif
	platform_device_unregister(pdev);
	platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");