2 * Copyright (C) 2005, 2006 IBM Corporation
5 * Leendert van Doorn <leendert@watson.ibm.com>
6 * Kylene Hall <kjhall@us.ibm.com>
8 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
10 * Device driver for TCG/TCPA TPM (trusted platform module).
11 * Specifications at www.trustedcomputinggroup.org
13 * This device driver implements the TPM interface as defined in
14 * the TCG TPM Interface Spec version 1.2, revision 1.0.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation, version 2 of the
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/pnp.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/wait.h>
28 #include <linux/acpi.h>
/* TPM command/response header: tag(2) + paramSize(4) + ordinal/returnCode(4). */
31 #define TPM_HEADER_SIZE 10
/* TPM_ACCESS register bits (per-locality ownership handshake).
 * NOTE(review): the "enum ... {" opener and "};" terminator lines appear to
 * have been lost in this extraction (embedded numbering has gaps). */
34 TPM_ACCESS_VALID
= 0x80,
35 TPM_ACCESS_ACTIVE_LOCALITY
= 0x20,
36 TPM_ACCESS_REQUEST_PENDING
= 0x04,
37 TPM_ACCESS_REQUEST_USE
= 0x02,
/* TPM_STS register bits (command/data handshake). */
42 TPM_STS_COMMAND_READY
= 0x40,
44 TPM_STS_DATA_AVAIL
= 0x10,
45 TPM_STS_DATA_EXPECT
= 0x08,
/* TPM_INT_ENABLE / TPM_INTF_CAPS bits. */
49 TPM_GLOBAL_INT_ENABLE
= 0x80000000,
50 TPM_INTF_BURST_COUNT_STATIC
= 0x100,
51 TPM_INTF_CMD_READY_INT
= 0x080,
52 TPM_INTF_INT_EDGE_FALLING
= 0x040,
53 TPM_INTF_INT_EDGE_RISING
= 0x020,
54 TPM_INTF_INT_LEVEL_LOW
= 0x010,
55 TPM_INTF_INT_LEVEL_HIGH
= 0x008,
56 TPM_INTF_LOCALITY_CHANGE_INT
= 0x004,
57 TPM_INTF_STS_VALID_INT
= 0x002,
58 TPM_INTF_DATA_AVAIL_INT
= 0x001,
/* Fixed MMIO base of the TIS register window, and default timeouts. */
62 TIS_MEM_BASE
= 0xFED40000,
64 TIS_SHORT_TIMEOUT
= 750, /* ms */
65 TIS_LONG_TIMEOUT
= 2000, /* 2 sec */
/* TIS register offsets: each locality l occupies its own 4 KiB page,
 * hence the (l) << 12 term in every accessor. */
68 #define TPM_ACCESS(l) (0x0000 | ((l) << 12))
69 #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
70 #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
71 #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
72 #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
73 #define TPM_STS(l) (0x0018 | ((l) << 12))
74 #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
/* Device/vendor ID and revision ID live at the top of the page. */
76 #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
77 #define TPM_RID(l) (0x0F04 | ((l) << 12))
/* Global list of registered TIS chips; tis_lock guards list mutation. */
79 static LIST_HEAD(tis_chips
);
80 static DEFINE_SPINLOCK(tis_lock
);
/* is_itpm: detect Intel iTPM by scanning the ACPI hardware-ID list of the
 * PNP device for "INTC0102".  NOTE(review): two versions are visible (ACPI
 * and a stub); the #ifdef CONFIG_ACPI markers and return statements appear
 * missing from this extraction — fragment only. */
83 static int is_itpm(struct pnp_dev
*dev
)
85 struct acpi_device
*acpi
= pnp_acpi_device(dev
);
86 struct acpi_hardware_id
*id
;
88 list_for_each_entry(id
, &acpi
->pnp
.ids
, list
) {
89 if (!strcmp("INTC0102", id
->id
))
/* Non-ACPI stub variant (presumably returns 0 — lines missing). */
96 static int is_itpm(struct pnp_dev
*dev
)
/* check_locality: if TPM_ACCESS(l) reports activeLocality AND tpmRegValidSts,
 * record l as the chip's current locality and return it.
 * NOTE(review): the failure-path return (presumably -1) is missing from
 * this extraction. */
102 static int check_locality(struct tpm_chip
*chip
, int l
)
104 if ((ioread8(chip
->vendor
.iobase
+ TPM_ACCESS(l
)) &
105 (TPM_ACCESS_ACTIVE_LOCALITY
| TPM_ACCESS_VALID
)) ==
106 (TPM_ACCESS_ACTIVE_LOCALITY
| TPM_ACCESS_VALID
))
107 return chip
->vendor
.locality
= l
;
112 static void release_locality(struct tpm_chip
*chip
, int l
, int force
)
114 if (force
|| (ioread8(chip
->vendor
.iobase
+ TPM_ACCESS(l
)) &
115 (TPM_ACCESS_REQUEST_PENDING
| TPM_ACCESS_VALID
)) ==
116 (TPM_ACCESS_REQUEST_PENDING
| TPM_ACCESS_VALID
))
117 iowrite8(TPM_ACCESS_ACTIVE_LOCALITY
,
118 chip
->vendor
.iobase
+ TPM_ACCESS(l
));
/* request_locality: claim locality l.  Fast path: already active.  Otherwise
 * write requestUse and wait — via the int_queue interrupt wait when an irq is
 * configured, else by polling check_locality() until timeout_a expires.
 * NOTE(review): declarations, the wait condition expression and the final
 * return are missing from this extraction — fragment only. */
121 static int request_locality(struct tpm_chip
*chip
, int l
)
126 if (check_locality(chip
, l
) >= 0)
129 iowrite8(TPM_ACCESS_REQUEST_USE
,
130 chip
->vendor
.iobase
+ TPM_ACCESS(l
));
/* Interrupt-driven wait for localityChange. */
132 if (chip
->vendor
.irq
) {
133 rc
= wait_event_interruptible_timeout(chip
->vendor
.int_queue
,
136 chip
->vendor
.timeout_a
);
/* Polling fallback. */
141 /* wait for burstcount */
142 stop
= jiffies
+ chip
->vendor
.timeout_a
;
144 if (check_locality(chip
, l
) >= 0)
148 while (time_before(jiffies
, stop
));
153 static u8
tpm_tis_status(struct tpm_chip
*chip
)
155 return ioread8(chip
->vendor
.iobase
+
156 TPM_STS(chip
->vendor
.locality
));
159 static void tpm_tis_ready(struct tpm_chip
*chip
)
161 /* this causes the current command to be aborted */
162 iowrite8(TPM_STS_COMMAND_READY
,
163 chip
->vendor
.iobase
+ TPM_STS(chip
->vendor
.locality
));
/* get_burstcount: poll the 16-bit burstCount field (STS bytes 1-2) until it
 * is non-zero or timeout_d expires; burstCount is how many FIFO bytes may be
 * transferred without further handshaking.
 * NOTE(review): declarations, the loop body's non-zero check/return and the
 * failure return are missing from this extraction — fragment only. */
166 static int get_burstcount(struct tpm_chip
*chip
)
171 /* wait for burstcount */
172 /* which timeout value, spec has 2 answers (c & d) */
173 stop
= jiffies
+ chip
->vendor
.timeout_d
;
/* low byte of burstCount ... */
175 burstcnt
= ioread8(chip
->vendor
.iobase
+
176 TPM_STS(chip
->vendor
.locality
) + 1);
/* ... plus high byte (presumably shifted by 8 on the missing line). */
177 burstcnt
+= ioread8(chip
->vendor
.iobase
+
178 TPM_STS(chip
->vendor
.locality
) +
183 } while (time_before(jiffies
, stop
));
/* wait_for_stat: wait until all bits of @mask are set in the STS register.
 * Checks once up front, then either sleeps on @queue (irq mode) or polls
 * tpm_tis_status() until @timeout (jiffies) expires.
 * NOTE(review): the wait condition, poll delay and return statements are
 * missing from this extraction — fragment only. */
187 static int wait_for_stat(struct tpm_chip
*chip
, u8 mask
, unsigned long timeout
,
188 wait_queue_head_t
*queue
)
194 /* check current status */
195 status
= tpm_tis_status(chip
);
196 if ((status
& mask
) == mask
)
/* Interrupt-driven wait. */
199 if (chip
->vendor
.irq
) {
200 rc
= wait_event_interruptible_timeout(*queue
,
/* Polling fallback. */
207 stop
= jiffies
+ timeout
;
210 status
= tpm_tis_status(chip
);
211 if ((status
& mask
) == mask
)
213 } while (time_before(jiffies
, stop
));
/* recv_data: drain up to @count bytes from the data FIFO into @buf.  Each
 * outer iteration waits for dataAvail|stsValid, then reads at most one
 * burstCount's worth of bytes.  Returns bytes read (per the visible size
 * accounting).  NOTE(review): the wait_for_stat call line and the function
 * tail are missing from this extraction — fragment only. */
218 static int recv_data(struct tpm_chip
*chip
, u8
*buf
, size_t count
)
220 int size
= 0, burstcnt
;
221 while (size
< count
&&
223 TPM_STS_DATA_AVAIL
| TPM_STS_VALID
,
224 chip
->vendor
.timeout_c
,
225 &chip
->vendor
.read_queue
)
227 burstcnt
= get_burstcount(chip
);
228 for (; burstcnt
> 0 && size
< count
; burstcnt
--)
229 buf
[size
++] = ioread8(chip
->vendor
.iobase
+
230 TPM_DATA_FIFO(chip
->vendor
.
/* tpm_tis_recv: read one complete TPM response into @buf.  Reads the 10-byte
 * header first, extracts the big-endian paramSize at offset 2, then reads the
 * remainder; finally verifies no data is left over and releases the locality.
 * NOTE(review): error-path returns, the goto-out cleanup and the final return
 * of the byte count are missing from this extraction — fragment only. */
236 static int tpm_tis_recv(struct tpm_chip
*chip
, u8
*buf
, size_t count
)
239 int expected
, status
;
241 if (count
< TPM_HEADER_SIZE
) {
246 /* read first 10 bytes, including tag, paramsize, and result */
248 recv_data(chip
, buf
, TPM_HEADER_SIZE
)) < TPM_HEADER_SIZE
) {
249 dev_err(chip
->dev
, "Unable to read header\n");
/* paramSize is a big-endian u32 at byte offset 2 of the header. */
253 expected
= be32_to_cpu(*(__be32
*) (buf
+ 2));
254 if (expected
> count
) {
260 recv_data(chip
, &buf
[TPM_HEADER_SIZE
],
261 expected
- TPM_HEADER_SIZE
)) < expected
) {
262 dev_err(chip
->dev
, "Unable to read remainder of result\n");
267 wait_for_stat(chip
, TPM_STS_VALID
, chip
->vendor
.timeout_c
,
268 &chip
->vendor
.int_queue
);
269 status
= tpm_tis_status(chip
);
270 if (status
& TPM_STS_DATA_AVAIL
) { /* retry? */
271 dev_err(chip
->dev
, "Error left over data\n");
278 release_locality(chip
, chip
->vendor
.locality
, 0);
/* Module parameter forcing the Intel iTPM quirk (skip the dataExpect check
 * in tpm_tis_send).  NOTE(review): the "static bool itpm;" declaration is
 * not visible in this extraction. */
283 module_param(itpm
, bool, 0444);
284 MODULE_PARM_DESC(itpm
, "Force iTPM workarounds (found on some Lenovo laptops)");
/* tpm_tis_send: transmit the @len-byte command in @buf.  Claims locality 0,
 * forces commandReady if needed, feeds all but the last byte through the FIFO
 * in burstCount-sized chunks (verifying the TPM still expects data, unless
 * the itpm quirk is set), writes the last byte, fires tpmGo, and in irq mode
 * waits for completion using the per-ordinal duration.
 * NOTE(review): declarations, error returns, the tpmGo write and the goto
 * cleanup are missing from this extraction — fragment only. */
287 * If interrupts are used (signaled by an irq set in the vendor structure)
288 * tpm.c can skip polling for the data to be available as the interrupt is
291 static int tpm_tis_send(struct tpm_chip
*chip
, u8
*buf
, size_t len
)
293 int rc
, status
, burstcnt
;
297 if (request_locality(chip
, 0) < 0)
300 status
= tpm_tis_status(chip
);
/* Force the TPM into the commandReady state before writing. */
301 if ((status
& TPM_STS_COMMAND_READY
) == 0) {
304 (chip
, TPM_STS_COMMAND_READY
, chip
->vendor
.timeout_b
,
305 &chip
->vendor
.int_queue
) < 0) {
/* Write all bytes except the last, a burst at a time. */
311 while (count
< len
- 1) {
312 burstcnt
= get_burstcount(chip
);
313 for (; burstcnt
> 0 && count
< len
- 1; burstcnt
--) {
314 iowrite8(buf
[count
], chip
->vendor
.iobase
+
315 TPM_DATA_FIFO(chip
->vendor
.locality
));
319 wait_for_stat(chip
, TPM_STS_VALID
, chip
->vendor
.timeout_c
,
320 &chip
->vendor
.int_queue
);
321 status
= tpm_tis_status(chip
);
/* iTPMs misreport dataExpect; skip the check when the quirk is on. */
322 if (!itpm
&& (status
& TPM_STS_DATA_EXPECT
) == 0) {
328 /* write last byte */
330 chip
->vendor
.iobase
+
331 TPM_DATA_FIFO(chip
->vendor
.locality
));
332 wait_for_stat(chip
, TPM_STS_VALID
, chip
->vendor
.timeout_c
,
333 &chip
->vendor
.int_queue
);
334 status
= tpm_tis_status(chip
);
/* After the final byte the TPM must no longer expect data. */
335 if ((status
& TPM_STS_DATA_EXPECT
) != 0) {
342 chip
->vendor
.iobase
+ TPM_STS(chip
->vendor
.locality
));
/* Irq mode: sleep until the response is available (per-ordinal timeout). */
344 if (chip
->vendor
.irq
) {
345 ordinal
= be32_to_cpu(*((__be32
*) (buf
+ 6)));
347 (chip
, TPM_STS_DATA_AVAIL
| TPM_STS_VALID
,
348 tpm_calc_ordinal_duration(chip
, ordinal
),
349 &chip
->vendor
.read_queue
) < 0) {
357 release_locality(chip
, chip
->vendor
.locality
, 0);
/* Character-device file operations for /dev/tpm0.  NOTE(review): the
 * .llseek/.read/.write/.open entries are missing from this extraction. */
361 static const struct file_operations tis_ops
= {
362 .owner
= THIS_MODULE
,
367 .release
= tpm_release
,
/* sysfs attributes exported by the TPM core: read-only status/capability
 * files plus a root/group-writable cancel hook. */
370 static DEVICE_ATTR(pubek
, S_IRUGO
, tpm_show_pubek
, NULL
);
371 static DEVICE_ATTR(pcrs
, S_IRUGO
, tpm_show_pcrs
, NULL
);
372 static DEVICE_ATTR(enabled
, S_IRUGO
, tpm_show_enabled
, NULL
);
373 static DEVICE_ATTR(active
, S_IRUGO
, tpm_show_active
, NULL
);
374 static DEVICE_ATTR(owned
, S_IRUGO
, tpm_show_owned
, NULL
);
375 static DEVICE_ATTR(temp_deactivated
, S_IRUGO
, tpm_show_temp_deactivated
,
377 static DEVICE_ATTR(caps
, S_IRUGO
, tpm_show_caps_1_2
, NULL
);
378 static DEVICE_ATTR(cancel
, S_IWUSR
| S_IWGRP
, NULL
, tpm_store_cancel
);
/* NULL-terminated attribute array and its group, registered via the vendor
 * struct's .attr_group.  NOTE(review): the pcrs and caps entries appear
 * missing from this extraction (gap in the embedded numbering). */
380 static struct attribute
*tis_attrs
[] = {
381 &dev_attr_pubek
.attr
,
383 &dev_attr_enabled
.attr
,
384 &dev_attr_active
.attr
,
385 &dev_attr_owned
.attr
,
386 &dev_attr_temp_deactivated
.attr
,
388 &dev_attr_cancel
.attr
, NULL
,
391 static struct attribute_group tis_attr_grp
= {
/* Vendor operations handed to tpm_register_hardware(): the TIS transport
 * callbacks plus the STS bit patterns the core uses to detect completion
 * (dataAvail|stsValid) and cancellation (commandReady). */
395 static struct tpm_vendor_specific tpm_tis
= {
396 .status
= tpm_tis_status
,
397 .recv
= tpm_tis_recv
,
398 .send
= tpm_tis_send
,
399 .cancel
= tpm_tis_ready
,
400 .req_complete_mask
= TPM_STS_DATA_AVAIL
| TPM_STS_VALID
,
401 .req_complete_val
= TPM_STS_DATA_AVAIL
| TPM_STS_VALID
,
402 .req_canceled
= TPM_STS_COMMAND_READY
,
403 .attr_group
= &tis_attr_grp
,
/* tis_int_probe: one-shot handler used during irq autodetection.  Any real
 * interrupt latches the probed vector into chip->vendor.irq and clears the
 * source.  NOTE(review): the interrupt==0 early-out, the iowrite32 of the
 * status and the IRQ_HANDLED return are missing from this extraction. */
408 static irqreturn_t
tis_int_probe(int irq
, void *dev_id
)
410 struct tpm_chip
*chip
= dev_id
;
413 interrupt
= ioread32(chip
->vendor
.iobase
+
414 TPM_INT_STATUS(chip
->vendor
.locality
));
/* Remember which vector actually fired. */
419 chip
->vendor
.irq
= irq
;
421 /* Clear interrupts handled with TPM_EOI */
423 chip
->vendor
.iobase
+
424 TPM_INT_STATUS(chip
->vendor
.locality
));
/* tis_int_handler: production irq handler.  Wakes read_queue on dataAvail,
 * rescans localities on localityChange, wakes int_queue for command-ready/
 * stsValid/locality events, then acknowledges via TPM_INT_STATUS (the final
 * ioread32 flushes the posted write).  NOTE(review): declarations, the
 * IRQ_NONE early-out and the returns are missing from this extraction. */
428 static irqreturn_t
tis_int_handler(int dummy
, void *dev_id
)
430 struct tpm_chip
*chip
= dev_id
;
434 interrupt
= ioread32(chip
->vendor
.iobase
+
435 TPM_INT_STATUS(chip
->vendor
.locality
));
440 if (interrupt
& TPM_INTF_DATA_AVAIL_INT
)
441 wake_up_interruptible(&chip
->vendor
.read_queue
);
442 if (interrupt
& TPM_INTF_LOCALITY_CHANGE_INT
)
443 for (i
= 0; i
< 5; i
++)
444 if (check_locality(chip
, i
) >= 0)
447 (TPM_INTF_LOCALITY_CHANGE_INT
| TPM_INTF_STS_VALID_INT
|
448 TPM_INTF_CMD_READY_INT
))
449 wake_up_interruptible(&chip
->vendor
.int_queue
);
451 /* Clear interrupts handled with TPM_EOI */
453 chip
->vendor
.iobase
+
454 TPM_INT_STATUS(chip
->vendor
.locality
));
/* Read back to flush the posted acknowledge write. */
455 ioread32(chip
->vendor
.iobase
+ TPM_INT_STATUS(chip
->vendor
.locality
));
/* Interrupt use is on by default but can be disabled at load time.
 * NOTE(review): declared as int yet registered with the bool param type —
 * tolerated by older kernels; newer module_param would reject it. */
459 static int interrupts
= 1;
460 module_param(interrupts
, bool, 0444);
461 MODULE_PARM_DESC(interrupts
, "Enable interrupts");
/* tpm_tis_init: common probe path for both PNP and force-mode registration.
 * Registers with the TPM core, maps the TIS window, sets default timeouts,
 * claims locality 0, logs DID/VID/RID and interface capabilities, then sets
 * up interrupts: either uses the supplied @irq, or (when the interrupts
 * parameter is set and no irq was given) probes vectors 3..15 with
 * tis_int_probe.  Finally links the chip into tis_chips and runs the core
 * timeout/selftest handshake.  NOTE(review): many lines (error returns, the
 * request_irq calls themselves, the out_err label) are missing from this
 * extraction — fragment only. */
463 static int tpm_tis_init(struct device
*dev
, resource_size_t start
,
464 resource_size_t len
, unsigned int irq
)
466 u32 vendor
, intfcaps
, intmask
;
468 struct tpm_chip
*chip
;
470 if (!(chip
= tpm_register_hardware(dev
, &tpm_tis
)))
473 chip
->vendor
.iobase
= ioremap(start
, len
);
474 if (!chip
->vendor
.iobase
) {
479 /* Default timeouts */
480 chip
->vendor
.timeout_a
= msecs_to_jiffies(TIS_SHORT_TIMEOUT
);
481 chip
->vendor
.timeout_b
= msecs_to_jiffies(TIS_LONG_TIMEOUT
);
482 chip
->vendor
.timeout_c
= msecs_to_jiffies(TIS_SHORT_TIMEOUT
);
483 chip
->vendor
.timeout_d
= msecs_to_jiffies(TIS_SHORT_TIMEOUT
);
485 if (request_locality(chip
, 0) != 0) {
/* Identify the chip: vendor/device id and revision. */
490 vendor
= ioread32(chip
->vendor
.iobase
+ TPM_DID_VID(0));
493 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
494 vendor
>> 16, ioread8(chip
->vendor
.iobase
+ TPM_RID(0)));
497 dev_info(dev
, "Intel iTPM workaround enabled\n");
500 /* Figure out the capabilities */
502 ioread32(chip
->vendor
.iobase
+
503 TPM_INTF_CAPS(chip
->vendor
.locality
));
504 dev_dbg(dev
, "TPM interface capabilities (0x%x):\n",
506 if (intfcaps
& TPM_INTF_BURST_COUNT_STATIC
)
507 dev_dbg(dev
, "\tBurst Count Static\n");
508 if (intfcaps
& TPM_INTF_CMD_READY_INT
)
509 dev_dbg(dev
, "\tCommand Ready Int Support\n");
510 if (intfcaps
& TPM_INTF_INT_EDGE_FALLING
)
511 dev_dbg(dev
, "\tInterrupt Edge Falling\n");
512 if (intfcaps
& TPM_INTF_INT_EDGE_RISING
)
513 dev_dbg(dev
, "\tInterrupt Edge Rising\n");
514 if (intfcaps
& TPM_INTF_INT_LEVEL_LOW
)
515 dev_dbg(dev
, "\tInterrupt Level Low\n");
516 if (intfcaps
& TPM_INTF_INT_LEVEL_HIGH
)
517 dev_dbg(dev
, "\tInterrupt Level High\n");
518 if (intfcaps
& TPM_INTF_LOCALITY_CHANGE_INT
)
519 dev_dbg(dev
, "\tLocality Change Int Support\n");
520 if (intfcaps
& TPM_INTF_STS_VALID_INT
)
521 dev_dbg(dev
, "\tSts Valid Int Support\n");
522 if (intfcaps
& TPM_INTF_DATA_AVAIL_INT
)
523 dev_dbg(dev
, "\tData Avail Int Support\n");
525 /* INTERRUPT Setup */
526 init_waitqueue_head(&chip
->vendor
.read_queue
);
527 init_waitqueue_head(&chip
->vendor
.int_queue
);
530 ioread32(chip
->vendor
.iobase
+
531 TPM_INT_ENABLE(chip
->vendor
.locality
));
533 intmask
|= TPM_INTF_CMD_READY_INT
534 | TPM_INTF_LOCALITY_CHANGE_INT
| TPM_INTF_DATA_AVAIL_INT
535 | TPM_INTF_STS_VALID_INT
;
538 chip
->vendor
.iobase
+
539 TPM_INT_ENABLE(chip
->vendor
.locality
));
541 chip
->vendor
.irq
= irq
;
/* No irq supplied: probe vectors 3..15 to find one that fires. */
542 if (interrupts
&& !chip
->vendor
.irq
) {
544 ioread8(chip
->vendor
.iobase
+
545 TPM_INT_VECTOR(chip
->vendor
.locality
));
547 for (i
= 3; i
< 16 && chip
->vendor
.irq
== 0; i
++) {
548 iowrite8(i
, chip
->vendor
.iobase
+
549 TPM_INT_VECTOR(chip
->vendor
.locality
));
551 (i
, tis_int_probe
, IRQF_SHARED
,
552 chip
->vendor
.miscdev
.name
, chip
) != 0) {
554 "Unable to request irq: %d for probe\n",
559 /* Clear all existing */
561 (chip
->vendor
.iobase
+
562 TPM_INT_STATUS(chip
->vendor
.locality
)),
563 chip
->vendor
.iobase
+
564 TPM_INT_STATUS(chip
->vendor
.locality
));
567 iowrite32(intmask
| TPM_GLOBAL_INT_ENABLE
,
568 chip
->vendor
.iobase
+
569 TPM_INT_ENABLE(chip
->vendor
.locality
));
571 /* Generate Interrupts */
572 tpm_gen_interrupt(chip
);
576 chip
->vendor
.iobase
+
577 TPM_INT_ENABLE(chip
->vendor
.locality
));
/* An irq is known (given or probed): install the real handler. */
581 if (chip
->vendor
.irq
) {
582 iowrite8(chip
->vendor
.irq
,
583 chip
->vendor
.iobase
+
584 TPM_INT_VECTOR(chip
->vendor
.locality
));
586 (chip
->vendor
.irq
, tis_int_handler
, IRQF_SHARED
,
587 chip
->vendor
.miscdev
.name
, chip
) != 0) {
589 "Unable to request irq: %d for use\n",
591 chip
->vendor
.irq
= 0;
593 /* Clear all existing */
595 (chip
->vendor
.iobase
+
596 TPM_INT_STATUS(chip
->vendor
.locality
)),
597 chip
->vendor
.iobase
+
598 TPM_INT_STATUS(chip
->vendor
.locality
));
601 iowrite32(intmask
| TPM_GLOBAL_INT_ENABLE
,
602 chip
->vendor
.iobase
+
603 TPM_INT_ENABLE(chip
->vendor
.locality
));
/* Publish the chip and run the core start-up handshake. */
607 INIT_LIST_HEAD(&chip
->vendor
.list
);
608 spin_lock(&tis_lock
);
609 list_add(&chip
->vendor
.list
, &tis_chips
);
610 spin_unlock(&tis_lock
);
612 tpm_get_timeouts(chip
);
613 tpm_continue_selftest(chip
);
/* Error path: unmap and unregister. */
617 if (chip
->vendor
.iobase
)
618 iounmap(chip
->vendor
.iobase
);
619 tpm_remove_hardware(chip
->dev
);
/* PNP probe: extract MMIO range and optional irq from PNP resources, apply
 * the iTPM quirk when detected, and defer to tpm_tis_init().
 * NOTE(review): the "itpm = 1;" body of the is_itpm branch appears missing
 * from this extraction. */
623 static int __devinit
tpm_tis_pnp_init(struct pnp_dev
*pnp_dev
,
624 const struct pnp_device_id
*pnp_id
)
626 resource_size_t start
, len
;
627 unsigned int irq
= 0;
629 start
= pnp_mem_start(pnp_dev
, 0);
630 len
= pnp_mem_len(pnp_dev
, 0);
632 if (pnp_irq_valid(pnp_dev
, 0))
633 irq
= pnp_irq(pnp_dev
, 0);
637 if (is_itpm(pnp_dev
))
640 return tpm_tis_init(&pnp_dev
->dev
, start
, len
, irq
);
643 static int tpm_tis_pnp_suspend(struct pnp_dev
*dev
, pm_message_t msg
)
645 return tpm_pm_suspend(&dev
->dev
, msg
);
/* PNP resume hook: core resume, then (on success, per the missing check)
 * re-run the continue-selftest handshake.  NOTE(review): the ret declaration,
 * success check and return are missing from this extraction. */
648 static int tpm_tis_pnp_resume(struct pnp_dev
*dev
)
650 struct tpm_chip
*chip
= pnp_get_drvdata(dev
);
653 ret
= tpm_pm_resume(&dev
->dev
);
655 tpm_continue_selftest(chip
);
/* PNP match table: known TIS HIDs, one runtime-fillable "user specified"
 * slot (see the hid module parameter) and the zero terminator.
 * NOTE(review): one entry (original line 668) is missing from this
 * extraction. */
660 static struct pnp_device_id tpm_pnp_tbl
[] __devinitdata
= {
661 {"PNP0C31", 0}, /* TPM */
662 {"ATM1200", 0}, /* Atmel */
663 {"IFX0102", 0}, /* Infineon */
664 {"BCM0101", 0}, /* Broadcom */
665 {"BCM0102", 0}, /* Broadcom */
666 {"NSC1200", 0}, /* National */
667 {"ICO0102", 0}, /* Intel */
669 {"", 0}, /* User Specified */
670 {"", 0} /* Terminator */
672 MODULE_DEVICE_TABLE(pnp
, tpm_pnp_tbl
);
/* PNP remove hook: release vendor-specific resources for this chip.
 * NOTE(review): the tail of the function (presumably freeing the chip) is
 * missing from this extraction. */
674 static __devexit
void tpm_tis_pnp_remove(struct pnp_dev
*dev
)
676 struct tpm_chip
*chip
= pnp_get_drvdata(dev
);
678 tpm_dev_vendor_release(chip
);
/* PNP driver binding the hooks above.  NOTE(review): the .name initializer
 * line is missing from this extraction. */
684 static struct pnp_driver tis_pnp_driver
= {
686 .id_table
= tpm_pnp_tbl
,
687 .probe
= tpm_tis_pnp_init
,
688 .suspend
= tpm_tis_pnp_suspend
,
689 .resume
= tpm_tis_pnp_resume
,
690 .remove
= tpm_tis_pnp_remove
,
/*
 * Index of the user-specified HID slot in tpm_pnp_tbl: the last entry is
 * the zero terminator, so the user slot sits at count - 2.  Parenthesize
 * the whole expansion so the macro is safe in any expression context
 * (the previous unparenthesized form would mis-bind next to other
 * operators, e.g. inside a multiplication or a wider arithmetic term).
 */
#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
/* "hid" module parameter: writes a user-chosen HID string directly into the
 * user slot of tpm_pnp_tbl so the driver probes an extra device ID. */
694 module_param_string(hid
, tpm_pnp_tbl
[TIS_HID_USR_IDX
].id
,
695 sizeof(tpm_pnp_tbl
[TIS_HID_USR_IDX
].id
), 0444);
696 MODULE_PARM_DESC(hid
, "Set additional specific HID for this driver to probe");
698 static int tpm_tis_suspend(struct platform_device
*dev
, pm_message_t msg
)
700 return tpm_pm_suspend(&dev
->dev
, msg
);
703 static int tpm_tis_resume(struct platform_device
*dev
)
705 return tpm_pm_resume(&dev
->dev
);
/* Platform driver and device used in force mode (no PNP/ACPI entry).
 * NOTE(review): the .driver.name initializer line is missing from this
 * extraction. */
707 static struct platform_driver tis_drv
= {
710 .owner
= THIS_MODULE
,
712 .suspend
= tpm_tis_suspend
,
713 .resume
= tpm_tis_resume
,
/* Created by init_tis() when force mode is selected. */
716 static struct platform_device
*pdev
;
/* "force" parameter and module init: normally just registers the PNP driver;
 * in force mode registers a platform driver/device at the fixed TIS_MEM_BASE
 * window instead.  NOTE(review): the "static bool force;" declaration, the
 * force-mode branch condition, rc checks and returns are missing from this
 * extraction — fragment only. */
719 module_param(force
, bool, 0444);
720 MODULE_PARM_DESC(force
, "Force device probe rather than using ACPI entry");
721 static int __init
init_tis(void)
726 return pnp_register_driver(&tis_pnp_driver
);
/* Force mode: hand-rolled platform device at the fixed MMIO base. */
729 rc
= platform_driver_register(&tis_drv
);
732 if (IS_ERR(pdev
=platform_device_register_simple("tpm_tis", -1, NULL
, 0)))
733 return PTR_ERR(pdev
);
734 if((rc
=tpm_tis_init(&pdev
->dev
, TIS_MEM_BASE
, TIS_MEM_LEN
, 0)) != 0) {
735 platform_device_unregister(pdev
);
736 platform_driver_unregister(&tis_drv
);
/* Module exit: for every registered chip, unregister it from the core,
 * mask its interrupts (clear TPM_GLOBAL_INT_ENABLE in TPM_INT_ENABLE),
 * force-release its locality, free its irq, then tear down the PNP (and,
 * in force mode, platform) registration.  NOTE(review): the iowrite32 call
 * line, iounmap/list_del and the force-mode branch are missing from this
 * extraction — fragment only. */
741 static void __exit
cleanup_tis(void)
743 struct tpm_vendor_specific
*i
, *j
;
744 struct tpm_chip
*chip
;
745 spin_lock(&tis_lock
);
746 list_for_each_entry_safe(i
, j
, &tis_chips
, list
) {
747 chip
= to_tpm_chip(i
);
748 tpm_remove_hardware(chip
->dev
);
/* Mask the chip's interrupt generation before tearing it down. */
749 iowrite32(~TPM_GLOBAL_INT_ENABLE
&
750 ioread32(chip
->vendor
.iobase
+
751 TPM_INT_ENABLE(chip
->vendor
.
753 chip
->vendor
.iobase
+
754 TPM_INT_ENABLE(chip
->vendor
.locality
));
/* force=1: release the locality unconditionally. */
755 release_locality(chip
, chip
->vendor
.locality
, 1);
756 if (chip
->vendor
.irq
)
757 free_irq(chip
->vendor
.irq
, chip
);
761 spin_unlock(&tis_lock
);
764 pnp_unregister_driver(&tis_pnp_driver
);
768 platform_device_unregister(pdev
);
769 platform_driver_unregister(&tis_drv
);
/* Standard module boilerplate: entry/exit points and metadata. */
772 module_init(init_tis
);
773 module_exit(cleanup_tis
);
774 MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
775 MODULE_DESCRIPTION("TPM Driver");
776 MODULE_VERSION("2.0");
777 MODULE_LICENSE("GPL");