arch/x86/kernel/tce_64.c
/*
 * This file manages the translation entries for the IBM Calgary IOMMU.
 *
 * Derived from arch/powerpc/platforms/pseries/iommu.c
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Jon Mason <jdmason@us.ibm.com>
 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
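
/*
 * Each translation entry (TCE) is a 64-bit value stored big-endian:
 * the real page number sits at TCE_RPN_SHIFT and the low bits carry
 * the read/write permission flags (layout defined in <asm/tce.h>).
 */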
/* flush a tce at 'tceaddr' to main memory */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		clflush(tceaddr);
	else
		wbinvd();
}
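
/*
 * Fill 'npages' consecutive TCEs starting at 'index' with mappings for
 * the contiguous virtual range beginning at 'uaddr'. Every entry is
 * marked readable; write access is granted unless the DMA direction is
 * DMA_TO_DEVICE. Each entry is flushed to memory as it is written.
 */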
void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;

		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}
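
/* Clear 'npages' TCEs starting at 'index', flushing each entry as it is zeroed. */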
void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}
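
/*
 * specified_table_size holds the table order (0-7) chosen by the Calgary
 * setup code; e.g. order 0 gives 8K entries and order 7 gives
 * (1 << 7) << 13 = 1M entries.
 */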
static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	/*
	 * size is the order of the table, 0-7
	 * smallest table is 8K entries, so shift result by 13 to
	 * multiply by 8K
	 */
	return (1 << size) << 13;
}
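
/*
 * Initialize the software state of 'tbl' for 'dev': record the bus
 * number and table size, and allocate a zeroed bitmap with one bit per
 * TCE to track which entries are in use.
 */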
static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
{
	unsigned int bitmapsz;
	unsigned long bmppages;
	int ret;

	tbl->it_busno = dev->bus->number;

	/* set the tce table size - measured in entries */
	tbl->it_size = table_size_to_number_of_entries(specified_table_size);

	/*
	 * the allocation bitmap needs one bit per table entry, so it
	 * takes it_size / BITS_PER_BYTE bytes
	 */
	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
	if (!bmppages) {
		printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
		ret = -ENOMEM;
		goto done;
	}

	tbl->it_map = (unsigned long*)bmppages;

	memset(tbl->it_map, 0, bitmapsz);

	tbl->it_hint = 0;

	spin_lock_init(&tbl->it_lock);

	return 0;

done:
	return ret;
}
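
/*
 * Allocate an iommu_table for 'dev', initialize it via
 * tce_table_setparms(), record 'bbar' and attach the table to the
 * device's bus with set_pci_iommu(). BUG()s if the bus already has one.
 */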
int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	if (pci_iommu(dev->bus)) {
		printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
		       dev, pci_iommu(dev->bus));
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	set_pci_iommu(dev->bus, tbl);

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}
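
/*
 * Allocate the TCE table itself from low bootmem, sized at
 * entries * TCE_ENTRY_SIZE bytes and aligned to its own size.
 */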
void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return __alloc_bootmem_low(size, size, 0);
}
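
/* Release a table obtained from alloc_tce_table(); a NULL 'tbl' is a no-op. */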
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	free_bootmem(__pa(tbl), size);
}