/*
 * This file manages the translation entries for the IBM Calgary IOMMU.
 *
 * Derived from arch/powerpc/platforms/pseries/iommu.c
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Jon Mason <jdmason@us.ibm.com>
 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
/* flush a tce at 'tceaddr' to main memory */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		asm volatile("clflush (%0)" :: "r" (tceaddr));
	else
		asm volatile("wbinvd":::"memory");
}
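/*
 * Set up 'npages' TCEs starting at 'index': each entry maps one page of
 * the buffer at 'uaddr' and carries read/write permission bits derived
 * from the DMA direction.  Entries are stored big-endian and flushed to
 * main memory so the IOMMU sees them.
 */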
void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}
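/*
 * Tear down 'npages' TCEs starting at 'index': each entry is cleared and
 * flushed back to memory so the corresponding pages are no longer mapped.
 */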
void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}
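/*
 * Table sizes: order 0 is 8K entries, order 7 is 1M entries; with the
 * 8-byte TCEs used above that is 64KB to 8MB of table.
 */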
static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	/*
	 * size is the order of the table, 0-7
	 * smallest table is 8K entries, so shift result by 13 to
	 * multiply by 8K
	 */
	return (1 << size) << 13;
}
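/*
 * Fill in the software parameters of the table: owning bus number, size
 * in entries, and the allocation bitmap (one bit per TCE entry).
 */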
static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
{
	unsigned int bitmapsz;
	unsigned long bmppages;
	int ret;

	tbl->it_busno = dev->bus->number;

	/* set the tce table size - measured in entries */
	tbl->it_size = table_size_to_number_of_entries(specified_table_size);

	/*
	 * the bitmap needs one bit per table entry, so its size in bytes
	 * is the number of entries divided by BITS_PER_BYTE
	 */
	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
	if (!bmppages) {
		printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
		ret = -ENOMEM;
		goto done;
	}

	tbl->it_map = (unsigned long*)bmppages;

	memset(tbl->it_map, 0, bitmapsz);

	tbl->it_hint = 0;

	spin_lock_init(&tbl->it_lock);

	return 0;

done:
	return ret;
}
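/*
 * Allocate and initialize the iommu_table for one Calgary PHB, stash the
 * register-space pointer 'bbar' in it, and attach the table to the
 * device's sysdata pointer.
 */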
int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	if (dev->sysdata) {
		printk(KERN_ERR "Calgary: dev %p has sysdata %p\n",
		       dev, dev->sysdata);
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	/*
	 * NUMA is already using the bus's sysdata pointer, so we use
	 * the bus's pci_dev's sysdata instead.
	 */
	dev->sysdata = tbl;

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}
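/*
 * Allocate one TCE table from low bootmem; the second argument to
 * __alloc_bootmem_low makes the table naturally aligned to its own size.
 */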
void* alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return __alloc_bootmem_low(size, size, 0);
}
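/* Return a table obtained from alloc_tce_table() to the bootmem allocator. */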
void free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	free_bootmem(__pa(tbl), size);
}