/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));
bool amd_iommu_dump;

static int __initdata amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

/*
 * The ACPI table parsing functions set this variable on an error
 */
static int __initdata amd_iommu_init_err;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;
static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */
/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);
static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                         get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
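/*
 * Worked example (not from the original source): the tables are indexed by
 * device id 0..amd_iommu_last_bdf and rounded up to a power-of-two number
 * of pages. Assuming the full 16-bit device id space (amd_iommu_last_bdf ==
 * 0xffff) and a 32-byte device table entry, tbl_size() comes to
 * 0x10000 * 32 bytes = 2 MiB for the device table.
 */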
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}
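/*
 * As the helpers above show, the l1 and l2 register spaces are reached
 * indirectly through address/data port pairs in PCI config space
 * (0xf8/0xfc for l1, 0xf0/0xf4 for l2). Setting the write-enable bit in
 * the address port (bit 31 for l1, bit 8 for l2) turns the following
 * data port access into a write instead of a read.
 */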
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}
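/*
 * Note on the encoding above: the low bits of the device table base
 * register carry the table size expressed in 4 KiB pages minus one, which
 * is what the (dev_table_size >> 12) - 1 term computes.
 */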
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC", NULL
        };
        int i;

        printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
               dev_name(&iommu->dev->dev), iommu->cap_ptr);

        if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
                printk(KERN_CONT " extended features: ");
                for (i = 0; feat_str[i]; ++i)
                        if (iommu_feature(iommu, (1ULL << i)))
                                printk(KERN_CONT " %s", feat_str[i]);
        }
        printk(KERN_CONT "\n");

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
                pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
                       address);
                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        return 0x04 << (*ivhd >> 6);
}
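/*
 * The two most significant bits of an entry's type byte encode its size:
 * 0x04 << 0 = 4, << 1 = 8, << 2 = 16 and << 3 = 32 bytes. This is what
 * lets the parser step over entry types it does not recognize.
 */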
/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                               PCI_SLOT(h->devid),
                               PCI_FUNC(h->devid),
                               h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                amd_iommu_init_err = -ENODEV;
                return 0;
        }

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                             get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

        return cmd_buf;
}
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
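/*
 * The reset sequence above first stops command fetching, then zeroes the
 * ring buffer head and tail pointers so hardware and driver agree on an
 * empty buffer before fetching is switched back on.
 */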
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
        iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}
/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}
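/*
 * A device table entry is stored as an array of 32-bit words, and the
 * helpers above treat a DEV_ENTRY_* constant as a flat bit number:
 * bit >> 5 selects the word, bit & 0x1f the bit within that word.
 */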
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
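/*
 * The erratum 63 workaround, as implemented above: when the two SYSMGT
 * bits of a device table entry decode to 01b, the entry's IW (write
 * permission) bit is set as well so writes remain permitted on affected
 * hardware.
 */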
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We can only configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}
/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
        int i, j;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        /* read extended feature bits */
        low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

        iommu->features = ((u64)high << 32) | low;

        if (!is_rd890_iommu(iommu->dev))
                return;

        /*
         * Some rd890 systems may not be fully reconfigured by the BIOS, so
         * it's necessary for us to store this information so it can be
         * reprogrammed on resume
         */
        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                              &iommu->stored_addr_lo);
        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                              &iommu->stored_addr_hi);

        /* Low bit locks writes to configuration space */
        iommu->stored_addr_lo &= ~1;

        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

        for (i = 0; i < 0x83; i++)
                iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;
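
        /*
         * Range entries come in pairs: a *_SELECT_RANGE_START,
         * *_ALIAS_RANGE or *_EXT_SELECT_RANGE entry stashes devid_start,
         * flags, ext_flags and the alias target in the locals above, and
         * the following DEV_RANGE_END entry applies them to every device
         * id in the range.
         */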
        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }
}
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u32 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
        if (!iommu->dev)
                return 1;

        iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
                                                PCI_DEVFN(0, 0));

        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        return pci_enable_device(iommu->dev);
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL) {
                                amd_iommu_init_err = -ENOMEM;
                                return 0;
                        }

                        ret = init_iommu_one(iommu, h);
                        if (ret) {
                                amd_iommu_init_err = ret;
                                return 0;
                        }
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        r = pci_enable_msi(iommu->dev);
        if (r)
                return r;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu->dev);

        if (r) {
                pci_disable_msi(iommu->dev);
                return r;
        }

        iommu->int_enabled = true;

        return 0;
}
static int iommu_init_msi(struct amd_iommu *iommu)
{
        int ret;

        if (iommu->int_enabled)
                goto enable_faults;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;

        if (ret)
                return ret;

enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}
/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
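/*
 * Note on the e->prot assignment in init_unity_map_range() above: bit 0 of
 * the IVMD flags is the unity-map marker (IVMD_FLAG_UNITY_MAP), so shifting
 * the flags right by one leaves the read/write permission bits as the
 * protection value for the range.
 */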
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}
/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
        int i, j;
        u32 ioc_feature_control;
        struct pci_dev *pdev = iommu->root_pdev;

        /* RD890 BIOSes may not have completely reconfigured the iommu */
        if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;

        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */

        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
        pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

        /* Enable the iommu */
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
                               iommu->stored_addr_hi);

        /* Restore the l1 indirect regs for each of the 6 l1s */
        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

        /* Restore the l2 indirect regs */
        for (i = 0; i < 0x83; i++)
                iommu_write_l2(iommu, i, iommu->stored_l2[i]);

        /* Lock PCI setup registers */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo | 1);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_init_msi(iommu);
                iommu_enable(iommu);
                iommu_flush_all_caches(iommu);
        }
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_apply_resume_quirks(iommu);

        /* re-load the hardware */
        enable_iommus();

        /*
         * we have to flush after the IOMMUs are enabled because a
         * disabled IOMMU will never execute the commands we send
         */
        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);
}

static int amd_iommu_suspend(void)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *      1 pass) Find the highest PCI device id the driver has to handle.
 *              Upon this information the size of the data structures is
 *              determined that needs to be allocated.
 *
 *      2 pass) Initialize the data structures just allocated with the
 *              information in the ACPI table about available AMD IOMMUs
 *              in the system. It also maps the PCI devices in the
 *              system to specific IOMMUs
 *
 *      3 pass) After the basic data structures are allocated and
 *              initialized we update them with information about memory
 *              remapping requirements parsed out of the ACPI table in
 *              this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
        int i, ret = 0;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Upon this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        ret = amd_iommu_init_err;
        if (ret)
                goto out;

        dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                       get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                                                         get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;
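
        /*
         * The bitmap just allocated tracks protection domains, one bit
         * each: with up to 2^16 domains that is 65536 / 8 = 8 KiB.
         */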
        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        spin_lock_init(&amd_iommu_pd_lock);
        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (amd_iommu_init_err) {
                ret = amd_iommu_init_err;
                goto free;
        }

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        if (amd_iommu_init_err) {
                ret = amd_iommu_init_err;
                goto free;
        }

        ret = amd_iommu_init_devices();
        if (ret)
                goto free;

        enable_iommus();

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();

        if (ret)
                goto free_disable;

        amd_iommu_init_api();

        amd_iommu_init_notifier();

        register_syscore_ops(&amd_iommu_syscore_ops);

        if (iommu_pass_through)
                goto out;

        if (amd_iommu_unmap_flush)
                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

        x86_platform.iommu_shutdown = disable_iommus;
out:
        return ret;

free_disable:
        disable_iommus();

free:
        amd_iommu_uninit_devices();

        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

#ifdef CONFIG_GART_IOMMU
        /*
         * We failed to initialize the AMD IOMMU - try fallback to GART
         * if possible.
         */
        gart_iommu_init();
#endif

        goto out;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

int __init amd_iommu_detect(void)
{
        if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return -ENODEV;

        if (amd_iommu_disabled)
                return -ENODEV;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
                x86_init.iommu.iommu_init = amd_iommu_init;

                /* Make sure ACS will be enabled */
                pci_request_acs();
                return 1;
        }
        return -ENODEV;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        amd_iommu_disabled = true;
        }

        return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
IOMMU_INIT_FINISH(amd_iommu_detect,
                  gart_iommu_hole_init,
                  0,
                  0);