/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED    0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48
#define IVHD_DEV_ACPI_HID               0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT    100000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 efr_attr;

        /* Following only valid on IVHD type 11h and 40h */
        u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
        u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
        u32 hidh;
        u64 cid;
        u8 uidf;
        u8 uidl;
        u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

enum iommu_init_state {
        IOMMU_START_STATE,
        IOMMU_IVRS_DETECTED,
        IOMMU_ACPI_FINISHED,
        IOMMU_ENABLED,
        IOMMU_PCI_INIT,
        IOMMU_INTERRUPTS_EN,
        IOMMU_DMA_OPS,
        IOMMU_INITIALIZED,
        IOMMU_NOT_FOUND,
        IOMMU_INIT_ERROR,
        IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE          4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
        return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
        iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        if (ctrl & (1<<CONTROL_IOMMU_EN))
                iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                         get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
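
/*
 * Illustrative example (not part of the driver; assumes a 32-byte device
 * table entry and 4 KiB pages): with amd_iommu_last_bdf == 0xffff,
 * (0xffff + 1) * 32 == 2 MiB, get_order(2 MiB) == 9, so tbl_size()
 * returns 1UL << (12 + 9) == 2 MiB.
 */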

int amd_iommu_get_num_iommus(void)
{
        return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}
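
/*
 * Sketch of the indirect access protocol used above (for reference, as
 * implied by the accessors): the register address (plus the L1 index for
 * L1 space) is written to an address port in the IOMMU's PCI config space
 * (0xf0 for L2, 0xf8 for L1), and the data is then transferred through
 * the corresponding data port (0xf4 or 0xfc). Setting the write-enable
 * bit in the address port (bit 8 for L2, bit 31 for L1) arms the write
 * path before the data port is written.
 */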

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                        &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                        &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = iommu_virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                        &entry, sizeof(entry));
}
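
/*
 * Note on the encoding above (illustrative): the low bits of the device
 * table base register hold the table size in 4 KiB units minus one, hence
 * the (dev_table_size >> 12) - 1. A 2 MiB table is encoded as 0x1ff.
 */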

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~CTRL_INV_TO_MASK;
        ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        if (!iommu->mmio_base)
                return;

        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU GA_LOG */
        iommu_feature_disable(iommu, CONTROL_GALOG_EN);
        iommu_feature_disable(iommu, CONTROL_GAINT_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
        if (!request_mem_region(address, end, "amd_iommu")) {
                pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
                        address, end);
                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
        u32 size = 0;

        switch (h->type) {
        case 0x10:
                size = 24;
                break;
        case 0x11:
        case 0x40:
                size = 40;
                break;
        }
        return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        u32 type = ((struct ivhd_entry *)ivhd)->type;

        if (type < 0x80) {
                return 0x04 << (*ivhd >> 6);
        } else if (type == IVHD_DEV_ACPI_HID) {
                /* For ACPI_HID, offset 21 is uid len */
                return *((u8 *)ivhd + 21) + 22;
        }
        return 0;
}
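
/*
 * Worked example (for reference): for fixed-size entries the length is
 * encoded in the top two bits of the type byte, so 0x04 << (type >> 6)
 * yields 4 bytes for IVHD_DEV_SELECT (0x02) and 8 bytes for
 * IVHD_DEV_ALIAS (0x42) or IVHD_DEV_SPECIAL (0x48). Variable-length
 * ACPI_HID entries add their UID length (at offset 21) to the 22-byte
 * fixed part.
 */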

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        u32 ivhd_size = get_ivhd_header_size(h);

        if (!ivhd_size) {
                pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_ALL:
                        /* Use maximum BDF value for DEV_ALL */
                        update_last_devid(0xffff);
                        break;
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table;

        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
                return -ENODEV;
        }

        return 0;
}
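
/*
 * The check above relies on the ACPI convention that all bytes of a
 * table, including the checksum byte itself, sum to zero modulo 256;
 * e.g. if the remaining bytes sum to 0x37, the firmware stores 0xc9.
 */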

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        p += IVRS_HEADER_LENGTH;
        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                if (h->type == amd_iommu_target_ivhd_type) {
                        int ret = find_last_devid_from_ivhd(h);

                        if (ret)
                                return ret;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(CMD_BUFFER_SIZE));

        return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        iommu->cmd_buf_head = 0;
        iommu->cmd_buf_tail = 0;

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = iommu_virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(EVT_BUFFER_SIZE));

        return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
        iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(PPR_LOG_SIZE));

        return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (iommu->ppr_log == NULL)
                return;

        entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
        if (iommu->ppr_log == NULL)
                return;

        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        if (iommu->ga_log)
                free_pages((unsigned long)iommu->ga_log,
                            get_order(GA_LOG_SIZE));
        if (iommu->ga_log_tail)
                free_pages((unsigned long)iommu->ga_log_tail,
                            get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        u32 status, i;

        if (!iommu->ga_log)
                return -EINVAL;

        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        /* Check if already running */
        if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                return 0;

        iommu_feature_enable(iommu, CONTROL_GAINT_EN);
        iommu_feature_enable(iommu, CONTROL_GALOG_EN);

        for (i = 0; i < LOOP_TIMEOUT; ++i) {
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                        break;
        }

        if (i >= LOOP_TIMEOUT)
                return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
        return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                return 0;

        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(GA_LOG_SIZE));
        if (!iommu->ga_log)
                goto err_out;

        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(8));
        if (!iommu->ga_log_tail)
                goto err_out;

        entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
        entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
                 (BIT_ULL(52)-1)) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

        return 0;
err_out:
        free_ga_log(iommu);
        return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
        int ret = 0;

#ifdef CONFIG_IRQ_REMAP
        /* Note: We have already checked GASup from IVRS table.
         *       Now, we need to make sure that GAMSup is set.
         */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
            !iommu_feature(iommu, FEATURE_GAM_VAPIC))
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

        ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

        return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        /*
         * XT mode (32-bit APIC destination ID) requires
         * GA mode (128-bit IRTE support) as a prerequisite.
         */
        if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
            amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
                iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
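
/*
 * Illustrative example: a device table entry is an array of u64 words,
 * so a flat bit number is split into a word index (bit >> 6) and a bit
 * offset within that word (bit & 0x3f). Bit 96 therefore lands in
 * data[1] at bit position 32; bit 3 in data[0] at bit position 3.
 */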

static bool copy_device_table(void)
{
        u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
        struct dev_table_entry *old_devtb = NULL;
        u32 lo, hi, devid, old_devtb_size;
        phys_addr_t old_devtb_phys;
        struct amd_iommu *iommu;
        u16 dom_id, dte_v, irq_v;
        gfp_t gfp_flag;
        u64 tmp;

        if (!amd_iommu_pre_enabled)
                return false;

        pr_warn("Translation is already enabled - trying to copy translation structures\n");
        for_each_iommu(iommu) {
                /* All IOMMUs should use the same device table with the same size */
                lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
                hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
                entry = (((u64) hi) << 32) + lo;
                if (last_entry && last_entry != entry) {
                        pr_err("IOMMU:%d should use the same dev table as others!\n",
                                iommu->index);
                        return false;
                }
                last_entry = entry;

                old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
                if (old_devtb_size != dev_table_size) {
                        pr_err("The device table size of IOMMU:%d is not expected!\n",
                                iommu->index);
                        return false;
                }
        }

        old_devtb_phys = entry & PAGE_MASK;
        if (old_devtb_phys >= 0x100000000ULL) {
                pr_err("The address of old device table is above 4G, not trustworthy!\n");
                return false;
        }
        old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
        if (!old_devtb)
                return false;

        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
        old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
                                get_order(dev_table_size));
        if (old_dev_tbl_cpy == NULL) {
                pr_err("Failed to allocate memory for copying old device table!\n");
                return false;
        }

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                old_dev_tbl_cpy[devid] = old_devtb[devid];
                dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
                dte_v  = old_devtb[devid].data[0] & DTE_FLAG_V;

                if (dte_v && dom_id) {
                        old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
                        old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
                        __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
                        /* If gcr3 table existed, mask it out */
                        if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
                                tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
                                tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
                                old_dev_tbl_cpy[devid].data[1] &= ~tmp;
                                tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
                                tmp |= DTE_FLAG_GV;
                                old_dev_tbl_cpy[devid].data[0] &= ~tmp;
                        }
                }

                irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
                int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
                int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
                if (irq_v && (int_ctl || int_tab_len)) {
                        if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
                            (int_tab_len != DTE_IRQ_TABLE_LEN)) {
                                pr_err("Wrong old irq remapping flag: %#x\n", devid);
                                return false;
                        }

                        old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
                }
        }
        memunmap(old_devtb);

        return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
        struct devid_map *entry;
        struct list_head *list;

        if (type == IVHD_SPECIAL_IOAPIC)
                list = &ioapic_map;
        else if (type == IVHD_SPECIAL_HPET)
                list = &hpet_map;
        else
                return -EINVAL;

        list_for_each_entry(entry, list, list) {
                if (!(entry->id == id && entry->cmd_line))
                        continue;

                pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

                *devid = entry->devid;

                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->id       = id;
        entry->devid    = *devid;
        entry->cmd_line = cmd_line;

        list_add_tail(&entry->list, list);

        return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
                                      bool cmd_line)
{
        struct acpihid_map_entry *entry;
        struct list_head *list = &acpihid_map;

        list_for_each_entry(entry, list, list) {
                if (strcmp(entry->hid, hid) ||
                    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
                    !entry->cmd_line)
                        continue;

                pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
                        hid, uid);
                *devid = entry->devid;
                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        memcpy(entry->uid, uid, strlen(uid));
        memcpy(entry->hid, hid, strlen(hid));
        entry->devid = *devid;
        entry->cmd_line = cmd_line;
        entry->root_devid = (entry->devid & (~0x7));

        pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
                entry->cmd_line ? "cmd" : "ivrs",
                entry->hid, entry->uid, entry->root_devid);

        list_add_tail(&entry->list, list);
        return 0;
}
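
/*
 * Note on root_devid above: a 16-bit devid encodes bus[15:8],
 * device[7:3] and function[2:0], so masking with ~0x7 clears the
 * function bits and makes root_devid refer to function 0 of the
 * same device.
 */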

static int __init add_early_maps(void)
{
        int i, ret;

        for (i = 0; i < early_ioapic_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                                         early_ioapic_map[i].id,
                                         &early_ioapic_map[i].devid,
                                         early_ioapic_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_hpet_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_HPET,
                                         early_hpet_map[i].id,
                                         &early_hpet_map[i].devid,
                                         early_hpet_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_acpihid_map_size; ++i) {
                ret = add_acpi_hid_device(early_acpihid_map[i].hid,
                                          early_acpihid_map[i].uid,
                                          &early_acpihid_map[i].devid,
                                          early_acpihid_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We only can configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                       struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;
        u32 ivhd_size;
        int ret;

        ret = add_early_maps();
        if (ret)
                return ret;

        amd_iommu_apply_ivrs_quirks();

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        ivhd_size = get_ivhd_header_size(h);
        if (!ivhd_size) {
                pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

                        for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                case IVHD_DEV_SPECIAL: {
                        u8 handle, type;
                        const char *var;
                        u16 devid;
                        int ret;

                        handle = e->ext & 0xff;
                        devid  = (e->ext >> 8) & 0xffff;
                        type   = (e->ext >> 24) & 0xff;

                        if (type == IVHD_SPECIAL_IOAPIC)
                                var = "IOAPIC";
                        else if (type == IVHD_SPECIAL_HPET)
                                var = "HPET";
                        else
                                var = "UNKNOWN";

                        DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
                                    var, (int)handle,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        ret = add_special_device(type, handle, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                case IVHD_DEV_ACPI_HID: {
                        u16 devid;
                        u8 hid[ACPIHID_HID_LEN] = {0};
                        u8 uid[ACPIHID_UID_LEN] = {0};
                        int ret;

                        if (h->type != 0x40) {
                                pr_err(FW_BUG "Invalid IVHD device type %#x\n",
                                       e->type);
                                break;
                        }

                        memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
                        hid[ACPIHID_HID_LEN - 1] = '\0';

                        if (!(*hid)) {
                                pr_err(FW_BUG "Invalid HID.\n");
                                break;
                        }

                        switch (e->uidf) {
                        case UID_NOT_PRESENT:

                                if (e->uidl != 0)
                                        pr_warn(FW_BUG "Invalid UID length.\n");

                                break;
                        case UID_IS_INTEGER:

                                sprintf(uid, "%d", e->uid);

                                break;
                        case UID_IS_CHARACTER:

                                memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
                                uid[ACPIHID_UID_LEN - 1] = '\0';

                                break;
                        default:
                                break;
                        }

                        devid = e->devid;
                        DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
                                    hid, uid,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        flags = e->flags;

                        ret = add_acpi_hid_device(hid, uid, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        free_ppr_log(iommu);
        free_ga_log(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x10) ||
            (boot_cpu_data.x86_model > 0x1f))
                return;

        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
        pci_read_config_dword(iommu->dev, 0xf4, &value);

        if (value & BIT(2))
                return;

        /* Select NB indirect register 0x90 and enable writing */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

        pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
        pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
                dev_name(&iommu->dev->dev));

        /* Clear the enable writing bit */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x30) ||
            (boot_cpu_data.x86_model > 0x3f))
                return;

        /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
        value = iommu_read_l2(iommu, 0x47);

        if (value & BIT(0))
                return;

        /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
        iommu_write_l2(iommu, 0x47, value | BIT(0));

        pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
                dev_name(&iommu->dev->dev));
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        int ret;

        raw_spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->devid     = h->devid;
        iommu->cap_ptr   = h->cap_ptr;
        iommu->pci_seg   = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;

        switch (h->type) {
        case 0x10:
                /* Check if IVHD EFR contains proper max banks/counters */
                if ((h->efr_attr != 0) &&
                    ((h->efr_attr & (0xF << 13)) != 0) &&
                    ((h->efr_attr & (0x3F << 17)) != 0))
                        iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
                else
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
                if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
                        amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
                break;
        case 0x11:
        case 0x40:
                if (h->efr_reg & (1 << 9))
                        iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
                else
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
                if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
                        amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
                break;
        default:
                return -EINVAL;
        }

        iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
                                                iommu->mmio_phys_end);
        if (!iommu->mmio_base)
                return -ENOMEM;

        if (alloc_command_buffer(iommu))
                return -ENOMEM;

        if (alloc_event_buffer(iommu))
                return -ENOMEM;

        iommu->int_enabled = false;

        init_translation_status(iommu);
        if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
                iommu_disable(iommu);
                clear_translation_pre_enabled(iommu);
                pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
                        iommu->index);
        }
        if (amd_iommu_pre_enabled)
                amd_iommu_pre_enabled = translation_pre_enabled(iommu);

        ret = init_iommu_from_acpi(iommu, h);
        if (ret)
                return ret;

        ret = amd_iommu_create_irq_domain(iommu);
        if (ret)
                return ret;

        /*
         * Make sure IOMMU is not considered to translate itself. The IVRS
         * table tells us so, but this is a lie!
         */
        amd_iommu_rlookup_table[iommu->devid] = NULL;

        return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks and returns the maximum
 * supported IVHD type.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
        u8 *base = (u8 *)ivrs;
        struct ivhd_header *ivhd = (struct ivhd_header *)
                                        (base + IVRS_HEADER_LENGTH);
        u8 last_type = ivhd->type;
        u16 devid = ivhd->devid;

        while (((u8 *)ivhd - base < ivrs->length) &&
               (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
                u8 *p = (u8 *) ivhd;

                if (ivhd->devid == devid)
                        last_type = ivhd->type;
                ivhd = (struct ivhd_header *)(p + ivhd->length);
        }

        return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                if (*p == amd_iommu_target_ivhd_type) {

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;

                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
                                u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
        u64 val = 0xabcd, val2 = 0;

        if (!iommu_feature(iommu, FEATURE_PC))
                return;

        amd_iommu_pc_present = true;

        /* Check if the performance counters can be written to */
        if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
            (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
            (val != val2)) {
                pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
                amd_iommu_pc_present = false;
                return;
        }

        pr_info("AMD-Vi: IOMMU performance counters supported\n");

        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

static ssize_t amd_iommu_show_cap(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct amd_iommu *iommu = dev_to_amd_iommu(dev);
        return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct amd_iommu *iommu = dev_to_amd_iommu(dev);
        return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
        &dev_attr_cap.attr,
        &dev_attr_features.attr,
        NULL,
};

static struct attribute_group amd_iommu_group = {
        .name = "amd-iommu",
        .attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
        &amd_iommu_group,
        NULL,
};

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
        int ret;

        iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
                                                 iommu->devid & 0xff);
        if (!iommu->dev)
                return -ENODEV;

        /* Prevent binding other PCI device drivers to IOMMU devices */
        iommu->dev->match_driver = false;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        /* read extended feature bits */
        low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

        iommu->features = ((u64)high << 32) | low;

        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
                u32 max_pasid;
                u64 pasmax;

                pasmax = iommu->features & FEATURE_PASID_MASK;
                pasmax >>= FEATURE_PASID_SHIFT;
                max_pasid = (1 << (pasmax + 1)) - 1;

                amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

                BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

                glxval   = iommu->features & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;

                if (amd_iommu_max_glx_val == -1)
                        amd_iommu_max_glx_val = glxval;
                else
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }

        if (iommu_feature(iommu, FEATURE_GT) &&
            iommu_feature(iommu, FEATURE_PPR)) {
                iommu->is_iommu_v2   = true;
                amd_iommu_v2_present = true;
        }

        if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;

        ret = iommu_init_ga(iommu);
        if (ret)
                return ret;

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        init_iommu_perf_ctr(iommu);

        if (is_rd890_iommu(iommu->dev)) {
                int i, j;

                iommu->root_pdev =
                        pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
                                                    PCI_DEVFN(0, 0));

                /*
                 * Some rd890 systems may not be fully reconfigured by the
                 * BIOS, so it's necessary for us to store this information so
                 * it can be reprogrammed on resume
                 */
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                                      &iommu->stored_addr_lo);
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                                      &iommu->stored_addr_hi);

                /* Low bit locks writes to configuration space */
                iommu->stored_addr_lo &= ~1;

                for (i = 0; i < 6; i++)
                        for (j = 0; j < 0x12; j++)
                                iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

                for (i = 0; i < 0x83; i++)
                        iommu->stored_l2[i] = iommu_read_l2(iommu, i);
        }

        amd_iommu_erratum_746_workaround(iommu);
        amd_iommu_ats_write_check_workaround(iommu);

        iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
                               amd_iommu_groups, "ivhd%d", iommu->index);
        iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
        iommu_device_register(&iommu->iommu);

        return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC"
        };
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                int i;

                pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
                        dev_name(&iommu->dev->dev), iommu->cap_ptr);

                if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
                        pr_info("AMD-Vi: Extended features (%#llx):\n",
                                iommu->features);
                        for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
                                if (iommu_feature(iommu, (1ULL << i)))
                                        pr_cont(" %s", feat_str[i]);
                        }

                        if (iommu->features & FEATURE_GAM_VAPIC)
                                pr_cont(" GA_vAPIC");

                        pr_cont("\n");
                }
        }
        if (irq_remapping_enabled) {
                pr_info("AMD-Vi: Interrupt remapping enabled\n");
                if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                        pr_info("AMD-Vi: virtual APIC enabled\n");
                if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
                        pr_info("AMD-Vi: X2APIC enabled\n");
        }
}

static int __init amd_iommu_init_pci(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_pci(iommu);
                if (ret)
                        break;
        }

        /*
         * Order is important here to make sure any unity map requirements are
         * fulfilled. The unity mappings are created and written to the device
         * table during the amd_iommu_init_api() call.
         *
         * After that we call init_device_table_dma() to make sure any
         * uninitialized DTE will block DMA, and in the end we flush the caches
         * of all IOMMUs to make sure the changes to the device table are
         * active.
         */
        ret = amd_iommu_init_api();

        init_device_table_dma();

        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);

        if (!ret)
                print_iommu_info();

        return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        r = pci_enable_msi(iommu->dev);
        if (r)
                return r;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu);

        if (r) {
                pci_disable_msi(iommu->dev);
                return r;
        }

        iommu->int_enabled = true;

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        int ret;

        if (iommu->int_enabled)
                goto enable_faults;

        if (iommu->dev->msi_cap)
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;

        if (ret)
                return ret;

enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        if (iommu->ppr_log != NULL)
                iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

        iommu_ga_log_enable(iommu);

        return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        if (m->flags & IVMD_FLAG_EXCL_RANGE)
                init_exclusion_range(m);

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
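
/*
 * Note on e->prot above (for reference): in the IVMD flags, bit 0 is
 * the unity-map flag and bits 1 and 2 carry the IR (read) and IW
 * (write) permissions, so shifting the flags right by one leaves just
 * the protection bits in the low two positions.
 */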

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}

static void __init uninit_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                amd_iommu_dev_table[devid].data[0] = 0ULL;
                amd_iommu_dev_table[devid].data[1] = 0ULL;
        }
}

static void init_device_table(void)
{
        u32 devid;

        if (!amd_iommu_irq_remap)
                return;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
                set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /* Set IOTLB invalidation timeout to 1s */
        iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

2129 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2131 int i, j;
2132 u32 ioc_feature_control;
2133 struct pci_dev *pdev = iommu->root_pdev;
2135 /* RD890 BIOSes may not have completely reconfigured the iommu */
2136 if (!is_rd890_iommu(iommu->dev) || !pdev)
2137 return;
2139 /*
2140 * First, we need to ensure that the iommu is enabled. This is
2141 * controlled by a register in the northbridge
2142 */
2144 /* Select Northbridge indirect register 0x75 and enable writing */
2145 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2146 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
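/* 0x60/0x64 are assumed to act as the northbridge's indirect index/data register pair */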
2148 /* Enable the iommu */
2149 if (!(ioc_feature_control & 0x1))
2150 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2152 /* Restore the iommu BAR */
2153 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2154 iommu->stored_addr_lo);
2155 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2156 iommu->stored_addr_hi);
2158 /* Restore the l1 indirect regs for each of the 6 l1s */
2159 for (i = 0; i < 6; i++)
2160 for (j = 0; j < 0x12; j++)
2161 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2163 /* Restore the l2 indirect regs */
2164 for (i = 0; i < 0x83; i++)
2165 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2167 /* Lock PCI setup registers */
2168 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2169 iommu->stored_addr_lo | 1);
2170 }
2172 static void iommu_enable_ga(struct amd_iommu *iommu)
2173 {
2174 #ifdef CONFIG_IRQ_REMAP
2175 switch (amd_iommu_guest_ir) {
2176 case AMD_IOMMU_GUEST_IR_VAPIC:
2177 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2178 /* Fall through */
2179 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2180 iommu_feature_enable(iommu, CONTROL_GA_EN);
2181 iommu->irte_ops = &irte_128_ops;
2182 break;
2183 default:
2184 iommu->irte_ops = &irte_32_ops;
2185 break;
2186 }
2187 #endif
2188 }
2190 static void early_enable_iommu(struct amd_iommu *iommu)
2191 {
2192 iommu_disable(iommu);
2193 iommu_init_flags(iommu);
2194 iommu_set_device_table(iommu);
2195 iommu_enable_command_buffer(iommu);
2196 iommu_enable_event_buffer(iommu);
2197 iommu_set_exclusion_range(iommu);
2198 iommu_enable_ga(iommu);
2199 iommu_enable_xt(iommu);
2200 iommu_enable(iommu);
2201 iommu_flush_all_caches(iommu);
2202 }
2204 /*
2205 * This function finally enables all IOMMUs found in the system after
2206 * they have been initialized.
2208 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries
2209 * to copy the old device table entries. If that is not the case, or if
2210 * the copy fails, it just continues as a normal kernel would.
2211 */
2212 static void early_enable_iommus(void)
2213 {
2214 struct amd_iommu *iommu;
2217 if (!copy_device_table()) {
2218 /*
2219 * If we get here because copying the device table from the old
2220 * kernel failed even though all IOMMUs were pre-enabled, print an
2221 * error message and try to free the allocated old_dev_tbl_cpy.
2222 */
2223 if (amd_iommu_pre_enabled)
2224 pr_err("Failed to copy DEV table from previous kernel.\n");
2225 if (old_dev_tbl_cpy != NULL)
2226 free_pages((unsigned long)old_dev_tbl_cpy,
2227 get_order(dev_table_size));
2229 for_each_iommu(iommu) {
2230 clear_translation_pre_enabled(iommu);
2231 early_enable_iommu(iommu);
2232 }
2233 } else {
2234 pr_info("Copied DEV table from previous kernel.\n");
2235 free_pages((unsigned long)amd_iommu_dev_table,
2236 get_order(dev_table_size));
2237 amd_iommu_dev_table = old_dev_tbl_cpy;
2238 for_each_iommu(iommu) {
2239 iommu_disable_command_buffer(iommu);
2240 iommu_disable_event_buffer(iommu);
2241 iommu_enable_command_buffer(iommu);
2242 iommu_enable_event_buffer(iommu);
2243 iommu_enable_ga(iommu);
2244 iommu_enable_xt(iommu);
2245 iommu_set_device_table(iommu);
2246 iommu_flush_all_caches(iommu);
2247 }
2248 }
2250 #ifdef CONFIG_IRQ_REMAP
2251 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2252 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2253 #endif
2254 }
2256 static void enable_iommus_v2(void)
2257 {
2258 struct amd_iommu *iommu;
2260 for_each_iommu(iommu) {
2261 iommu_enable_ppr_log(iommu);
2262 iommu_enable_gt(iommu);
2263 }
2264 }
2266 static void enable_iommus(void)
2267 {
2268 early_enable_iommus();
2270 enable_iommus_v2();
2271 }
2273 static void disable_iommus(void)
2274 {
2275 struct amd_iommu *iommu;
2277 for_each_iommu(iommu)
2278 iommu_disable(iommu);
2280 #ifdef CONFIG_IRQ_REMAP
2281 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2282 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2283 #endif
2284 }
2286 /*
2287 * Suspend/Resume support
2288 * disable suspend until real resume implemented
2289 */
2291 static void amd_iommu_resume(void)
2292 {
2293 struct amd_iommu *iommu;
2295 for_each_iommu(iommu)
2296 iommu_apply_resume_quirks(iommu);
2298 /* re-load the hardware */
2299 enable_iommus();
2301 amd_iommu_enable_interrupts();
2302 }
2304 static int amd_iommu_suspend(void)
2305 {
2306 /* disable IOMMUs to go out of the way for BIOS */
2307 disable_iommus();
2309 return 0;
2310 }
2312 static struct syscore_ops amd_iommu_syscore_ops = {
2313 .suspend = amd_iommu_suspend,
2314 .resume = amd_iommu_resume,
2315 };
2317 static void __init free_iommu_resources(void)
2318 {
2319 kmemleak_free(irq_lookup_table);
2320 free_pages((unsigned long)irq_lookup_table,
2321 get_order(rlookup_table_size));
2322 irq_lookup_table = NULL;
2324 kmem_cache_destroy(amd_iommu_irq_cache);
2325 amd_iommu_irq_cache = NULL;
2327 free_pages((unsigned long)amd_iommu_rlookup_table,
2328 get_order(rlookup_table_size));
2329 amd_iommu_rlookup_table = NULL;
2331 free_pages((unsigned long)amd_iommu_alias_table,
2332 get_order(alias_table_size));
2333 amd_iommu_alias_table = NULL;
2335 free_pages((unsigned long)amd_iommu_dev_table,
2336 get_order(dev_table_size));
2337 amd_iommu_dev_table = NULL;
2339 free_iommu_all();
2341 #ifdef CONFIG_GART_IOMMU
2342 /*
2343 * We failed to initialize the AMD IOMMU - try fallback to GART
2344 * if possible.
2345 */
2346 gart_iommu_init();
2348 #endif
2349 }
2351 /* SB IOAPIC is always on this device in AMD systems */
2352 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
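/* i.e. bus 0x00, device 0x14, function 0, which encodes to devid 0x00a0 */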
2354 static bool __init check_ioapic_information(void)
2355 {
2356 const char *fw_bug = FW_BUG;
2357 bool ret, has_sb_ioapic;
2358 int idx;
2360 has_sb_ioapic = false;
2361 ret = false;
2363 /*
2364 * If we have map overrides on the kernel command line the
2365 * messages in this function might not describe firmware bugs
2366 * anymore - so be careful
2367 */
2368 if (cmdline_maps)
2369 fw_bug = "";
2371 for (idx = 0; idx < nr_ioapics; idx++) {
2372 int devid, id = mpc_ioapic_id(idx);
2374 devid = get_ioapic_devid(id);
2375 if (devid < 0) {
2376 pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
2377 fw_bug, id);
2378 ret = false;
2379 } else if (devid == IOAPIC_SB_DEVID) {
2380 has_sb_ioapic = true;
2381 ret = true;
2382 }
2383 }
2385 if (!has_sb_ioapic) {
2386 /*
2387 * We expect the SB IOAPIC to be listed in the IVRS
2388 * table. The system timer is connected to the SB IOAPIC
2389 * and if we don't have it in the list the system will
2390 * panic at boot time. This situation usually happens
2391 * when the BIOS is buggy and provides us the wrong
2392 * device id for the IOAPIC in the system.
2393 */
2394 pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
2395 }
2397 if (!ret)
2398 pr_err("AMD-Vi: Disabling interrupt remapping\n");
2400 return ret;
2401 }
2403 static void __init free_dma_resources(void)
2404 {
2405 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2406 get_order(MAX_DOMAIN_ID/8));
2407 amd_iommu_pd_alloc_bitmap = NULL;
2409 free_unity_maps();
2410 }
2412 /*
2413 * This is the hardware init function for AMD IOMMU in the system.
2414 * This function is called either from amd_iommu_init or from the interrupt
2415 * remapping setup code.
2417 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2418 * four times:
2420 * 1 pass) Discover the most comprehensive IVHD type to use.
2422 * 2 pass) Find the highest PCI device id the driver has to handle.
2423 * This information determines the size of the data
2424 * structures that need to be allocated.
2426 * 3 pass) Initialize the just allocated data structures with the
2427 * information about the available AMD IOMMUs that the
2428 * ACPI table provides. This pass also maps the PCI
2429 * devices in the system to their specific IOMMUs.
2431 * 4 pass) After the basic data structures are allocated and
2432 * initialized we update them with information about memory
2433 * remapping requirements parsed out of the ACPI table in
2434 * this last pass.
2436 * After everything is set up the IOMMUs are enabled and the necessary
2437 * hotplug and suspend notifiers are registered.
2438 */
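/*
 * For orientation: pass 1 corresponds to get_highest_supported_ivhd_type(),
 * pass 2 to find_last_devid_acpi(), pass 3 to init_iommu_all() and pass 4
 * to init_memory_definitions(), all called from early_amd_iommu_init()
 * below.
 */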
2439 static int __init early_amd_iommu_init(void)
2440 {
2441 struct acpi_table_header *ivrs_base;
2442 acpi_status status;
2443 int i, remap_cache_sz, ret = 0;
2445 if (!amd_iommu_detected)
2446 return -ENODEV;
2448 status = acpi_get_table("IVRS", 0, &ivrs_base);
2449 if (status == AE_NOT_FOUND)
2450 return -ENODEV;
2451 else if (ACPI_FAILURE(status)) {
2452 const char *err = acpi_format_exception(status);
2453 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2454 return -EINVAL;
2455 }
2457 /*
2458 * Validate checksum here so we don't need to do it when
2459 * we actually parse the table
2460 */
2461 ret = check_ivrs_checksum(ivrs_base);
2462 if (ret)
2463 goto out;
2465 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2466 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2468 /*
2469 * First parse ACPI tables to find the largest Bus/Dev/Func
2470 * we need to handle. Based on this information the shared data
2471 * structures for the IOMMUs in the system will be allocated.
2472 */
2473 ret = find_last_devid_acpi(ivrs_base);
2474 if (ret)
2475 goto out;
2477 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2478 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2479 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2481 /* Device table - directly used by all IOMMUs */
2482 ret = -ENOMEM;
2483 amd_iommu_dev_table = (void *)__get_free_pages(
2484 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2485 get_order(dev_table_size));
2486 if (amd_iommu_dev_table == NULL)
2487 goto out;
2489 /*
2490 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
2491 * IOMMU sees for that device
2492 */
2493 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2494 get_order(alias_table_size));
2495 if (amd_iommu_alias_table == NULL)
2496 goto out;
2498 /* IOMMU rlookup table - find the IOMMU for a specific device */
2499 amd_iommu_rlookup_table = (void *)__get_free_pages(
2500 GFP_KERNEL | __GFP_ZERO,
2501 get_order(rlookup_table_size));
2502 if (amd_iommu_rlookup_table == NULL)
2503 goto out;
2505 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2506 GFP_KERNEL | __GFP_ZERO,
2507 get_order(MAX_DOMAIN_ID/8));
2508 if (amd_iommu_pd_alloc_bitmap == NULL)
2509 goto out;
2511 /*
2512 * let each alias entry point to itself initially
2513 */
2514 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2515 amd_iommu_alias_table[i] = i;
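/*
 * For example, devid 0x00a0 (00:14.0) starts out aliasing itself; the
 * IVHD alias entries parsed later by init_iommu_all() may redirect
 * individual devids to a different requester ID.
 */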
2517 /*
2518 * never allocate domain 0 because it's used as the non-allocated and
2519 * error value placeholder
2520 */
2521 __set_bit(0, amd_iommu_pd_alloc_bitmap);
2523 spin_lock_init(&amd_iommu_pd_lock);
2525 /*
2526 * now the data structures are allocated and basically initialized
2527 * start the real acpi table scan
2528 */
2529 ret = init_iommu_all(ivrs_base);
2530 if (ret)
2531 goto out;
2533 /* Disable any previously enabled IOMMUs */
2534 if (!is_kdump_kernel() || amd_iommu_disabled)
2535 disable_iommus();
2537 if (amd_iommu_irq_remap)
2538 amd_iommu_irq_remap = check_ioapic_information();
2540 if (amd_iommu_irq_remap) {
2541 /*
2542 * Interrupt remapping enabled, create kmem_cache for the
2543 * remapping tables.
2544 */
2545 ret = -ENOMEM;
2546 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2547 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2548 else
2549 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
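/*
 * The two sizes reflect the two IRTE formats: legacy remapping uses
 * 32-bit entries (irte_32_ops), while guest-APIC mode uses 128-bit
 * entries (irte_128_ops); see iommu_enable_ga() above.
 */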
2550 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2551 remap_cache_sz,
2552 IRQ_TABLE_ALIGNMENT,
2553 0, NULL);
2554 if (!amd_iommu_irq_cache)
2555 goto out;
2557 irq_lookup_table = (void *)__get_free_pages(
2558 GFP_KERNEL | __GFP_ZERO,
2559 get_order(rlookup_table_size));
2560 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2561 1, GFP_KERNEL);
2562 if (!irq_lookup_table)
2563 goto out;
2564 }
2566 ret = init_memory_definitions(ivrs_base);
2567 if (ret)
2568 goto out;
2570 /* init the device table */
2571 init_device_table();
2573 out:
2574 /* Don't leak any ACPI memory */
2575 acpi_put_table(ivrs_base);
2576 ivrs_base = NULL;
2578 return ret;
2579 }
2581 static int amd_iommu_enable_interrupts(void)
2582 {
2583 struct amd_iommu *iommu;
2584 int ret = 0;
2586 for_each_iommu(iommu) {
2587 ret = iommu_init_msi(iommu);
2588 if (ret)
2589 goto out;
2590 }
2592 out:
2593 return ret;
2594 }
2596 static bool detect_ivrs(void)
2597 {
2598 struct acpi_table_header *ivrs_base;
2599 acpi_status status;
2601 status = acpi_get_table("IVRS", 0, &ivrs_base);
2602 if (status == AE_NOT_FOUND)
2603 return false;
2604 else if (ACPI_FAILURE(status)) {
2605 const char *err = acpi_format_exception(status);
2606 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2607 return false;
2608 }
2610 acpi_put_table(ivrs_base);
2612 /* Make sure ACS will be enabled during PCI probe */
2613 pci_request_acs();
2615 return true;
2616 }
2618 /****************************************************************************
2620 * AMD IOMMU Initialization State Machine
2622 ****************************************************************************/
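/*
 * Happy-path order driven by state_next(): IOMMU_START_STATE ->
 * IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED -> IOMMU_ENABLED ->
 * IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN -> IOMMU_DMA_OPS ->
 * IOMMU_INITIALIZED. IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and
 * IOMMU_CMDLINE_DISABLED are terminal error states.
 */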
2624 static int __init state_next(void)
2625 {
2626 int ret = 0;
2628 switch (init_state) {
2629 case IOMMU_START_STATE:
2630 if (!detect_ivrs()) {
2631 init_state = IOMMU_NOT_FOUND;
2632 ret = -ENODEV;
2633 } else {
2634 init_state = IOMMU_IVRS_DETECTED;
2635 }
2636 break;
2637 case IOMMU_IVRS_DETECTED:
2638 ret = early_amd_iommu_init();
2639 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2640 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
2641 pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
2642 free_dma_resources();
2643 free_iommu_resources();
2644 init_state = IOMMU_CMDLINE_DISABLED;
2645 ret = -EINVAL;
2646 }
2647 break;
2648 case IOMMU_ACPI_FINISHED:
2649 early_enable_iommus();
2650 x86_platform.iommu_shutdown = disable_iommus;
2651 init_state = IOMMU_ENABLED;
2652 break;
2653 case IOMMU_ENABLED:
2654 register_syscore_ops(&amd_iommu_syscore_ops);
2655 ret = amd_iommu_init_pci();
2656 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2657 enable_iommus_v2();
2658 break;
2659 case IOMMU_PCI_INIT:
2660 ret = amd_iommu_enable_interrupts();
2661 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2662 break;
2663 case IOMMU_INTERRUPTS_EN:
2664 ret = amd_iommu_init_dma_ops();
2665 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2666 break;
2667 case IOMMU_DMA_OPS:
2668 init_state = IOMMU_INITIALIZED;
2669 break;
2670 case IOMMU_INITIALIZED:
2671 /* Nothing to do */
2672 break;
2673 case IOMMU_NOT_FOUND:
2674 case IOMMU_INIT_ERROR:
2675 case IOMMU_CMDLINE_DISABLED:
2676 /* Error states => do nothing */
2677 ret = -EINVAL;
2678 break;
2679 default:
2680 /* Unknown state */
2681 BUG();
2682 }
2684 return ret;
2685 }
2687 static int __init iommu_go_to_state(enum iommu_init_state state)
2688 {
2689 int ret = -EINVAL;
2691 while (init_state != state) {
2692 if (init_state == IOMMU_NOT_FOUND ||
2693 init_state == IOMMU_INIT_ERROR ||
2694 init_state == IOMMU_CMDLINE_DISABLED)
2695 break;
2696 ret = state_next();
2697 }
2699 return ret;
2700 }
2702 #ifdef CONFIG_IRQ_REMAP
2703 int __init amd_iommu_prepare(void)
2704 {
2705 int ret;
2707 amd_iommu_irq_remap = true;
2709 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2710 if (ret)
2711 return ret;
2712 return amd_iommu_irq_remap ? 0 : -ENODEV;
2713 }
2715 int __init amd_iommu_enable(void)
2716 {
2717 int ret;
2719 ret = iommu_go_to_state(IOMMU_ENABLED);
2720 if (ret)
2721 return ret;
2723 irq_remapping_enabled = 1;
2724 return amd_iommu_xt_mode;
2725 }
2727 void amd_iommu_disable(void)
2728 {
2729 amd_iommu_suspend();
2730 }
2732 int amd_iommu_reenable(int mode)
2733 {
2734 amd_iommu_resume();
2736 return 0;
2737 }
2739 int __init amd_iommu_enable_faulting(void)
2740 {
2741 /* We enable MSI later when PCI is initialized */
2742 return 0;
2743 }
2744 #endif
2746 /*
2747 * This is the core init function for AMD IOMMU hardware in the system.
2748 * This function is called from the generic x86 DMA layer initialization
2749 * code.
2750 */
2751 static int __init amd_iommu_init(void)
2752 {
2753 struct amd_iommu *iommu;
2754 int ret;
2756 ret = iommu_go_to_state(IOMMU_INITIALIZED);
2757 if (ret) {
2758 free_dma_resources();
2759 if (!irq_remapping_enabled) {
2760 disable_iommus();
2761 free_iommu_resources();
2762 } else {
2763 uninit_device_table_dma();
2764 for_each_iommu(iommu)
2765 iommu_flush_all_caches(iommu);
2766 }
2767 }
2769 for_each_iommu(iommu)
2770 amd_iommu_debugfs_setup(iommu);
2772 return ret;
2773 }
2775 static bool amd_iommu_sme_check(void)
2776 {
2777 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2778 return true;
2780 /* For Fam17h, a specific level of support is required */
2781 if (boot_cpu_data.microcode >= 0x08001205)
2782 return true;
2784 if ((boot_cpu_data.microcode >= 0x08001126) &&
2785 (boot_cpu_data.microcode <= 0x080011ff))
2786 return true;
2788 pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n");
2790 return false;
2791 }
2793 /****************************************************************************
2795 * Early detect code. This code runs at IOMMU detection time in the DMA
2796 * layer. It just looks if there is an IVRS ACPI table to detect AMD
2797 * IOMMUs
2799 ****************************************************************************/
2800 int __init amd_iommu_detect(void)
2801 {
2802 int ret;
2804 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2805 return -ENODEV;
2807 if (!amd_iommu_sme_check())
2808 return -ENODEV;
2810 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2811 if (ret)
2812 return ret;
2814 amd_iommu_detected = true;
2815 iommu_detected = 1;
2816 x86_init.iommu.iommu_init = amd_iommu_init;
2818 return 1;
2819 }
2821 /****************************************************************************
2823 * Parsing functions for the AMD IOMMU specific kernel command line
2824 * options.
2826 ****************************************************************************/
2828 static int __init parse_amd_iommu_dump(char *str)
2829 {
2830 amd_iommu_dump = true;
2832 return 1;
2833 }
2835 static int __init parse_amd_iommu_intr(char *str)
2836 {
2837 for (; *str; ++str) {
2838 if (strncmp(str, "legacy", 6) == 0) {
2839 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2840 break;
2841 }
2842 if (strncmp(str, "vapic", 5) == 0) {
2843 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2844 break;
2845 }
2846 }
2847 return 1;
2848 }
2850 static int __init parse_amd_iommu_options(char *str)
2851 {
2852 for (; *str; ++str) {
2853 if (strncmp(str, "fullflush", 9) == 0)
2854 amd_iommu_unmap_flush = true;
2855 if (strncmp(str, "off", 3) == 0)
2856 amd_iommu_disabled = true;
2857 if (strncmp(str, "force_isolation", 15) == 0)
2858 amd_iommu_force_isolation = true;
2859 }
2861 return 1;
2862 }
2864 static int __init parse_ivrs_ioapic(char *str)
2865 {
2866 unsigned int bus, dev, fn;
2867 int ret, id, i;
2868 u16 devid;
2870 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2872 if (ret != 4) {
2873 pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
2874 return 1;
2875 }
2877 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2878 pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2879 str);
2880 return 1;
2881 }
2883 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2885 cmdline_maps = true;
2886 i = early_ioapic_map_size++;
2887 early_ioapic_map[i].id = id;
2888 early_ioapic_map[i].devid = devid;
2889 early_ioapic_map[i].cmd_line = true;
2891 return 1;
2892 }
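/*
 * Example: to map IOAPIC-ID decimal 10 to PCI device 00:14.0, boot with
 * ivrs_ioapic[10]=00:14.0 (see kernel-parameters.txt).
 */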
2894 static int __init parse_ivrs_hpet(char *str)
2895 {
2896 unsigned int bus, dev, fn;
2897 int ret, id, i;
2898 u16 devid;
2900 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2902 if (ret != 4) {
2903 pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
2904 return 1;
2905 }
2907 if (early_hpet_map_size == EARLY_MAP_SIZE) {
2908 pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
2909 str);
2910 return 1;
2911 }
2913 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2915 cmdline_maps = true;
2916 i = early_hpet_map_size++;
2917 early_hpet_map[i].id = id;
2918 early_hpet_map[i].devid = devid;
2919 early_hpet_map[i].cmd_line = true;
2921 return 1;
2922 }
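/*
 * Example: to map HPET-ID 0 to PCI device 00:14.0, boot with
 * ivrs_hpet[0]=00:14.0.
 */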
2924 static int __init parse_ivrs_acpihid(char *str)
2925 {
2926 u32 bus, dev, fn;
2927 char *hid, *uid, *p;
2928 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
2929 int ret, i;
2931 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
2932 if (ret != 4) {
2933 pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
2934 return 1;
2935 }
2937 p = acpiid;
2938 hid = strsep(&p, ":");
2939 uid = p;
2941 if (!hid || !(*hid) || !uid) {
2942 pr_err("AMD-Vi: Invalid command line: hid or uid\n");
2943 return 1;
2944 }
2946 i = early_acpihid_map_size++;
2947 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
2948 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
2949 early_acpihid_map[i].devid =
2950 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2951 early_acpihid_map[i].cmd_line = true;
2953 return 1;
2954 }
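/*
 * Example: to map HID:UID AMD0020:0 to PCI device 00:14.5, boot with
 * ivrs_acpihid[00:14.5]=AMD0020:0.
 */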
2956 __setup("amd_iommu_dump", parse_amd_iommu_dump);
2957 __setup("amd_iommu=", parse_amd_iommu_options);
2958 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
2959 __setup("ivrs_ioapic", parse_ivrs_ioapic);
2960 __setup("ivrs_hpet", parse_ivrs_hpet);
2961 __setup("ivrs_acpihid", parse_ivrs_acpihid);
2963 IOMMU_INIT_FINISH(amd_iommu_detect,
2964 gart_iommu_hole_init,
2965 NULL,
2966 NULL);
2968 bool amd_iommu_v2_supported(void)
2969 {
2970 return amd_iommu_v2_present;
2971 }
2972 EXPORT_SYMBOL(amd_iommu_v2_supported);
2974 struct amd_iommu *get_amd_iommu(unsigned int idx)
2975 {
2976 unsigned int i = 0;
2977 struct amd_iommu *iommu;
2979 for_each_iommu(iommu)
2980 if (i++ == idx)
2981 return iommu;
2982 return NULL;
2983 }
2984 EXPORT_SYMBOL(get_amd_iommu);
2986 /****************************************************************************
2988 * IOMMU EFR Performance Counter support functionality. This code allows
2989 * access to the IOMMU PC functionality.
2991 ****************************************************************************/
2993 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
2994 {
2995 struct amd_iommu *iommu = get_amd_iommu(idx);
2997 if (iommu)
2998 return iommu->max_banks;
3000 return 0;
3001 }
3002 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3004 bool amd_iommu_pc_supported(void)
3005 {
3006 return amd_iommu_pc_present;
3007 }
3008 EXPORT_SYMBOL(amd_iommu_pc_supported);
3010 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3011 {
3012 struct amd_iommu *iommu = get_amd_iommu(idx);
3014 if (iommu)
3015 return iommu->max_counters;
3017 return 0;
3018 }
3019 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3021 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3022 u8 fxn, u64 *value, bool is_write)
3023 {
3024 u32 offset;
3025 u32 max_offset_lim;
3027 /* Make sure the IOMMU PC resource is available */
3028 if (!amd_iommu_pc_present)
3029 return -ENODEV;
3031 /* Check for valid iommu and pc register indexing */
3032 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3033 return -ENODEV;
3035 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3037 /* Limit the offset to the hw defined mmio region aperture */
3038 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3039 (iommu->max_counters << 8) | 0x28);
3040 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3041 (offset > max_offset_lim))
3042 return -EINVAL;
3044 if (is_write) {
3045 u64 val = *value & GENMASK_ULL(47, 0);
3047 writel((u32)val, iommu->mmio_base + offset);
3048 writel((val >> 32), iommu->mmio_base + offset + 4);
3049 } else {
3050 *value = readl(iommu->mmio_base + offset + 4);
3051 *value <<= 32;
3052 *value |= readl(iommu->mmio_base + offset);
3053 *value &= GENMASK_ULL(47, 0);
3056 return 0;
3059 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3061 if (!iommu)
3062 return -EINVAL;
3064 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3065 }
3066 EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3068 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3069 {
3070 if (!iommu)
3071 return -EINVAL;
3073 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3074 }
3075 EXPORT_SYMBOL(amd_iommu_pc_set_reg);
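/*
 * Usage sketch for the register accessors above (illustrative only; the
 * fxn offset 0x00 is assumed to select the 48-bit counter value register,
 * as used by the perf amd_iommu PMU driver):
 *
 *	u64 val;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0x00, &val))
 *		pr_info("bank 0, counter 0: %llu\n", val);
 */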