/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT	100000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;
/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in a kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */
enum iommu_init_state {
	IOMMU_START_STATE,
	/* ... */
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;
bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/
/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (iommu->ga_log)
		free_pages((unsigned long)iommu->ga_log,
			   get_order(GA_LOG_SIZE));
	if (iommu->ga_log_tail)
		free_pages((unsigned long)iommu->ga_log_tail,
			   get_order(8));
#endif
}
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}
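/*
 * Descriptive note (inferred from the code above): enabling the GA log is
 * asynchronous, so the loop polls the status register for the GALogRun bit
 * up to LOOP_TIMEOUT times before reporting failure.
 */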
#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */
static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask(sme_me_mask), we must remove the memory
	 * encryption mask to obtain the true physical address in kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
				get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}
add_special_device(u8 type
, u8 id
, u16
*devid
, bool cmd_line
)
1016 struct devid_map
*entry
;
1017 struct list_head
*list
;
1019 if (type
== IVHD_SPECIAL_IOAPIC
)
1021 else if (type
== IVHD_SPECIAL_HPET
)
1026 list_for_each_entry(entry
, list
, list
) {
1027 if (!(entry
->id
== id
&& entry
->cmd_line
))
1030 pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
1031 type
== IVHD_SPECIAL_IOAPIC
? "IOAPIC" : "HPET", id
);
1033 *devid
= entry
->devid
;
1038 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
1043 entry
->devid
= *devid
;
1044 entry
->cmd_line
= cmd_line
;
1046 list_add_tail(&entry
->list
, list
);
static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}
add_early_maps(void)
1091 for (i
= 0; i
< early_ioapic_map_size
; ++i
) {
1092 ret
= add_special_device(IVHD_SPECIAL_IOAPIC
,
1093 early_ioapic_map
[i
].id
,
1094 &early_ioapic_map
[i
].devid
,
1095 early_ioapic_map
[i
].cmd_line
);
1100 for (i
= 0; i
< early_hpet_map_size
; ++i
) {
1101 ret
= add_special_device(IVHD_SPECIAL_HPET
,
1102 early_hpet_map
[i
].id
,
1103 &early_hpet_map
[i
].devid
,
1104 early_hpet_map
[i
].cmd_line
);
1109 for (i
= 0; i
< early_acpihid_map_size
; ++i
) {
1110 ret
= add_acpi_hid_device(early_acpihid_map
[i
].hid
,
1111 early_acpihid_map
[i
].uid
,
1112 &early_acpihid_map
[i
].devid
,
1113 early_acpihid_map
[i
].cmd_line
);
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN] = {0};
			u8 uid[ACPIHID_UID_LEN] = {0};
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
				uid[ACPIHID_UID_LEN - 1] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));
}
/*
 * This function clues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}
/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs            Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks and returns the highest
 * supported IVHD type it finds.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}
static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}
static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features (%#llx):\n",
				iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("AMD-Vi: virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("AMD-Vi: X2APIC enabled\n");
	}
}
static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		/* Fall through */
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}
static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
 * the old content of device table entries. If that is not the case or
 * the copy failed, just continue as a normal kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we come here because of failure in copying device table
		 * from old kernel with all IOMMUs enabled, print error message
		 * and try to free allocated old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}
static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}
2282 #ifdef CONFIG_IRQ_REMAP
2283 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir
))
2284 amd_iommu_irq_ops
.capability
&= ~(1 << IRQ_POSTING_CAP
);
2289 * Suspend/Resume support
2290 * disable suspend until real resume implemented
2293 static void amd_iommu_resume(void)
2295 struct amd_iommu
*iommu
;
2297 for_each_iommu(iommu
)
2298 iommu_apply_resume_quirks(iommu
);
2300 /* re-load the hardware */
2303 amd_iommu_enable_interrupts();
2306 static int amd_iommu_suspend(void)
2308 /* disable IOMMUs to go out of the way for BIOS */
2314 static struct syscore_ops amd_iommu_syscore_ops
= {
2315 .suspend
= amd_iommu_suspend
,
2316 .resume
= amd_iommu_resume
,
static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}
#ifdef CONFIG_GART_IOMMU
/*
 * We failed to initialize the AMD IOMMU - try fallback to GART
 * if possible.
 */
#endif
/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
				fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}
static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *	1 pass) Discover the most comprehensive IVHD type to use.
 *
 *	2 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	3 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	4 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		the last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
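	/*
	 * All three tables are sized from the highest device id found in
	 * the pass above: one entry per possible 16-bit device id, scaled
	 * by the per-table entry size and rounded up to whole pages by
	 * tbl_size().
	 */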
	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						  get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
					  GFP_KERNEL | __GFP_ZERO,
					  get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;
	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
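		/*
		 * The two sizes above reflect the two IRTE formats: 32 bits
		 * per entry in legacy mode, 128 bits per entry when guest
		 * virtual APIC (GA) mode is in use.
		 */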
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
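/*
 * On the successful path the machine advances
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * one step per state_next() call. IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and
 * IOMMU_CMDLINE_DISABLED are terminal; iommu_go_to_state() stops there.
 */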
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
			free_dma_resources();
			free_iommu_resources();
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND         ||
		    init_state == IOMMU_INIT_ERROR        ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}
static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n");

	return false;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
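/*
 * Illustrative examples of the options parsed below (the formats follow
 * the strncmp()/sscanf() patterns in each handler):
 *
 *   amd_iommu=off            amd_iommu=fullflush  amd_iommu=force_isolation
 *   amd_iommu_intr=legacy    amd_iommu_intr=vapic
 *   ivrs_ioapic[9]=00:14.0   ivrs_hpet[0]=00:14.0
 *   ivrs_acpihid[00:14.5]=AMD0020:0
 */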
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("AMD-Vi: Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line	= true;

	return 1;
}
__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);
IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);
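/*
 * Hooks amd_iommu_detect() into the x86 IOMMU-detection table (see
 * asm/iommu_table.h), ordered against gart_iommu_hole_init; the init and
 * late_init slots stay NULL because amd_iommu_detect() installs
 * amd_iommu_init through x86_init.iommu.iommu_init instead.
 */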
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
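	/*
	 * Example: bank 1, counter 2, register function 0x08 yields
	 * offset ((0x41 << 12) | (2 << 8) | 0x08) = 0x41208.
	 */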
	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;
	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);
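		/* The counters are 48 bits wide, hence GENMASK_ULL(47, 0) */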
		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);
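/*
 * Illustrative (hypothetical) caller of the counter API above - the real
 * consumer is the perf AMD IOMMU PMU driver:
 *
 *	u64 val = 0;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
 *		pr_info("IOMMU 0 bank 0 counter 0: %llu\n", val);
 */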