arch/x86/kernel/vsmp_64.c
/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *                           Ravikiran Thirumalai <kiran@scalemp.com>
 */
#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>

#include <asm/apic.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
 * and vice versa.
 */
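
/*
 * Report EFLAGS with the AC shadow folded in: if the hardware IF is
 * clear, or AC says interrupts are off, report IF as clear.
 */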
static unsigned long vsmp_save_fl(void)
{
        unsigned long flags = native_save_fl();

        if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
                flags &= ~X86_EFLAGS_IF;
        return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
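
/* Mirror the IF bit being restored into the AC shadow (~AC == IF). */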
static void vsmp_restore_fl(unsigned long flags)
{
        if (flags & X86_EFLAGS_IF)
                flags &= ~X86_EFLAGS_AC;
        else
                flags |= X86_EFLAGS_AC;
        native_restore_fl(flags);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
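
/*
 * irq_disable()/irq_enable(): rewrite IF and its AC shadow together in a
 * single native_restore_fl() so the two bits never disagree.
 */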
static void vsmp_irq_disable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);

static void vsmp_irq_enable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
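
/*
 * Let the default patcher keep the four irq flag ops as calls to the
 * vsmp_* handlers above; everything else gets the stock native patching.
 */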
static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                            unsigned long addr, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
        case PARAVIRT_PATCH(pv_irq_ops.save_fl):
        case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
                return paravirt_patch_default(type, clobbers, ibuf, addr, len);
        default:
                return native_patch(type, clobbers, ibuf, addr, len);
        }
}
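
/*
 * Probe the vSMP control device at PCI 00:1f.0: its BAR0 maps a
 * capability register and a control register (4 bytes each).  When bit 4
 * is set in both, switch the irq pv-ops to the vSMP variants and clear
 * the control bit to turn on vSMP IRQ fastpath handling.
 */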
static void __init set_vsmp_pv_ops(void)
{
        void __iomem *address;
        unsigned int cap, ctl, cfg;

        /* set vSMP magic bits to indicate vSMP capable kernel */
        cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
        address = early_ioremap(cfg, 8);
        cap = readl(address);
        ctl = readl(address + 4);
        printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
               cap, ctl);
        if (cap & ctl & (1 << 4)) {
                /* Setup irq ops and turn on vSMP IRQ fastpath handling */
                pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
                pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
                pv_irq_ops.save_fl     = PV_CALLEE_SAVE(vsmp_save_fl);
                pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
                pv_init_ops.patch = vsmp_patch;

                ctl &= ~(1 << 4);
                writel(ctl, address + 4);
                ctl = readl(address + 4);
                printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
        }

        early_iounmap(address, 8);
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif

#ifdef CONFIG_PCI
static int is_vsmp = -1;

static void __init detect_vsmp_box(void)
{
        is_vsmp = 0;

        if (!early_pci_allowed())
                return;

        /* Check if we are running on a ScaleMP vSMPowered box */
        if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
            (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
                is_vsmp = 1;
}
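
/*
 * Valid only once detect_vsmp_box() has run; if queried earlier, warn
 * (once) and report "not a vSMP box".
 */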
int is_vsmp_box(void)
{
        if (is_vsmp != -1)
                return is_vsmp;
        else {
                WARN_ON_ONCE(1);
                return 0;
        }
}

#else
static void __init detect_vsmp_box(void)
{
}
int is_vsmp_box(void)
{
        return 0;
}
#endif
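
/*
 * Early-boot entry point: detect a ScaleMP vSMPowered box and, if one is
 * found, switch the interrupt pv-ops over to the vSMP variants.
 */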
void __init vsmp_init(void)
{
        detect_vsmp_box();
        if (!is_vsmp_box())
                return;

        set_vsmp_pv_ops();
        return;
}