/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *                           Ravikiran Thirumalai <kiran@scalemp.com>
 */
#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#define TOPOLOGY_REGISTER_OFFSET 0x10
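/*
 * Offset of the topology register within the region mapped by BAR0 of the
 * PCI device at 00:1f.0; see vsmp_cap_cpus() below.
 */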
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
 * and vice versa.
 */
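/*
 * Concretely: interrupts are considered enabled only when IF=1 and AC=0.
 * vsmp_save_fl() below folds every other IF/AC combination into "IF clear",
 * and vsmp_restore_fl() keeps AC as the inverse of IF.
 */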
static unsigned long vsmp_save_fl(void)
{
        unsigned long flags = native_save_fl();

        if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
                flags &= ~X86_EFLAGS_IF;
        return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
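/*
 * PV_CALLEE_SAVE_REGS_THUNK() emits a callee-saved wrapper around each of
 * these helpers so they can be installed with PV_CALLEE_SAVE() in
 * set_vsmp_pv_ops() below.
 */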
static void vsmp_restore_fl(unsigned long flags)
{
        if (flags & X86_EFLAGS_IF)
                flags &= ~X86_EFLAGS_AC;
        else
                flags |= X86_EFLAGS_AC;
        native_restore_fl(flags);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
static void vsmp_irq_disable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
static void vsmp_irq_enable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
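/*
 * vsmp_patch() keeps the four irq-flag ops above as calls to the vsmp_*
 * variants (via paravirt_patch_default()) instead of letting them be
 * patched back to the native cli/sti/pushf/popf sequences; every other
 * paravirt site is patched natively.
 */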
static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                            unsigned long addr, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
        case PARAVIRT_PATCH(pv_irq_ops.save_fl):
        case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
                return paravirt_patch_default(type, clobbers, ibuf, addr, len);
        default:
                return native_patch(type, clobbers, ibuf, addr, len);
        }
}
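/*
 * Probe the vSMP foundation's capability/control registers and, where both
 * sides support it, let the foundation route interrupts optimally (bit 8)
 * and switch the kernel to the paravirt irq fastpath above (bit 4).
 */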
static void __init set_vsmp_pv_ops(void)
{
        void __iomem *address;
        unsigned int cap, ctl, cfg;

        /* set vSMP magic bits to indicate vSMP capable kernel */
        cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
        address = early_ioremap(cfg, 8);
        cap = readl(address);
        ctl = readl(address + 4);
        printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
               cap, ctl);

        /* If possible, let the vSMP foundation route the interrupt optimally */
#ifdef CONFIG_SMP
        if (cap & ctl & BIT(8)) {
                ctl &= ~BIT(8);

#ifdef CONFIG_PROC_FS
                /* Don't let users change irq affinity via procfs */
                no_irq_affinity = 1;
#endif
        }
#endif

        if (cap & ctl & (1 << 4)) {
                /* Setup irq ops and turn on vSMP IRQ fastpath handling */
                pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
                pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
                pv_irq_ops.save_fl     = PV_CALLEE_SAVE(vsmp_save_fl);
                pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
                pv_init_ops.patch      = vsmp_patch;
                ctl &= ~(1 << 4);
        }
        writel(ctl, address + 4);
        ctl = readl(address + 4);
        pr_info("vSMP CTL: control set to:0x%08x\n", ctl);

        early_iounmap(address, 8);
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif
#ifdef CONFIG_PCI
static int is_vsmp = -1;
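/*
 * Early vSMP box detection: config dword 0 of device 00:1f.0 holds the
 * vendor ID in the low 16 bits and the device ID in the high 16 bits, so a
 * single 32-bit read identifies the ScaleMP control device.
 */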
static void __init detect_vsmp_box(void)
{
        is_vsmp = 0;

        if (!early_pci_allowed())
                return;

        /* Check if we are running on a ScaleMP vSMPowered box */
        if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
            (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
                is_vsmp = 1;
}

int is_vsmp_box(void)
{
        if (is_vsmp != -1)
                return is_vsmp;

        /* Not yet probed by detect_vsmp_box() */
        WARN_ON_ONCE(1);
        return 0;
}
#else
static void __init detect_vsmp_box(void)
{
}

int is_vsmp_box(void)
{
        return 0;
}
#endif
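/*
 * Topology register layout as decoded below: bits 18:16 hold node_shift and
 * the low node_shift bits hold the highest CPU number present on the first
 * board. As a hypothetical example, topology = 0x00030005 gives
 * node_shift = 3 and maxcpus = (5 & 0x7) + 1 = 6.
 */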
static void __init vsmp_cap_cpus(void)
{
#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
        void __iomem *address;
        unsigned int cfg, topology, node_shift, maxcpus;

        /*
         * CONFIG_X86_VSMP is not configured, so limit the number of CPUs to
         * the ones present in the first board, unless explicitly overridden
         * by setup_max_cpus
         */
        if (setup_max_cpus != NR_CPUS)
                return;

        /* Read the vSMP Foundation topology register */
        cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
        address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
        if (WARN_ON(!address))
                return;

        topology = readl(address);
        node_shift = (topology >> 16) & 0x7;
        if (!node_shift)
                /* The value 0 should be decoded as 8 */
                node_shift = 8;
        maxcpus = (topology & ((1 << node_shift) - 1)) + 1;

        pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
                maxcpus);
        setup_max_cpus = maxcpus;
        early_iounmap(address, 4);
#endif
}
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
        return hard_smp_processor_id() >> index_msb;
}
/*
 * In vSMP, all cpus should be capable of handling interrupts, regardless of
 * the APIC used.
 */
static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
                                          const struct cpumask *mask)
{
        cpumask_setall(retmask);
}
static void vsmp_apic_post_init(void)
{
        /* need to update phys_pkg_id */
        apic->phys_pkg_id = apicid_phys_pkg_id;
        apic->vector_allocation_domain = fill_vector_allocation_domain;
}
void __init vsmp_init(void)
{
        detect_vsmp_box();
        if (!is_vsmp_box())
                return;

        x86_platform.apic_post_init = vsmp_apic_post_init;

        vsmp_cap_cpus();

        set_vsmp_pv_ops();
}