/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>

/* The BMC sets this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR				UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR			(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK			(1UL << 63)
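
/*
 * Per-cpu snapshot of the blade's NMI count, used to detect that a new
 * NMI has arrived since this cpu last dumped its stack (see
 * uv_handle_nmi() below).
 */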
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;
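
/*
 * Read a hub MMR before the permanent MMR mappings are set up, using a
 * temporary early_ioremap() window over the hub's local MMR space.
 */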
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
	unsigned long val, *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
	val = *mmr;
	early_iounmap(mmr, sizeof(*mmr));
	return val;
}

static inline bool is_GRU_range(u64 start, u64 end)
{
	return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}

static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	/* Currently, all blades have the same revision number */
	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	uv_min_hub_revision_id = node_id.s.revision;

	if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
		uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;

	uv_hub_info->hub_revision = uv_min_hub_revision_id;
	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
	return pnode;
}

static void __init early_get_apic_pnode_shift(void)
{
	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
	if (!uvh_apicid.v)
		/*
		 * Old bios, use default value
		 */
		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}

/*
 * Add an extra bit as dictated by bios to the destination apicid of
 * interrupts potentially passing through the UV HUB. This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
	union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

	if (is_uv1_hub()) {
		apicid_mask.v =
			uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
		uv_apicid_hibits =
			apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
	}
}
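
/*
 * Detect a UV system from the ACPI MADT OEM ids: "SGI" denotes a UV1 hub
 * and "SGI2" a UV2 hub.  The OEM table id then selects the apic mode:
 * legacy APIC ("UVL"), plain x2apic ("UVX"), or non-unique apicids that
 * need the pnode folded into the upper bits ("UVH").
 */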
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int pnodeid, is_uv1, is_uv2;

	is_uv1 = !strcmp(oem_id, "SGI");
	is_uv2 = !strcmp(oem_id, "SGI2");
	if (is_uv1 || is_uv2) {
		uv_hub_info->hub_revision =
			is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
		pnodeid = early_get_pnodeid();
		early_get_apic_pnode_shift();
		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
		x86_platform.nmi_init = uv_nmi_init;
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			__this_cpu_write(x2apic_extra_bits,
				pnodeid << uvh_apicid.s.pnode_shift);
			uv_system_type = UV_NON_UNIQUE_APIC;
			uv_set_apicid_hibit();
			return 1;
		}
	}
	return 0;
}

enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

static const struct cpumask *uv_target_cpus(void)
{
	return cpu_online_mask;
}
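
/*
 * With physical-mode fixed delivery, each vector is bound to exactly one
 * cpu, so a cpu's vector allocation domain is just itself.
 */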
static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
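
/*
 * Wake a secondary cpu by pushing INIT and then STARTUP messages through
 * the hub's UVH_IPI_INT MMR; the SIPI vector is the page number of the
 * real-mode trampoline (start_rip >> 12).
 */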
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	phys_apicid |= uv_apicid_hibits;
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);

	return 0;
}
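
/* Deliver a single interrupt vector to one cpu through its hub (pnode). */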
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
	else
		return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
}
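
/*
 * On UV_NON_UNIQUE_APIC systems the hardware apicid alone is not unique
 * across the machine; x2apic_extra_bits (the pnode, shifted into place)
 * is OR'd in to form a unique id.
 */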
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __this_cpu_read(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}
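
/* The UV apic driver, selected at boot via uv_acpi_madt_oem_check(). */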
static struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= uv_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= uv_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}

struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};
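
/*
 * Each hub provides three alias/redirect MMR pairs that can forward a
 * low-memory (0 based) alias window to the node that owns the memory.
 */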
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}

enum map_type {map_wb, map_uc};
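
/*
 * Map a hub-relative address range (write-back or uncached) into the
 * kernel's address space, sized to cover all pnodes up to max_pnode.
 */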
static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
						paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable) {
		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
		gru_start_paddr = ((u64)gru.s.base << shift);
		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
	}
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (is_uv1_hub() && mmioh.s1.enable) {
		shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
			max_pnode, map_uc);
	}
	if (is_uv2_hub() && mmioh.s2.enable) {
		shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
			max_pnode, map_uc);
	}
}

static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
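
/*
 * Get the RTC base frequency from the BIOS; fall back to a guess when the
 * call fails or returns an implausibly small value.
 */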
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;

		/* also ensure that boot cpu is enabled */
		cpu = 0;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	int domain, bus, rc;

	PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
			pdev->devfn, decode, command_bits, flags);

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	if ((command_bits & PCI_COMMAND_IO) == 0)
		return 0;

	domain = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

	rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
	PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

	return rc;
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}

/*
 * When NMI is received, print a stack trace.
 */
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
{
	unsigned long real_uv_nmi;
	int bid;

	if (reason != DIE_NMIUNKNOWN)
		return NOTIFY_OK;

	if (in_crash_kexec)
		/* do nothing if entering the crash kernel */
		return NOTIFY_OK;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
		return NOTIFY_DONE;

	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/*
	 * Use a lock so only one cpu prints at a time.
	 * This prevents intermixed output.
	 */
	spin_lock(&uv_nmi_lock);
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

	return NOTIFY_STOP;
}
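
/*
 * Die-chain notifier for the stack dump above, registered just below
 * NMI_LOCAL_LOW_PRIOR so other local NMI users are consulted first.
 */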
static struct notifier_block uv_dump_stack_nmi_nb = {
	.notifier_call	= uv_handle_nmi,
	.priority	= NMI_LOCAL_LOW_PRIOR - 1,
};

void uv_register_nmi_notifier(void)
{
	if (register_die_notifier(&uv_dump_stack_nmi_nb))
		printk(KERN_WARNING "UV NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
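
/*
 * One-time UV setup: size the system from the hub MMRs, allocate the blade
 * and translation tables, then fill in each cpu's uv_hub_info.
 */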
void __init uv_system_init(void)
{
	union uvh_rh_gam_config_mmr_u m_n_config;
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
	int gnode_extra, max_pnode = 0;
	unsigned long mmr_base, present, paddr;
	unsigned short pnode_mask, pnode_io_mask;

	printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	pnode_mask = (1 << n_val) - 1;
	pnode_io_mask = (1 << n_io) - 1;
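
	/*
	 * node_id supplies this hub's gnode; the bits above the pnode field
	 * (gnode_extra, shifted up into gnode_upper) extend global addresses.
	 */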
	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
	gnode_upper = ((unsigned long)gnode_extra << m_val);
	printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
			n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);

	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
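
	/* Count the possible blades from the node-present bitmaps. */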
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		  hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
= sizeof(struct uv_blade_info
) * uv_num_possible_blades();
785 uv_blade_info
= kzalloc(bytes
, GFP_KERNEL
);
786 BUG_ON(!uv_blade_info
);
788 for (blade
= 0; blade
< uv_num_possible_blades(); blade
++)
789 uv_blade_info
[blade
].memory_nid
= -1;
791 get_lowmem_redirect(&lowmem_redir_base
, &lowmem_redir_size
);
793 bytes
= sizeof(uv_node_to_blade
[0]) * num_possible_nodes();
794 uv_node_to_blade
= kmalloc(bytes
, GFP_KERNEL
);
795 BUG_ON(!uv_node_to_blade
);
796 memset(uv_node_to_blade
, 255, bytes
);
798 bytes
= sizeof(uv_cpu_to_blade
[0]) * num_possible_cpus();
799 uv_cpu_to_blade
= kmalloc(bytes
, GFP_KERNEL
);
800 BUG_ON(!uv_cpu_to_blade
);
801 memset(uv_cpu_to_blade
, 255, bytes
);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			pnode = (i * 64 + j) & pnode_mask;
			uv_blade_info[blade].pnode = pnode;
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			spin_lock_init(&uv_blade_info[blade].nmi_lock);
			max_pnode = max(pnode, max_pnode);
			blade++;
		}
	}

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
			    &sn_region_size, &system_serial_number);
	uv_rtc_init();

	for_each_present_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);

		nid = cpu_to_node(cpu);
		/*
		 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
		 */
		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
		uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;

		pnode = uv_apicid_to_pnode(apicid);
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		/* Any node on the blade, else will contain -1. */
		uv_blade_info[blade].memory_nid = nid;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
	}

	/* Add blade/pnode info for nodes without cpus */
	for_each_online_node(nid) {
		if (uv_node_to_blade[nid] >= 0)
			continue;
		paddr = node_start_pfn(nid) << PAGE_SHIFT;
		paddr = uv_soc_phys_ram_to_gpa(paddr);
		pnode = (paddr >> m_val) & pnode_mask;
		blade = boot_pnode_to_blade(pnode);
		uv_node_to_blade[nid] = blade;
	}

	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(max_pnode & pnode_io_mask);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	uv_register_nmi_notifier();
	proc_mkdir("sgi_uv", NULL);

	/* register Legacy VGA I/O redirection handler */
	pci_register_set_vga_state(uv_set_vga_state);

	/*
	 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
	 * EFI is not enabled in the kdump kernel.
	 */
	if (is_kdump_kernel())
		reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);