/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/paravirt_patch.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
        .name   = "Kernel bss",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};
unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
        return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);
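/*
 * Note: ia64_max_cacheline_size is the largest line size of any cache
 * level, as discovered from PAL by get_cache_info() below; callers use
 * the value returned here to align DMA buffers to cache-line boundaries
 * so that a buffer never shares a line with unrelated data.
 */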
unsigned long ia64_iobase;      /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
115 * "flush_icache_range()" needs to know what processor dependent stride size to use
116 * when it makes i-cache(s) coherent with d-caches.
118 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
119 unsigned long ia64_i_cache_stride_shift
= ~0;
121 * "clflush_cache_range()" needs to know what processor dependent stride size to
122 * use when it flushes cache lines including both d-cache and i-cache.
124 /* Safest way to go: 32 bytes by 32 bytes */
125 #define CACHE_STRIDE_SHIFT 5
126 unsigned long ia64_cache_stride_shift
= ~0;
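/*
 * A stride shift of 5 makes the flush loops advance 2^5 = 32 bytes per
 * iteration.  Flushing more often than the true line size wastes cycles
 * but is always correct, which is why 32 bytes is the "safest" fallback
 * used when PAL cannot be queried.
 */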
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * I/O MMU page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
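/*
 * Example: an I/O MMU with 4 KiB pages would set this mask to 0xfff,
 * letting two buffers merge whenever the first ends and the second
 * starts on the same 4 KiB boundary.
 */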
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
        u64 range_start, range_end, prev_start;
        void (*func)(unsigned long, unsigned long, int);
        int i;

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end)
                        return 0;
        }
#endif
        /*
         * lowest possible address (walker uses virtual)
         */
        prev_start = PAGE_OFFSET;
        func = arg;

        for (i = 0; i < num_rsvd_regions; ++i) {
                range_start = max(start, prev_start);
                range_end   = min(end, rsvd_region[i].start);

                if (range_start < range_end)
                        call_pernode_memory(__pa(range_start), range_end - range_start, func);

                /* nothing more available in this segment */
                if (range_end == end)
                        return 0;

                prev_start = rsvd_region[i].end;
        }
        /* end of memory marker allows full processing inside loop body */
        return 0;
}
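/*
 * The loop above depends on rsvd_region[] being sorted: each pass
 * reports the gap [prev_start, rsvd_region[i].start) clipped to
 * [start, end), then steps past the reserved range.  The ~0UL
 * end-of-memory marker appended by reserve_memory() guarantees the
 * final gap is emitted from inside the loop, with no tail case.
 */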
/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(u64 start, u64 end, void *arg)
{
        void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end)
                        return 0;
        }
#endif
        func = arg;
        if (start < end)
                call_pernode_memory(__pa(start), end - start, func);
        return 0;
}
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
        int j;

        /* simple bubble sorting */
        while (max--) {
                for (j = 0; j < max; ++j) {
                        if (rsvd_region[j].start > rsvd_region[j+1].start) {
                                struct rsvd_region tmp;
                                tmp = rsvd_region[j];
                                rsvd_region[j] = rsvd_region[j + 1];
                                rsvd_region[j + 1] = tmp;
                        }
                }
        }
}
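/*
 * Bubble sort is O(n^2), but rsvd_region[] holds at most
 * IA64_MAX_RSVD_REGIONS + 1 entries, so simplicity beats an
 * asymptotically faster sort here.
 */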
/*
 * Request address space for all standard resources
 */
static int __init
register_memory(void)
{
        code_resource.start = ia64_tpa(_text);
        code_resource.end   = ia64_tpa(_etext) - 1;
        data_resource.start = ia64_tpa(_etext);
        data_resource.end   = ia64_tpa(_edata) - 1;
        bss_resource.start  = ia64_tpa(__bss_start);
        bss_resource.end    = ia64_tpa(_end) - 1;
        efi_initialize_iomem_resources(&code_resource, &data_resource,
                        &bss_resource);

        return 0;
}

__initcall(register_memory);
#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour.  Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c.  The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case.  See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init
check_crashkernel_memory(unsigned long pbase, size_t size)
{
        if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
                return 1;
        else
                return pbase < (1UL << 32);
}

static void __init
setup_crashkernel(unsigned long total, int *n)
{
        unsigned long long base = 0, size = 0;
        int ret;

        ret = parse_crashkernel(boot_command_line, total,
                        &size, &base);
        if (ret == 0 && size > 0) {
                if (!base) {
                        sort_regions(rsvd_region, *n);
                        base = kdump_find_rsvd_region(size,
                                        rsvd_region, *n);
                }

                if (!check_crashkernel_memory(base, size)) {
                        pr_warning("crashkernel: There would be kdump memory "
                                "at %ld GB but this is unusable because it "
                                "must\nbe below 4 GB. Change the memory "
                                "configuration of the machine.\n",
                                (unsigned long)(base >> 30));
                        return;
                }

                if (base != ~0UL) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                        "for crashkernel (System RAM: %ldMB)\n",
                                        (unsigned long)(size >> 20),
                                        (unsigned long)(base >> 20),
                                        (unsigned long)(total >> 20));
                        rsvd_region[*n].start =
                                (unsigned long)__va(base);
                        rsvd_region[*n].end =
                                (unsigned long)__va(base + size);
                        (*n)++;
                        crashk_res.start = base;
                        crashk_res.end = base + size - 1;
                }
        }
        efi_memmap_res.start = ia64_boot_param->efi_memmap;
        efi_memmap_res.end = efi_memmap_res.start +
                ia64_boot_param->efi_memmap_size;
        boot_param_res.start = __pa(ia64_boot_param);
        boot_param_res.end = boot_param_res.start +
                sizeof(*ia64_boot_param);
}
#else
static inline void __init
setup_crashkernel(unsigned long total, int *n)
{}
#endif
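/*
 * The reservation above is driven by the "crashkernel=size[@offset]"
 * boot parameter.  When no offset is given, kdump_find_rsvd_region()
 * picks a hole after the current regions are sorted; the result must
 * still pass check_crashkernel_memory() on machvecs that need the
 * capture kernel below 4 GB.
 */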
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
        int n = 0;
        unsigned long total_memory;

        /*
         * none of the entries in this table overlap
         */
        rsvd_region[n].start = (unsigned long) ia64_boot_param;
        rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
        rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
        rsvd_region[n].end   = (rsvd_region[n].start
                                + strlen(__va(ia64_boot_param->command_line)) + 1);
        n++;

        rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
        rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
        n++;

        n += paravirt_reserve_memory(&rsvd_region[n]);

#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
                rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
                n++;
        }
#endif

#ifdef CONFIG_CRASH_DUMP
        if (reserve_elfcorehdr(&rsvd_region[n].start,
                               &rsvd_region[n].end) == 0)
                n++;
#endif

        total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
        n++;

        setup_crashkernel(total_memory, &n);

        /* end of memory marker */
        rsvd_region[n].start = ~0UL;
        rsvd_region[n].end   = ~0UL;
        n++;

        num_rsvd_regions = n;
        BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

        sort_regions(rsvd_region, num_rsvd_regions);
}
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
                initrd_end   = initrd_start + ia64_boot_param->initrd_size;

                printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
                       initrd_start, ia64_boot_param->initrd_size);
        }
#endif
}
static void __init
io_port_init (void)
{
        unsigned long phys_iobase;

        /*
         * Set `iobase' based on the EFI memory map or, failing that, the
         * value firmware left in ar.k0.
         *
         * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
         * the port's virtual address, so ia32_load_state() loads it with a
         * user virtual address.  But in ia64 mode, glibc uses the
         * *physical* address in ar.k0 to mmap the appropriate area from
         * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
         * cases, user-mode can only use the legacy 0-64K I/O port space.
         *
         * ar.k0 is not involved in kernel I/O port accesses, which can use
         * any of the I/O port spaces and are done via MMIO using the
         * virtual mmio_base from the appropriate io_space[].
         */
        phys_iobase = efi_get_iobase();
        if (!phys_iobase) {
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
                printk(KERN_INFO "No I/O port range found in EFI memory map, "
                        "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

        /* setup legacy IO port space */
        io_space[0].mmio_base = ia64_iobase;
        io_space[0].sparse = 1;
        num_io_spaces = 1;
}
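/*
 * io_space[0] is the legacy 64K port space.  With .sparse set, the
 * inX()/outX() helpers translate a port number into an offset spread
 * across pages of the region at mmio_base (see the sparse encoding in
 * arch/ia64/include/asm/io.h) rather than a dense 1:1 byte offset.
 */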
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
        int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
                if (!sn_serial_console_early_setup())
                        earlycons++;
        }
#endif
#ifdef CONFIG_EFI_PCDP
        if (!efi_setup_pcdp_console(cmdline))
                earlycons++;
#endif
        if (!simcons_register())
                earlycons++;

        return (earlycons) ? 0 : -1;
}
static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
        cpu_set(smp_processor_id(), cpu_online_map);
#endif
}
static __initdata int nomca;
static __init int setup_nomca(char *s)
{
        nomca = 1;
        return 0;
}
early_param("nomca", setup_nomca);
/*
 * Note: elfcorehdr_addr is not just limited to vmcore.  It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic.  Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init
parse_elfcorehdr(char *arg)
{
        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &arg);
        return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init
reserve_elfcorehdr(u64 *start, u64 *end)
{
        u64 length;

        /* We get the address using the kernel command line,
         * but the size is extracted from the EFI tables.
         * Both address and size are required for reservation
         * to work properly.
         */

        if (!is_vmcore_usable())
                return -EINVAL;

        if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
                vmcore_unusable();
                return -EINVAL;
        }

        *start = (unsigned long)__va(elfcorehdr_addr);
        *end = *start + length;
        return 0;
}

#endif /* CONFIG_CRASH_DUMP */
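/*
 * In a kdump setup the capture kernel normally receives
 * "elfcorehdr=<addr>" automatically from kexec-tools, so
 * elfcorehdr_addr points at the ELF header the primary kernel left
 * behind describing old memory.
 */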
void __init
setup_arch (char **cmdline_p)
{
        unw_init();

        paravirt_arch_setup_early();

        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
        paravirt_patch_apply();

        *cmdline_p = __va(ia64_boot_param->command_line);
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

        efi_init();
        io_port_init();

#ifdef CONFIG_IA64_GENERIC
        /* machvec needs to be parsed from the command line
         * before parse_early_param() is called to ensure
         * that ia64_mv is initialised before any command line
         * settings may cause console setup to occur
         */
        machvec_init_from_cmdline(*cmdline_p);
#endif

        parse_early_param();

        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();

#ifdef CONFIG_ACPI
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
        early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
#  ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
#  endif
        per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
                32 : cpus_weight(early_cpu_possible_map)),
                additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
        smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

        find_memory();

        /* process SAL system table: */
        ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
        ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
        {
                unsigned long num_phys_stacked;

                if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
                        ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
        }
#endif

#ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();
#endif

        cpu_init();     /* initialize the bootstrap CPU */
        mmu_context_init();     /* initialize context_id bitmap */

        paravirt_banner();
        paravirt_arch_setup_console(cmdline_p);

#ifdef CONFIG_VT
        if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
                conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
                /*
                 * Non-legacy systems may route legacy VGA MMIO range to system
                 * memory.  vga_con probes the MMIO hole, so memory looks like
                 * a VGA device to it.  The EFI memory map can tell us if it's
                 * memory so we can avoid this problem.
                 */
                if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
                        conswitchp = &vga_con;
# endif
        }
#endif

        /* enable IA-64 Machine Check Abort Handling unless disabled */
        if (paravirt_arch_setup_nomca())
                nomca = 1;
        if (!nomca)
                ia64_mca_init();

        platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
        check_sal_cache_flush();
#endif
        paging_init();
}
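/*
 * Ordering notes for setup_arch(): efi_init() must precede
 * io_port_init(), which consults the EFI memory map for the I/O port
 * range; and cpu_init() runs before mmu_context_init() so that
 * ia64_ctx.max_ctx reflects PAL's RID size before the context bitmap
 * is sized.
 */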
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#       define lpj      c->loops_per_jiffy
#       define cpunum   c->cpu
#else
#       define lpj      loops_per_jiffy
#       define cpunum   0
#endif
        static struct {
                unsigned long mask;
                const char *feature_name;
        } feature_bits[] = {
                { 1UL << 0, "branchlong" },
                { 1UL << 1, "spontaneous deferral"},
                { 1UL << 2, "16-byte atomic ops" }
        };
        char features[128], *cp, *sep;
        struct cpuinfo_ia64 *c = v;
        unsigned long mask;
        unsigned long proc_freq;
        int i, size;

        mask = c->features;

        /* build the feature string: */
        memcpy(features, "standard", 9);
        cp = features;
        size = sizeof(features);
        sep = "";
        for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
                if (mask & feature_bits[i].mask) {
                        cp += snprintf(cp, size, "%s%s", sep,
                                       feature_bits[i].feature_name);
                        sep = ", ";
                        mask &= ~feature_bits[i].mask;
                        size = sizeof(features) - (cp - features);
                }
        }
        if (mask && size > 1) {
                /* print unknown features as a hex value */
                snprintf(cp, size, "%s0x%lx", sep, mask);
        }

        proc_freq = cpufreq_quick_get(cpunum);
        if (!proc_freq)
                proc_freq = c->proc_freq / 1000;

        seq_printf(m,
                   "processor  : %d\n"
                   "vendor     : %s\n"
                   "arch       : IA-64\n"
                   "family     : %u\n"
                   "model      : %u\n"
                   "model name : %s\n"
                   "revision   : %u\n"
                   "archrev    : %u\n"
                   "features   : %s\n"
                   "cpu number : %lu\n"
                   "cpu regs   : %u\n"
                   "cpu MHz    : %lu.%03lu\n"
                   "itc MHz    : %lu.%06lu\n"
                   "BogoMIPS   : %lu.%02lu\n",
                   cpunum, c->vendor, c->family, c->model,
                   c->model_name, c->revision, c->archrev,
                   features, c->ppn, c->number,
                   proc_freq / 1000, proc_freq % 1000,
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
        seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
        if (c->socket_id != -1)
                seq_printf(m, "physical id: %u\n", c->socket_id);
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
                seq_printf(m,
                           "core id    : %u\n"
                           "thread id  : %u\n",
                           c->core_id, c->thread_id);
#endif
        seq_printf(m, "\n");

        return 0;
}
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
        while (*pos < nr_cpu_ids && !cpu_online(*pos))
                ++*pos;
#endif
        return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = show_cpuinfo
};
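/*
 * seq_file iteration for /proc/cpuinfo: c_start() skips to the first
 * online CPU at or after *pos, c_next() advances the position, and
 * show_cpuinfo() emits one record per online CPU.
 */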
#define MAX_BRANDS      8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
        static int overflow;
        char brand[128];
        int i;

        memcpy(brand, "Unknown", 8);
        if (ia64_pal_get_brand_info(brand)) {
                if (family == 0x7)
                        memcpy(brand, "Merced", 7);
                else if (family == 0x1f) switch (model) {
                        case 0: memcpy(brand, "McKinley", 9); break;
                        case 1: memcpy(brand, "Madison", 8); break;
                        case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
                }
        }
        for (i = 0; i < MAX_BRANDS; i++)
                if (strcmp(brandname[i], brand) == 0)
                        return brandname[i];
        for (i = 0; i < MAX_BRANDS; i++)
                if (brandname[i][0] == '\0')
                        return strcpy(brandname[i], brand);
        if (overflow++ == 0)
                printk(KERN_ERR
                       "%s: Table overflow. Some processor model information will be missing\n",
                       __func__);
        return "Unknown";
}
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
        union {
                unsigned long bits[5];
                struct {
                        /* id 0 & 1: */
                        char vendor[16];

                        /* id 2 */
                        u64 ppn;                /* processor serial number */

                        /* id 3: */
                        unsigned number         :  8;
                        unsigned revision       :  8;
                        unsigned model          :  8;
                        unsigned family         :  8;
                        unsigned archrev        :  8;
                        unsigned reserved       : 24;

                        /* id 4: */
                        u64 features;
                } field;
        } cpuid;
        pal_vm_info_1_u_t vm1;
        pal_vm_info_2_u_t vm2;
        pal_status_t status;
        unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
        int i;

        for (i = 0; i < 5; ++i)
                cpuid.bits[i] = ia64_get_cpuid(i);

        memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
        c->cpu = smp_processor_id();

        /* below default values will be overwritten by identify_siblings()
         * for Multi-Threading/Multi-Core capable CPUs
         */
        c->threads_per_core = c->cores_per_socket = c->num_log = 1;
        c->socket_id = -1;

        identify_siblings(c);

        if (c->threads_per_core > smp_num_siblings)
                smp_num_siblings = c->threads_per_core;
#endif
        c->ppn = cpuid.field.ppn;
        c->number = cpuid.field.number;
        c->revision = cpuid.field.revision;
        c->model = cpuid.field.model;
        c->family = cpuid.field.family;
        c->archrev = cpuid.field.archrev;
        c->features = cpuid.field.features;
        c->model_name = get_model_name(c->family, c->model);

        status = ia64_pal_vm_summary(&vm1, &vm2);
        if (status == PAL_STATUS_SUCCESS) {
                impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
                phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
        }
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
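/*
 * Mask arithmetic, using the Itanium defaults: with impl_va_msb = 50,
 * unimpl_va_mask clears bits 0..50 and the region bits 61..63, leaving
 * bits 51..60 set -- exactly the virtual-address bits the
 * implementation does not provide.  unimpl_pa_mask is analogous: with
 * phys_addr_size = 44, bits 44..62 are set.
 */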
#ifdef CONFIG_SMP
void __init
setup_per_cpu_areas (void)
{
        /* start_kernel() requires this... */
}
#endif
/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void __cpuinit
get_cache_info(void)
{
        unsigned long line_size, max = 1;
        unsigned long l, levels, unique_caches;
        pal_cache_config_info_t cci;
        long status;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __func__, status);
                max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
                /* Safest setup for "clflush_cache_range()" */
                ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
                goto out;
        }

        for (l = 0; l < levels; ++l) {
                /* cache_type (data_or_unified)=2 */
                status = ia64_pal_cache_config_info(l, 2, &cci);
                if (status != 0) {
                        printk(KERN_ERR "%s: ia64_pal_cache_config_info"
                                "(l=%lu, 2) failed (status=%ld)\n",
                                __func__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        /* The safest setup for "clflush_cache_range()" */
                        ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                } else {
                        if (cci.pcci_stride < ia64_cache_stride_shift)
                                ia64_cache_stride_shift = cci.pcci_stride;

                        line_size = 1 << cci.pcci_line_size;
                        if (line_size > max)
                                max = line_size;
                }

                if (!cci.pcci_unified) {
                        /* cache_type (instruction)=1 */
                        status = ia64_pal_cache_config_info(l, 1, &cci);
                        if (status != 0) {
                                printk(KERN_ERR "%s: ia64_pal_cache_config_info"
                                        "(l=%lu, 1) failed (status=%ld)\n",
                                        __func__, l, status);
                                /* The safest setup for "flush_icache_range()" */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
                }
                if (cci.pcci_stride < ia64_i_cache_stride_shift)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
  out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
        extern void __cpuinit ia64_mmu_init (void *);
        static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
        /*
         * insert boot cpu into sibling and core maps
         * (must be done after per_cpu area is setup)
         */
        if (smp_processor_id() == 0) {
                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, cpu_core_map[0]);
        } else {
                /*
                 * Set ar.k3 so that assembly code in MCA handler can compute
                 * physical addresses of per cpu variables with a simple:
                 *      phys = ar.k3 + &per_cpu_var
                 * and the alt-dtlb-miss handler can set per-cpu mapping into
                 * the TLB when needed.  head.S already did this for cpu0.
                 */
                ia64_set_kr(IA64_KR_PER_CPU_DATA,
                            ia64_tpa(cpu_data) - (long) __per_cpu_start);
        }
#endif

        get_cache_info();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
#               define FEATURE_SET 16
                struct ia64_pal_retval iprv;

                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize the page-table base register to a global
         * directory with all zeroes.  This ensures that we can handle
         * TLB-misses to user address-space even before we created the
         * first user address-space.  This may happen, e.g., due to
         * aggressive use of lfetch.fault.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

        /*
         * Initialize default control register to defer speculative faults except
         * for those arising from TLB misses, which are not deferred.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);

        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
        ia32_cpu_init();
#endif

        /* Clear ITC to eliminate sched_clock() overflows in human time. */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);

        /* Clear any pending interrupts left by SAL/EFI */
        while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
                ia64_eoi();

#ifdef CONFIG_SMP
        normal_xtp();
#endif

        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
                setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
        } else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        if (num_phys_stacked > max_num_phys_stacked) {
                ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
                max_num_phys_stacked = num_phys_stacked;
        }
        platform_cpu_init();
        pm_idle = default_idle;
}
void __init
check_bugs (void)
{
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
}
static int __init
run_dmi_scan(void)
{
        dmi_scan_machine();
        return 0;
}
core_initcall(run_dmi_scan);