/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);

DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
        .name   = "Kernel bss",
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;      /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
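/*
 * Note: io_space[0] is filled in by io_port_init() below as the legacy
 * 64K port space; how (and whether) additional spaces get registered
 * is platform specific.
 */
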
105 * "flush_icache_range()" needs to know what processor dependent stride size to use
106 * when it makes i-cache(s) coherent with d-caches.
108 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
109 unsigned long ia64_i_cache_stride_shift
= ~0;
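/*
 * ~0 is a "not yet determined" value here: get_cache_info() narrows it
 * to the minimum stride reported by PAL, or falls back to
 * I_CACHE_STRIDE_SHIFT when the PAL calls fail.
 */
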
111 * "clflush_cache_range()" needs to know what processor dependent stride size to
112 * use when it flushes cache lines including both d-cache and i-cache.
114 /* Safest way to go: 32 bytes by 32 bytes */
115 #define CACHE_STRIDE_SHIFT 5
116 unsigned long ia64_cache_stride_shift
= ~0;
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
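/*
 * A platform whose I/O MMU can merge discontiguous buffers is expected
 * to lower this mask during its own initialization; the default above
 * effectively disables merging.
 */
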
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;

/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
        u64 range_start, range_end, prev_start;
        void (*func)(unsigned long, unsigned long, int);
        int i;

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end) return 0;
        }
#endif
        /*
         * lowest possible address (walker uses virtual)
         */
        prev_start = PAGE_OFFSET;
        func = arg;

        for (i = 0; i < num_rsvd_regions; ++i) {
                range_start = max(start, prev_start);
                range_end   = min(end, rsvd_region[i].start);

                if (range_start < range_end)
                        call_pernode_memory(__pa(range_start), range_end - range_start, func);

                /* nothing more available in this segment */
                if (range_end == end) return 0;

                prev_start = rsvd_region[i].end;
        }
        /* end of memory marker allows full processing inside loop body */
        return 0;
}

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(u64 start, u64 end, void *arg)
{
        void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end)
                        return 0;
        }
#endif
        func = arg;
        if (start < end)
                call_pernode_memory(__pa(start), end - start, func);
        return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
        int j;

        /* simple bubble sorting */
        while (max--) {
                for (j = 0; j < max; ++j) {
                        if (rsvd_region[j].start > rsvd_region[j+1].start) {
                                struct rsvd_region tmp;
                                tmp = rsvd_region[j];
                                rsvd_region[j] = rsvd_region[j + 1];
                                rsvd_region[j + 1] = tmp;
                        }
                }
        }
}
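/*
 * Bubble sort is quadratic, but max is bounded by
 * IA64_MAX_RSVD_REGIONS + 1, so simplicity wins over speed here.
 */
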
/* merge overlaps */
static int __init
merge_regions (struct rsvd_region *rsvd_region, int max)
{
        int i;

        for (i = 1; i < max; ++i) {
                if (rsvd_region[i].start >= rsvd_region[i-1].end)
                        continue;
                if (rsvd_region[i].end > rsvd_region[i-1].end)
                        rsvd_region[i-1].end = rsvd_region[i].end;
                --max;
                memmove(&rsvd_region[i], &rsvd_region[i+1],
                        (max - i) * sizeof(struct rsvd_region));
                --i;
        }
        return max;
}
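/*
 * Since merging shrinks the table in place, callers must adopt the
 * returned count, e.g.:
 *
 *	sort_regions(rsvd_region, n);
 *	n = merge_regions(rsvd_region, n);
 */
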
/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
        code_resource.start = ia64_tpa(_text);
        code_resource.end   = ia64_tpa(_etext) - 1;
        data_resource.start = ia64_tpa(_etext);
        data_resource.end   = ia64_tpa(_edata) - 1;
        bss_resource.start  = ia64_tpa(__bss_start);
        bss_resource.end    = ia64_tpa(_end) - 1;
        efi_initialize_iomem_resources(&code_resource, &data_resource,
                        &bss_resource);

        return 0;
}

__initcall(register_memory);
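/*
 * Note: __initcall() runs this after setup_arch() has processed the
 * EFI memory map, so efi_initialize_iomem_resources() can slot the
 * kernel text/data/bss resources into the System RAM ranges it
 * registers.
 */
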
#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
        if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
                return 1;
        else
                return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
        unsigned long long base = 0, size = 0;
        int ret;

        ret = parse_crashkernel(boot_command_line, total,
                        &size, &base);
        if (ret == 0 && size > 0) {
                if (!base) {
                        sort_regions(rsvd_region, *n);
                        *n = merge_regions(rsvd_region, *n);
                        base = kdump_find_rsvd_region(size,
                                        rsvd_region, *n);
                }

                if (!check_crashkernel_memory(base, size)) {
                        pr_warning("crashkernel: There would be kdump memory "
                                "at %ld GB but this is unusable because it "
                                "must\nbe below 4 GB. Change the memory "
                                "configuration of the machine.\n",
                                (unsigned long)(base >> 30));
                        return;
                }

                if (base != ~0UL) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                        "for crashkernel (System RAM: %ldMB)\n",
                                        (unsigned long)(size >> 20),
                                        (unsigned long)(base >> 20),
                                        (unsigned long)(total >> 20));
                        rsvd_region[*n].start =
                                (unsigned long)__va(base);
                        rsvd_region[*n].end =
                                (unsigned long)__va(base + size);
                        (*n)++;
                        crashk_res.start = base;
                        crashk_res.end = base + size - 1;
                }
        }
        efi_memmap_res.start = ia64_boot_param->efi_memmap;
        efi_memmap_res.end = efi_memmap_res.start +
                ia64_boot_param->efi_memmap_size;
        boot_param_res.start = __pa(ia64_boot_param);
        boot_param_res.end = boot_param_res.start +
                sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
        int n = 0;
        unsigned long total_memory;

        /*
         * none of the entries in this table overlap
         */
        rsvd_region[n].start = (unsigned long) ia64_boot_param;
        rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
        rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
        rsvd_region[n].end   = (rsvd_region[n].start
                                + strlen(__va(ia64_boot_param->command_line)) + 1);
        n++;

        rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
        rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
        n++;

#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
                rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
                n++;
        }
#endif

#ifdef CONFIG_CRASH_DUMP
        if (reserve_elfcorehdr(&rsvd_region[n].start,
                               &rsvd_region[n].end) == 0)
                n++;
#endif

        total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
        n++;

        setup_crashkernel(total_memory, &n);

        /* end of memory marker */
        rsvd_region[n].start = ~0UL;
        rsvd_region[n].end   = ~0UL;
        n++;

        num_rsvd_regions = n;
        BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

        sort_regions(rsvd_region, num_rsvd_regions);
        num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
}
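/*
 * After the sort/merge pass above, rsvd_region[] is ordered by start
 * address and overlap-free, which is what the linear walk in
 * filter_rsvd_memory() relies on.
 */
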
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
                initrd_end   = initrd_start+ia64_boot_param->initrd_size;

                printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
                       initrd_start, ia64_boot_param->initrd_size);
        }
#endif
}

static void __init
io_port_init (void)
{
        unsigned long phys_iobase;

        /*
         * Set `iobase' based on the EFI memory map or, failing that, the
         * value firmware left in ar.k0.
         *
         * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
         * the port's virtual address, so ia32_load_state() loads it with a
         * user virtual address.  But in ia64 mode, glibc uses the
         * *physical* address in ar.k0 to mmap the appropriate area from
         * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
         * cases, user-mode can only use the legacy 0-64K I/O port space.
         *
         * ar.k0 is not involved in kernel I/O port accesses, which can use
         * any of the I/O port spaces and are done via MMIO using the
         * virtual mmio_base from the appropriate io_space[].
         */
        phys_iobase = efi_get_iobase();
        if (!phys_iobase) {
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
                printk(KERN_INFO "No I/O port range found in EFI memory map, "
                        "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

        /* setup legacy IO port space */
        io_space[0].mmio_base = ia64_iobase;
        io_space[0].sparse = 1;
        num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
        int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
                if (!sn_serial_console_early_setup())
                        earlycons++;
        }
#endif
#ifdef CONFIG_EFI_PCDP
        if (!efi_setup_pcdp_console(cmdline))
                earlycons++;
#endif
        if (!simcons_register())
                earlycons++;

        return (earlycons) ? 0 : -1;
}

static void __init
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
        set_cpu_online(smp_processor_id(), true);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
        nomca = 1;
        return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_CRASH_DUMP
int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
        u64 length;

        /* We get the address using the kernel command line,
         * but the size is extracted from the EFI tables.
         * Both address and size are required for reservation
         * to work properly.
         */

        if (!is_vmcore_usable())
                return -EINVAL;

        if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
                vmcore_unusable();
                return -EINVAL;
        }

        *start = (unsigned long)__va(elfcorehdr_addr);
        *end = *start + length;
        return 0;
}
#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
        unw_init();

        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

        *cmdline_p = __va(ia64_boot_param->command_line);
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

        efi_init();
        io_port_init();

#ifdef CONFIG_IA64_GENERIC
        /* machvec needs to be parsed from the command line
         * before parse_early_param() is called to ensure
         * that ia64_mv is initialised before any command line
         * settings may cause console setup to occur
         */
        machvec_init_from_cmdline(*cmdline_p);
#endif

        parse_early_param();

        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();

#ifdef CONFIG_ACPI
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
        early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
# ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
# endif
        per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
                32 : cpumask_weight(&early_cpu_possible_map)),
                additional_cpus > 0 ? additional_cpus : 0);
# endif
#endif /* CONFIG_ACPI */

#ifdef CONFIG_SMP
        smp_build_cpu_map();
#endif
        find_memory();

        /* process SAL system table: */
        ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
        ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
        {
                unsigned long num_phys_stacked;

                if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
                        ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
        }
#endif

#ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();
#endif

        cpu_init();     /* initialize the bootstrap CPU */
        mmu_context_init();     /* initialize context_id bitmap */

#ifdef CONFIG_VT
        if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
        /*
         * Non-legacy systems may route legacy VGA MMIO range to system
         * memory.  vga_con probes the MMIO hole, so memory looks like
         * a VGA device to it.  The EFI memory map can tell us if it's
         * memory so we can avoid this problem.
         */
        if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
                conswitchp = &vga_con;
# endif
        }
#endif

        /* enable IA-64 Machine Check Abort Handling unless disabled */
        if (!nomca)
                ia64_mca_init();

        platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
        check_sal_cache_flush();
#endif
        paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
# define lpj    c->loops_per_jiffy
# define cpunum c->cpu
#else
# define lpj    loops_per_jiffy
# define cpunum 0
#endif
        static struct {
                unsigned long mask;
                const char *feature_name;
        } feature_bits[] = {
                { 1UL << 0, "branchlong" },
                { 1UL << 1, "spontaneous deferral"},
                { 1UL << 2, "16-byte atomic ops" }
        };
        char features[128], *cp, *sep;
        struct cpuinfo_ia64 *c = v;
        unsigned long mask;
        unsigned long proc_freq;
        int i, size;

        mask = c->features;

        /* build the feature string: */
        memcpy(features, "standard", 9);
        cp = features;
        size = sizeof(features);
        sep = "";
        for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
                if (mask & feature_bits[i].mask) {
                        cp += snprintf(cp, size, "%s%s", sep,
                                       feature_bits[i].feature_name),
                        sep = ", ";
                        mask &= ~feature_bits[i].mask;
                        size = sizeof(features) - (cp - features);
                }
        }
        if (mask && size > 1) {
                /* print unknown features as a hex value */
                snprintf(cp, size, "%s0x%lx", sep, mask);
        }

        proc_freq = cpufreq_quick_get(cpunum);
        if (!proc_freq)
                proc_freq = c->proc_freq / 1000;

        seq_printf(m,
                   "processor  : %d\n"
                   "vendor     : %s\n"
                   "arch       : IA-64\n"
                   "family     : %u\n"
                   "model      : %u\n"
                   "model name : %s\n"
                   "revision   : %u\n"
                   "archrev    : %u\n"
                   "features   : %s\n"
                   "cpu number : %lu\n"
                   "cpu regs   : %u\n"
                   "cpu MHz    : %lu.%03lu\n"
                   "itc MHz    : %lu.%06lu\n"
                   "BogoMIPS   : %lu.%02lu\n",
                   cpunum, c->vendor, c->family, c->model,
                   c->model_name, c->revision, c->archrev,
                   features, c->ppn, c->number,
                   proc_freq / 1000, proc_freq % 1000,
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
        seq_printf(m, "siblings   : %u\n",
                   cpumask_weight(&cpu_core_map[cpunum]));
        if (c->socket_id != -1)
                seq_printf(m, "physical id: %u\n", c->socket_id);
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
                seq_printf(m,
                           "core id    : %u\n"
                           "thread id  : %u\n",
                           c->core_id, c->thread_id);
#endif
        seq_printf(m, "\n");

        return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
        while (*pos < nr_cpu_ids && !cpu_online(*pos))
                ++*pos;
#endif
        return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start =        c_start,
        .next =         c_next,
        .stop =         c_stop,
        .show =         show_cpuinfo
};

#define MAX_BRANDS      8
static char brandname[MAX_BRANDS][128];

static char *
get_model_name(__u8 family, __u8 model)
{
        static int overflow;
        char brand[128];
        int i;

        memcpy(brand, "Unknown", 8);
        if (ia64_pal_get_brand_info(brand)) {
                if (family == 0x7)
                        memcpy(brand, "Merced", 7);
                else if (family == 0x1f) switch (model) {
                        case 0: memcpy(brand, "McKinley", 9); break;
                        case 1: memcpy(brand, "Madison", 8); break;
                        case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
                }
        }
        for (i = 0; i < MAX_BRANDS; i++)
                if (strcmp(brandname[i], brand) == 0)
                        return brandname[i];
        for (i = 0; i < MAX_BRANDS; i++)
                if (brandname[i][0] == '\0')
                        return strcpy(brandname[i], brand);
        if (overflow++ == 0)
                printk(KERN_ERR
                       "%s: Table overflow. Some processor model information will be missing\n",
                       __func__);
        return "Unknown";
}

static void
identify_cpu (struct cpuinfo_ia64 *c)
{
        union {
                unsigned long bits[5];
                struct {
                        /* id 0 & 1: */
                        char vendor[16];

                        /* id 2 */
                        u64 ppn;                /* processor serial number */

                        /* id 3: */
                        unsigned number         :  8;
                        unsigned revision       :  8;
                        unsigned model          :  8;
                        unsigned family         :  8;
                        unsigned archrev        :  8;
                        unsigned reserved       : 24;

                        /* id 4: */
                        u64 features;
                } field;
        } cpuid;
        pal_vm_info_1_u_t vm1;
        pal_vm_info_2_u_t vm2;
        pal_status_t status;
        unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
        int i;

        for (i = 0; i < 5; ++i)
                cpuid.bits[i] = ia64_get_cpuid(i);

        memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
        c->cpu = smp_processor_id();

        /* below default values will be overwritten by identify_siblings()
         * for Multi-Threading/Multi-Core capable CPUs
         */
        c->threads_per_core = c->cores_per_socket = c->num_log = 1;
        c->socket_id = -1;

        identify_siblings(c);

        if (c->threads_per_core > smp_num_siblings)
                smp_num_siblings = c->threads_per_core;
#endif
        c->ppn = cpuid.field.ppn;
        c->number = cpuid.field.number;
        c->revision = cpuid.field.revision;
        c->model = cpuid.field.model;
        c->family = cpuid.field.family;
        c->archrev = cpuid.field.archrev;
        c->features = cpuid.field.features;
        c->model_name = get_model_name(c->family, c->model);

        status = ia64_pal_vm_summary(&vm1, &vm2);
        if (status == PAL_STATUS_SUCCESS) {
                impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
                phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
        }
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void
get_cache_info(void)
{
        unsigned long line_size, max = 1;
        unsigned long l, levels, unique_caches;
        pal_cache_config_info_t cci;
        long status;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __func__, status);
                max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
                /* Safest setup for "clflush_cache_range()" */
                ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
                goto out;
        }

        for (l = 0; l < levels; ++l) {
                /* cache_type (data_or_unified)=2 */
                status = ia64_pal_cache_config_info(l, 2, &cci);
                if (status != 0) {
                        printk(KERN_ERR "%s: ia64_pal_cache_config_info"
                                "(l=%lu, 2) failed (status=%ld)\n",
                                __func__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        /* The safest setup for "clflush_cache_range()" */
                        ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }

                if (cci.pcci_stride < ia64_cache_stride_shift)
                        ia64_cache_stride_shift = cci.pcci_stride;

                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;

                if (!cci.pcci_unified) {
                        /* cache_type (instruction)=1 */
                        status = ia64_pal_cache_config_info(l, 1, &cci);
                        if (status != 0) {
                                printk(KERN_ERR "%s: ia64_pal_cache_config_info"
                                        "(l=%lu, 1) failed (status=%ld)\n",
                                        __func__, l, status);
                                /* The safest setup for flush_icache_range() */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
                }
                if (cci.pcci_stride < ia64_i_cache_stride_shift)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
        extern void ia64_mmu_init(void *);
        static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
        /*
         * insert boot cpu into sibling and core maps
         * (must be done after per_cpu area is setup)
         */
        if (smp_processor_id() == 0) {
                cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
                cpumask_set_cpu(0, &cpu_core_map[0]);
        } else {
                /*
                 * Set ar.k3 so that assembly code in MCA handler can compute
                 * physical addresses of per cpu variables with a simple:
                 *      phys = ar.k3 + &per_cpu_var
                 * and the alt-dtlb-miss handler can set per-cpu mapping into
                 * the TLB when needed.  head.S already did this for cpu0.
                 */
                ia64_set_kr(IA64_KR_PER_CPU_DATA,
                            ia64_tpa(cpu_data) - (long) __per_cpu_start);
        }
#endif

        get_cache_info();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
#               define FEATURE_SET 16
                struct ia64_pal_retval iprv;

                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize the page-table base register to a global
         * directory with all zeroes.  This ensures that we can handle
         * TLB-misses to user address-space even before we created the
         * first user address-space.  This may happen, e.g., due to
         * aggressive use of lfetch.fault.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

        /*
         * Initialize default control register to defer speculative faults except
         * for those arising from TLB misses, which are not deferred.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
        ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);

        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));

        /* Clear ITC to eliminate sched_clock() overflows in human time.  */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
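        /*
         * Bit 16 of ITV, LRR0/1, PMV and CMCV is the mask (m) bit, so
         * writing (1 << 16) masks each source while leaving the vector
         * field zero.
         */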

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);

        /* Clear any pending interrupts left by SAL/EFI */
        while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
                ia64_eoi();

#ifdef CONFIG_SMP
        normal_xtp();
#endif

        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
                setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
        } else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        if (num_phys_stacked > max_num_phys_stacked) {
                ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
                max_num_phys_stacked = num_phys_stacked;
        }
        platform_cpu_init();
}

void __init
check_bugs (void)
{
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
        dmi_scan_machine();
        dmi_memdev_walk();
        dmi_set_dump_stack_arch_desc();
        return 0;
}
core_initcall(run_dmi_scan);