/*
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 *
 * This file contains the architecture-dependent parts of system setup.
 */

#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/start_kernel.h>
#include <linux/string.h>

#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/core_reg.h>
#include <asm/highmem.h>
#include <asm/hwthread.h>
#include <asm/l2cache.h>
#include <asm/mach/arch.h>
#include <asm/metag_mem.h>
#include <asm/metag_regs.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>

/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV	(TXPRIVEXT_COPRO_BITS		| \
			 TXPRIVEXT_TXTRIGGER_BIT	| \
			 TXPRIVEXT_TXGBLCREG_BIT	| \
			 TXPRIVEXT_ILOCK_BIT		| \
			 TXPRIVEXT_TXITACCYC_BIT	| \
			 TXPRIVEXT_TXDIVTIME_BIT	| \
			 TXPRIVEXT_TXAMAREGX_BIT	| \
			 TXPRIVEXT_TXTIMERI_BIT		| \
			 TXPRIVEXT_TXSTATUS_BIT		| \
			 TXPRIVEXT_TXDISABLE_BIT)

/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV	0
#else
#define META2_PRIV	(TXPRIVEXT_TXTIMER_BIT | \
			 TXPRIVEXT_TRACE_BIT)
#endif

/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV	TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV	0
#endif

#define PRIV_BITS	(DEFAULT_PRIV | \
			 META2_PRIV | \
			 UNALIGNED_PRIV)

/* Memory regions protected from userland via the privileged system register
 * (PRIVSYSR) settings below:
 * 0x06000000-0x07ffffff	Direct mapped region
 * 0x05000000-0x05ffffff	MMU table region (Meta1)
 * 0x04400000-0x047fffff	Cache flush region
 * 0x84000000-0x87ffffff	Core cache memory region (Meta2)
 * 0x80000000-0x81ffffff	Core code memory region (Meta2)
 */
#ifdef CONFIG_METAG_META12
#define PRIVSYSR_BITS	TXPRIVSYSR_ALL_BITS
#else
#define PRIVSYSR_BITS	(TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
#endif

/* Protect all 0x02xxxxxx and 0x048xxxxx. */
#define PIOREG_BITS	0xffffffff

/*
 * Protect all 0x04000xx0 (system events)
 * except write combiner flush and write fence (system events 4 and 5).
 */
#define PSYREG_BITS	0xfffffffb
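
/* Note: 0xfffffffb leaves only bit 2 of PSYREG unprotected. Read together
 * with the comment above, this suggests each PSYREG bit guards a pair of
 * system events (bit 2 <-> events 4 and 5, the write combiner flush and the
 * write fence). That pairing is an inference from the values here, not a
 * statement taken from the TRM.
 */
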
extern char _heap_start[];

#ifdef CONFIG_DA_CONSOLE
/* Our early channel based console driver */
extern struct console dash_console;
#endif

const struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID
 * In SMP this will be set up with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);

/*
 * Map a hardware thread ID to a Linux CPU number
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
	[0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory. This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;
EXPORT_SYMBOL(meta_memoffset);
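
/* Illustrative sketch of the relationship (the real macros live in
 * asm/page.h): setup_arch() computes
 *	meta_memoffset = virtual text start - physical text start;
 * so, roughly,
 *	__pa(v) == (unsigned long)(v) - meta_memoffset
 *	__va(p) == (void *)((unsigned long)(p) + meta_memoffset)
 */
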
static char __initdata *original_cmd_line;

DEFINE_PER_CPU(PTBI, pTBI);

/*
 * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
 *
 *	"hwthread_map=0:1,1:2,2:3,3:0"
 *
 *	Linux CPU ID	HWTHREAD_ID
 *	---------------------------
 *	     0		     1
 *	     1		     2
 *	     2		     3
 *	     3		     0
 */
static int __init parse_hwthread_map(char *p)
{
	int cpu;

	while (*p) {
		cpu = (*p++) - '0';
		if (cpu < 0 || cpu > 9)
			goto err_cpu;

		p++;		/* skip the ':' separator */
		cpu_2_hwthread_id[cpu] = (*p++) - '0';
		if (cpu_2_hwthread_id[cpu] >= 4)
			goto err_thread;

		hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;

		if (*p == ',')
			p++;		/* skip comma */
	}

	return 0;
err_cpu:
	pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
	return 1;
err_thread:
	pr_err("%s: hwthread_map thread argument out of range\n", __func__);
	return 1;
}
early_param("hwthread_map", parse_hwthread_map);
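
/* Worked example (illustrative): booting with "hwthread_map=0:1,1:2,2:3,3:0"
 * leaves the tables as
 *	cpu_2_hwthread_id[] = { [0] = 1, [1] = 2, [2] = 3, [3] = 0 }
 *	hwthread_id_2_cpu[] = { [0] = 3, [1] = 0, [2] = 1, [3] = 2 }
 * i.e. Linux CPU 0 runs on hardware thread 1, and so on.
 */
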
void __init dump_machine_table(void)
{
	struct machine_desc *p;
	const char **compat;

	pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
	for_each_machine_desc(p) {
		pr_info("\t%s\t[", p->name);
		for (compat = p->dt_compat; compat && *compat; ++compat)
			printk(" '%s'", *compat);
		printk(" ]\n");
	}

	pr_info("\nPlease check your kernel config and/or bootloader.\n");

	hard_processor_halt(HALT_PANIC);
}

#ifdef CONFIG_METAG_HALT_ON_PANIC
static int metag_panic_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	hard_processor_halt(HALT_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block metag_panic_block = {
	.notifier_call = metag_panic_event,
};
#endif

void __init setup_arch(char **cmdline_p)
{
	unsigned long start_pfn;
	unsigned long text_start = (unsigned long)(&_stext);
	unsigned long cpu = smp_processor_id();
	unsigned long heap_start, heap_end;
	unsigned long start_pte;
	PTBI _pTBI;
	PTBISEG p_heap;
	int heap_id;
	int i;

#ifdef CONFIG_DA_CONSOLE
	if (metag_da_enabled()) {
		/* An early channel based console driver */
		register_console(&dash_console);
		add_preferred_console("ttyDA", 1, NULL);
	}
#endif

	/* try interpreting the argument as a device tree */
	machine_desc = setup_machine_fdt(original_cmd_line);
	/* if it doesn't look like a device tree it must be a command line */
	if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
		/* try the embedded device tree */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Invalid embedded device tree.");
#else
		/* use the default machine description */
		machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
		/* append the bootloader cmdline to any builtin fdt cmdline */
		if (boot_command_line[0] && original_cmd_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, original_cmd_line,
			COMMAND_LINE_SIZE);
#endif
	}

	setup_meta_clocks(machine_desc->clocks);

	*cmdline_p = boot_command_line;
	parse_early_param();

	/*
	 * Make sure we don't alias in dcache or icache
	 */
	check_for_cache_aliasing(cpu);

#ifdef CONFIG_METAG_HALT_ON_PANIC
	atomic_notifier_chain_register(&panic_notifier_list,
				       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
		panic("Privilege must be enabled for this thread.");

	_pTBI = __TBI(TBID_ISTAT_BIT);

	per_cpu(pTBI, cpu) = _pTBI;

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	/*
	 * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
	 * rather than the version from the bootloader. This makes call
	 * stacks easier to understand and may allow us to unmap the
	 * bootloader at some point.
	 */
	for (i = 0; i <= TBID_SIGNUM_MAX; i++)
		_pTBI->fnSigs[i] = __TBIUnExpXXX;

	/* A Meta requirement is that the kernel is loaded (virtually)
	 * at the PAGE_OFFSET.
	 */
	if (PAGE_OFFSET != text_start)
		panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
		      PAGE_OFFSET, text_start);

	start_pte = mmu_read_second_level_page(text_start);

	/*
	 * Kernel pages should have the PRIV bit set by the bootloader.
	 */
	if (!(start_pte & _PAGE_KERNEL))
		panic("kernel pte does not have PRIV set");

	/*
	 * See __pa and __va in include/asm/page.h.
	 * This value is negative when running in local space but the
	 * calculations work anyway.
	 */
	meta_memoffset = text_start - (start_pte & PAGE_MASK);

	/* Now let's look at the heap space */
	heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
		+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

	p_heap = __TBIFindSeg(NULL, heap_id);

	if (!p_heap)
		panic("Could not find heap from TBI!");

	/* The heap begins at the first full page after the kernel data. */
	heap_start = (unsigned long) &_heap_start;

	/* The heap ends at the end of the heap segment specified with
	 * ldlk or the bootloader.
	 */
	if (is_global_space(text_start)) {
		pr_debug("WARNING: running in global space!\n");
		heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
	} else {
		heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
	}

	ROOT_DEV = Root_RAM0;

	/* init_mm is the mm struct used for the first task. It is then
	 * cloned for all other tasks spawned from that task.
	 *
	 * Note - we are using the virtual addresses here.
	 */
	init_mm.start_code = (unsigned long)(&_stext);
	init_mm.end_code = (unsigned long)(&_etext);
	init_mm.end_data = (unsigned long)(&_edata);
	init_mm.brk = (unsigned long)heap_start;

	min_low_pfn = PFN_UP(__pa(text_start));
	max_low_pfn = PFN_DOWN(__pa(heap_end));

	pfn_base = min_low_pfn;

	/* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
	 * call later makes sure to keep the rounded up pages marked reserved.
	 */
	max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
	max_pfn &= ~((1 << MAX_ORDER) - 1);
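
	/* Illustrative arithmetic (values depend on the configuration):
	 * with 4 KiB pages and MAX_ORDER == 10, (1 << MAX_ORDER) is 1024
	 * pages, i.e. the 4 MB boundary mentioned above, so for example
	 * max_low_pfn == 0x48010 would be rounded up to max_pfn == 0x48400.
	 */
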
	start_pfn = PFN_UP(__pa(heap_start));

	if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
		/* Theoretically, we could expand the space that the
		 * bootmem allocator covers - much as we do for the
		 * 'high' address, and then tell the bootmem system
		 * that the lowest chunk is 'not available'. Right
		 * now it is just much easier to constrain the
		 * user to always MAX_ORDER align their kernel space.
		 */
		panic("Kernel must be %d byte aligned, currently at %#lx.",
		      1 << (MAX_ORDER + PAGE_SHIFT),
		      min_low_pfn << PAGE_SHIFT);
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
	high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

	paging_init(heap_end);

	/* Set up the boot cpu's mapping. The rest will be set up below. */
	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

	unflatten_and_copy_device_tree();

	if (machine_desc->init_early)
		machine_desc->init_early();
}

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_machine_late);
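
/* Board hook ordering: machine_desc->init_early() is called directly from
 * setup_arch(), init_machine() runs at arch_initcall time (above), and
 * init_late() runs at late_initcall time, after the other initcall levels
 * have completed.
 */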

#ifdef CONFIG_PROC_FS
/*
 * Get CPU information for use by the procfs.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
	/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
	int coreid = metag_in32(METAC_CORE_ID);
	unsigned int dsp_type = (coreid >> 3) & 7;
	unsigned int fpu_type = (coreid >> 7) & 3;

	switch (dsp_type | fpu_type << 3) {
	case (0x00): return "EDSP";
	case (0x01): return "DSP";
	case (0x08): return "EDSP+LFPU";
	case (0x09): return "DSP+LFPU";
	case (0x10): return "EDSP+FPU";
	case (0x11): return "DSP+FPU";
	default:     return "";
	}
#else
	if (!(txenable & TXENABLE_CLASS_BITS))
		return "DSP";
	else
		return "";
#endif
}
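
/* Illustrative decode using the fields above (hypothetical CORE_ID value):
 * dsp_type == 0 and fpu_type == 2 give a switch key of 0 | (2 << 3) == 0x10,
 * so the reported capability string would be "EDSP+FPU".
 */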

static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu;
	unsigned int txenable, thread_id, major, minor;
	unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
	unsigned long lpj;
	int i;
#endif

	txenable = __core_reg_get(TXENABLE);
	major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
	minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
	thread_id = (txenable >> 8) & 0x3;

#ifdef CONFIG_SMP
	for_each_online_cpu(i) {
		lpj = per_cpu(cpu_data, i).loops_per_jiffy;
		txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
					 cpu_2_hwthread_id[i]);

		seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
			      "Clocking:\t%lu.%1luMHz\n"
			      "BogoMips:\t%lu.%02lu\n"
			      "Calibration:\t%lu loops\n"
			      "Capabilities:\t%s\n\n",
			   cpu, major, minor, i,
			   clockfreq / 1000000, (clockfreq / 100000) % 10,
			   lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
			   lpj,
			   get_cpu_capabilities(txenable));
	}
#else
	seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
		      "Clocking:\t%lu.%1luMHz\n"
		      "BogoMips:\t%lu.%02lu\n"
		      "Calibration:\t%lu loops\n"
		      "Capabilities:\t%s\n",
		   cpu, major, minor, thread_id,
		   clockfreq / 1000000, (clockfreq / 100000) % 10,
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100,
		   loops_per_jiffy,
		   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
	if (meta_l2c_is_present()) {
		seq_printf(m, "L2 cache:\t%s\n"
			      "L2 cache size:\t%d KB\n",
			   meta_l2c_is_enabled() ? "enabled" : "disabled",
			   meta_l2c_size() >> 10);
	}
#endif

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return (void *)(*pos == 0);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
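
/* c_start() hands back a non-NULL token only while *pos == 0 and c_next()
 * ends the walk immediately, so show_cpuinfo() emits the whole report as a
 * single seq_file record each time /proc/cpuinfo is read.
 */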

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

void __init metag_start_kernel(char *args)
{
	/* Zero the timer register so timestamps are from the point at
	 * which the kernel started running.
	 */
	__core_reg_set(TXTIMER, 0);

	/* Clear the bss. */
	memset(__bss_start, 0,
	       (unsigned long)__bss_stop - (unsigned long)__bss_start);

	/* Remember where these are for use in setup_arch */
	original_cmd_line = args;

	current_thread_info()->cpu = hard_processor_id();

	start_kernel();
}

/**
 * setup_priv() - Set up privilege protection registers.
 *
 * Set up privilege protection registers such as TXPRIVEXT to prevent userland
 * from touching our precious registers and sensitive memory areas.
 */
void setup_priv(void)
{
	unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;

	__core_reg_set(TXPRIVEXT, PRIV_BITS);

	metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
	metag_out32(PIOREG_BITS, T0PIOREG + offset);
	metag_out32(PSYREG_BITS, T0PSYREG + offset);
}
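
/* Each hardware thread has its own bank of protection registers: the offset
 * computed above (hard_processor_id() << TXPRIVREG_STRIDE_S) selects this
 * thread's copy of the T0PRIVSYSR/T0PIOREG/T0PSYREG registers, e.g. hardware
 * thread 1 writes to T0PRIVSYSR + (1 << TXPRIVREG_STRIDE_S).
 */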

PTBI pTBI_get(unsigned int cpu)
{
	return per_cpu(pTBI, cpu);
}
EXPORT_SYMBOL(pTBI_get);

#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
static char capabilities[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
static char capabilities[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
static char capabilities[] = "fpu";
#else
static char capabilities[] = "";
#endif

static struct ctl_table caps_kern_table[] = {
	{
		.procname	= "capabilities",
		.data		= capabilities,
		.maxlen		= sizeof(capabilities),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};

static struct ctl_table caps_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= caps_kern_table,
	},
	{}
};

static int __init capabilities_register_sysctl(void)
{
	struct ctl_table_header *caps_table_header;

	caps_table_header = register_sysctl_table(caps_root_table);
	if (!caps_table_header) {
		pr_err("Unable to register CAPABILITIES sysctl\n");
		return -ENOMEM;
	}

	return 0;
}
core_initcall(capabilities_register_sysctl);
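
/* With the tables above registered, the capability string is readable from
 * userspace via /proc/sys/kernel/capabilities, e.g.
 *
 *	$ cat /proc/sys/kernel/capabilities
 *	dsp fpu
 *
 * (the output depends on the CONFIG_METAG_DSP/CONFIG_METAG_FPU selection).
 */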