/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#define MEM_SIZE	(16*1024*1024)
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;
unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
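
/*
 * Three small per-mode stacks for each CPU: cpu_init() below points the
 * IRQ, abort and undefined-instruction mode stack pointers at these so
 * the exception vector code has a few words to spill into before it
 * switches to SVC mode.
 */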
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
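
/*
 * ENDIANNESS casts the unsigned long member down to a char, i.e. it
 * reads the least significant byte of the word.  That byte overlays
 * c[0] ('l') on a little-endian CPU and c[3] ('b') on a big-endian one,
 * giving the suffix appended to the machine and ELF platform strings in
 * setup_processor() below.
 */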
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *cache_types[16] = {
	/* cache type names, indexed by CACHE_TYPE(cacheid) */
};

static const char *cache_clean[16] = {
	/* cache clean method names, indexed the same way */
};

static const char *cache_lockdown[16] = {
	/* cache lockdown capability names, indexed the same way */
};

static const char *proc_arch[] = {
	/* architecture name strings, indexed by cpu_architecture() */
};
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
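
/*
 * Worked example of the decode used by dump_cache() and c_show_cache()
 * below: with M clear (mult = 2), a size field of 5, assoc field of 2
 * and line field of 2, the cache is 2 << (8+5) = 16KB, (2 << 2) >> 1 =
 * 4-way set associative, with 8 << 2 = 32 byte lines and
 * 1 << (6+5-2-2) = 128 sets (128 sets * 4 ways * 32 bytes = 16KB).
 */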
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}
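
/*
 * Note on the "revised CPUID" branch above: it keys off bits [15:12] of
 * the main ID register, i.e. the top nibble of the part number, so
 * 0xBxx parts (e.g. ARM1136, ARM1176) decode as ARMv6 and 0xCxx parts
 * (e.g. Cortex-A8) decode as one architecture level higher.
 */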
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * The inline assembly switches to IRQ, abort and undefined mode
	 * in turn, points sp at the matching per-mode stack, and finishes
	 * back in SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
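
/*
 * The early parameter above has the form "initrd=<start>,<size>", for
 * example "initrd=0x00800000,8M"; both fields go through memparse(), so
 * the usual K/M/G suffixes work.
 */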
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}
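
/*
 * Example of the rounding above: a bank passed in as start 0x60000200,
 * size 0x100000 first has the 0x200 offset trimmed from its size and is
 * then registered as start 0x60001000 (rounded up to a page) with size
 * 0xff000 (rounded down), so the bank never extends beyond the region
 * it was given.
 */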
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
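
/*
 * So "mem=64M@0x60000000" (an illustrative address) discards the
 * bootloader-supplied banks on its first use, via the static usermem
 * flag, and registers a single 64MB bank at 0x60000000; repeating the
 * option adds further banks.
 */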
/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
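
/*
 * The nesting requested above is what /proc/iomem ends up showing: each
 * memory bank appears as a "System RAM" region, with "Kernel text" and
 * "Kernel data" listed as children of whichever bank contains them.
 */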
/*
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
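
/*
 * Each __tagtable() use here and below emits a struct tagtable entry
 * into an init section that the linker script collects between
 * __tagtable_begin and __tagtable_end; parse_tag() further down simply
 * walks that table to find the handler registered for a tag value.
 */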
static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
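
/*
 * The difference between the two initrd tags is visible above:
 * ATAG_INITRD carries a virtual address and must be converted with
 * __virt_to_phys(), while ATAG_INITRD2 already supplies the physical
 * address, which is why the older tag is reported as deprecated.
 */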
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
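
/*
 * These defaults describe a single MEM_SIZE (16MB) bank starting at
 * PHYS_OFFSET; setup_arch() falls back to them when the bootloader does
 * not hand over a usable tag list.  The final zero-sized header is the
 * list terminator.
 */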
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);
static const char *hwcap_str[] = {
	/* one name per HWCAP_* bit ("swp", "half", "thumb", ...), NULL-terminated */
};
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
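
/*
 * cpuinfo_op is the architecture's hook into the generic seq_file code
 * behind /proc/cpuinfo: c_start()/c_next() expose a single record and
 * c_show() above formats it.
 */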