/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
 * alternate easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_private.h>
#define SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)
struct node_data *__node_data[MAX_COMPACT_NODES];

EXPORT_SYMBOL(__node_data);
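
/*
 * The hub's directory/region logic runs in either fine or coarse mode;
 * is_fine_dirmode() reads the NI status register to find out which.
 * The result is cached in fine_mode (by mlreset()) for get_region().
 */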
static int fine_mode;

static int is_fine_dirmode(void)
{
	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}
static hubreg_t get_region(cnodeid_t cnode)
{
	if (fine_mode)
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
	else
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
}
static hubreg_t region_mask;
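
/*
 * Build a mask with one bit set for the region of every online node.
 * mlreset() writes this mask (with region 0 forced in) to each hub's
 * PI_REGION_PRESENT register.
 */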
static void gen_region_mask(hubreg_t *region_mask)
{
	cnodeid_t cnode;

	(*region_mask) = 0;
	for_each_online_node(cnode) {
		(*region_mask) |= 1ULL << get_region(cnode);
	}
}
#define rou_rflag	rou_flags
static int router_distance;
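
/*
 * Depth-limited walk of the router graph starting at router_a, looking
 * for router_b.  rou_rflag marks routers already on the current path so
 * cycles are not followed; router_distance keeps the shortest hop count
 * found so far.
 */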
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
	klrou_t *router;
	lboard_t *brd;
	int port;

	if (router_a->rou_rflag == 1)
		return;

	if (depth >= router_distance)
		return;

	router_a->rou_rflag = 1;

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
			continue;

		brd = (lboard_t *)NODE_OFFSET_TO_K0(
			router_a->rou_port[port].port_nasid,
			router_a->rou_port[port].port_offset);

		if (brd->brd_type == KLTYPE_ROUTER) {
			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			if (router == router_b) {
				if (depth < router_distance)
					router_distance = depth;
			}
			else
				router_recurse(router, router_b, depth + 1);
		}
	}

	router_a->rou_rflag = 0;
}
unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
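
/*
 * Distance between two nodes in router hops: 0 for the same node, 1 if
 * both nodes hang off the same router, otherwise the shortest path found
 * by router_recurse() between the routers the two nodes are attached to.
 */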
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
	klrou_t *router, *router_a = NULL, *router_b = NULL;
	lboard_t *brd, *dest_brd;
	cnodeid_t cnode;
	nasid_t nasid;
	int port;

	/* Figure out which routers nodes in question are connected to */
	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1) continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);
		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			router->rou_rflag = 0;

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27) {
					if (dest_brd->brd_nasid == nasid_a)
						router_a = router;
					if (dest_brd->brd_nasid == nasid_b)
						router_b = router;
				}
			}
		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}

	if (router_a == NULL) {
		printk("node_distance: router_a NULL\n");
		return -1;
	}
	if (router_b == NULL) {
		printk("node_distance: router_b NULL\n");
		return -1;
	}

	if (nasid_a == nasid_b)
		return 0;

	if (router_a == router_b)
		return 1;

	router_distance = 100;
	router_recurse(router_a, router_b, 2);

	return router_distance;
}
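
/* Fill __node_distances[][] for every pair of online nodes. */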
static void __init init_topology_matrix(void)
{
	nasid_t nasid, nasid2;
	cnodeid_t row, col;

	for (row = 0; row < MAX_COMPACT_NODES; row++)
		for (col = 0; col < MAX_COMPACT_NODES; col++)
			__node_distances[row][col] = -1;

	for_each_online_node(row) {
		nasid = COMPACT_TO_NASID_NODEID(row);
		for_each_online_node(col) {
			nasid2 = COMPACT_TO_NASID_NODEID(col);
			__node_distances[row][col] =
				compute_node_distance(nasid, nasid2);
		}
	}
}
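
/* Print the node distance matrix and each router's port connections. */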
static void __init dump_topology(void)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd, *dest_brd;
	int port;
	int router_num = 0;
	klrou_t *router;
	cnodeid_t row, col;

	printk("************** Topology ********************\n");

	printk("    ");
	for_each_online_node(col)
		printk("%02d ", col);
	printk("\n");
	for_each_online_node(row) {
		printk("%02d ", row);
		for_each_online_node(col)
			printk("%2d ", node_distance(row, col));
		printk("\n");
	}

	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1) continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);
		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			printk("Router %d:", router_num);
			router_num++;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27)
					printk(" %d", dest_brd->brd_nasid);
				if (dest_brd->brd_type == KLTYPE_ROUTER)
					printk(" r");
			}
			printk("\n");
		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}
}
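
/*
 * First PFN of a memory slot: the node's NASID goes into the upper bits,
 * the slot number into the bits just above the in-slot page offset.
 */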
static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

	return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}
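
/*
 * Size of a memory slot in pages, taken from the klconfig memory bank
 * data for the node board.  Returns 0 when the slot is not populated.
 */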
static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
{
	nasid_t nasid;
	lboard_t *brd;
	klmembnk_t *banks;
	unsigned long size;

	nasid = COMPACT_TO_NASID_NODEID(node);
	/* Find the node board */
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
	if (!brd)
		return 0;

	/* Get the memory bank structure */
	banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
	if (!banks)
		return 0;

	/* Size in _Megabytes_ */
	size = (unsigned long)banks->membnk_bnksz[slot/4];

	/* hack for 128 dimm banks */
	if (size <= 128) {
		if (slot % 4 == 0) {
			size <<= 20;		/* size in bytes */
			return size >> PAGE_SHIFT;
		} else
			return 0;
	} else {
		size /= 4;
		size <<= 20;
		return size >> PAGE_SHIFT;
	}
}
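
/*
 * Early per-hub setup: probe CPUs, compute the topology and region mask,
 * and program each hub's region-present and CALIAS size registers.
 */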
static void __init mlreset(void)
{
	int i;

	master_nasid = get_nasid();
	fine_mode = is_fine_dirmode();

	/*
	 * Probe for all CPUs - this creates the cpumask and sets up the
	 * mapping tables.  We need to do this as early as possible.
	 */
#ifdef CONFIG_SMP
	cpu_node_probe();
#endif

	init_topology_matrix();
	dump_topology();

	gen_region_mask(&region_mask);

	setup_replication_mask();

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for_each_online_node(i) {
		nasid_t nasid;

		nasid = COMPACT_TO_NASID_NODEID(i);

		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}
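
/*
 * Register every populated memory slot with memblock.  Slots are ignored
 * from the point where the node's struct page array would no longer fit
 * into slot 0 (the nodebytes/slot0sz check below).
 */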
static void __init szmem(void)
{
	unsigned long slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
	int slot;
	cnodeid_t node;

	for_each_online_node(node) {
		nodebytes = 0;
		for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
			slot_psize = slot_psize_compute(node, slot);
			if (slot == 0)
				slot0sz = slot_psize;
			/*
			 * We need to refine the hack when we have replicated
			 * kernel text.
			 */
			nodebytes += (1LL << SLOT_SHIFT);

			if (!slot_psize)
				continue;

			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
					(slot0sz << PAGE_SHIFT)) {
				printk("Ignoring slot %d onwards on node %d\n",
								slot, node);
				slot = MAX_MEM_SLOTS;
				continue;
			}
			memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
					  PFN_PHYS(slot_psize), node);
		}
	}
}
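
/*
 * Per-node boot-time memory setup: place the node_data/hub_data page on
 * the node itself, initialise its bootmem allocator and reserve the
 * memory occupied by those structures and the bootmem bitmap.
 */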
static void __init node_mem_init(cnodeid_t node)
{
	unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
	unsigned long slot_freepfn = node_getfirstfree(node);
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

	/*
	 * Allocate the node data structures on the node first.
	 */
	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
	memset(__node_data[node], 0, PAGE_SIZE);

	NODE_DATA(node)->bdata = &bootmem_node_data[node];
	NODE_DATA(node)->node_start_pfn = start_pfn;
	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

	cpus_clear(hub_data(node)->h_cpus);

	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
			       sizeof(struct hub_data));

	bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
					 start_pfn, end_pfn);
	free_bootmem_with_active_regions(node, end_pfn);
	reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
		((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
		BOOTMEM_DEFAULT);
	sparse_memory_present_with_active_regions(node);
}
/*
 * A node with nothing.  We use it to avoid any special casing in
 * cpu_node_to_cpumask
 */
static struct node_data null_node = {
	.hub = {
		.h_cpus = CPU_MASK_NONE
	}
};
/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory. We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
	cnodeid_t node;

	mlreset();
	szmem();

	for (node = 0; node < MAX_COMPACT_NODES; node++) {
		if (node_online(node)) {
			node_mem_init(node);
			continue;
		}
		__node_data[node] = &null_node;
	}
}
void __init prom_free_prom_memory(void)
{
	/* We got nothing to free here ... */
}
extern void setup_zero_pages(void);
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned node;

	pagetable_init();

	for_each_online_node(node) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

		if (end_pfn > max_low_pfn)
			max_low_pfn = end_pfn;
	}
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(zones_size);
}
void __init mem_init(void)
{
	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);

	setup_zero_pages();	/* This comes from node 0 */
	mem_init_print_info(NULL);
}