/*
 * Some of the code in this file has been gleaned from the 64 bit
 * discontigmem support code base.
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to Pat Gaughen <gone@us.ibm.com>
 */
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/nodemask.h>
#include <asm/srat.h>
#include <asm/topology.h>
#include <asm/smp.h>
#include <asm/e820.h>
/*
 * proximity macros and definitions
 */
#define NODE_ARRAY_INDEX(x)	((x) / 8)	/* 8 bits/char */
#define NODE_ARRAY_OFFSET(x)	((x) % 8)	/* 8 bits/char */
#define BMAP_SET(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit))
#define BMAP_TEST(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit)))
/* bitmap length; _PXM is at most 255 */
#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8)
static u8 __initdata pxm_bitmap[PXM_BITMAP_LEN];	/* bitmap of proximity domains */
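/*
 * Example: for a raw _PXM value of 10, NODE_ARRAY_INDEX(10) == 1 and
 * NODE_ARRAY_OFFSET(10) == 2, so BMAP_SET(pxm_bitmap, 10) sets bit 2 of
 * pxm_bitmap[1]; BMAP_TEST(pxm_bitmap, 10) reads that same bit back.
 */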
#define MAX_CHUNKS_PER_NODE	3
#define MAXCHUNKS		(MAX_CHUNKS_PER_NODE * MAX_NUMNODES)
struct node_memory_chunk_s {
	unsigned long	start_pfn;
	unsigned long	end_pfn;
	u8	pxm;		// proximity domain of node
	u8	nid;		// which cnode contains this chunk?
	u8	bank;		// which mem bank on this node
};
static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
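/*
 * Sizing example: MAXCHUNKS depends on the kernel configuration; with
 * MAX_NUMNODES == 32 (typical for i386 NUMA, as noted further down) the
 * node_memory_chunk[] array holds at most 3 * 32 = 96 chunks.
 */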
static int __initdata num_memory_chunks; /* total number of memory chunks */
static u8 __initdata apicid_to_pxm[MAX_APICID];

int numa_off __initdata;
int acpi_numa __initdata;
static __init void bad_srat(void)
{
	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
	num_memory_chunks = 0;
}

static __init inline int srat_disabled(void)
{
	return numa_off || acpi_numa < 0;
}
/* Identify CPU proximity domains */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *cpu_affinity)
{
	if (srat_disabled())
		return;
	if (cpu_affinity->header.length !=
	     sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;		/* empty entry */

	/* mark this node as "seen" in node bitmap */
	BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);

	/* don't need to check apic_id here, because it is always 8 bits */
	apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;

	printk(KERN_DEBUG "CPU %02x in proximity domain %02x\n",
		cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
}
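/*
 * Example: an SRAT CPU affinity entry with apic_id 0x04 and
 * proximity_domain_lo 0x01 marks PXM 1 in pxm_bitmap and records
 * apicid_to_pxm[0x04] = 1 for the APIC-id-to-node mapping done later in
 * get_memcfg_from_srat().
 */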
/*
 * Identify memory proximity domains and hot-remove capabilities.
 * Fill node memory chunk list structure.
 */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *memory_affinity)
{
	unsigned long long paddr, size;
	unsigned long start_pfn, end_pfn;
	u8 pxm;
	struct node_memory_chunk_s *p, *q, *pend;

	if (srat_disabled())
		return;
	if (memory_affinity->header.length !=
	     sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}
	if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return;		/* empty entry */

	pxm = memory_affinity->proximity_domain & 0xff;

	/* mark this node as "seen" in node bitmap */
	BMAP_SET(pxm_bitmap, pxm);

	/* calculate info for memory chunk structure */
	paddr = memory_affinity->base_address;
	size = memory_affinity->length;

	start_pfn = paddr >> PAGE_SHIFT;
	end_pfn = (paddr + size) >> PAGE_SHIFT;
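	/*
	 * Example: with 4 KiB pages (PAGE_SHIFT == 12), an entry with
	 * base_address 0x40000000 and length 0x10000000 yields
	 * start_pfn 0x40000 and end_pfn 0x50000.
	 */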
	if (num_memory_chunks >= MAXCHUNKS) {
		printk(KERN_WARNING "Too many mem chunks in SRAT."
			" Ignoring %lld MBytes at %llx\n",
			size/(1024*1024), paddr);
		return;
	}
	/* Insertion sort based on base address */
	pend = &node_memory_chunk[num_memory_chunks];
	for (p = &node_memory_chunk[0]; p < pend; p++) {
		if (start_pfn < p->start_pfn)
			break;
	}
	if (p < pend) {
		/* shift the tail up one slot to open a gap at p */
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_pfn = start_pfn;
	p->end_pfn = end_pfn;
	p->pxm = pxm;

	num_memory_chunks++;
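	/*
	 * Example: if the list already holds chunks starting at pfns 0x10000
	 * and 0x40000 and the new chunk starts at 0x20000, the scan stops at
	 * the second entry, the tail is shifted up one slot and the new
	 * chunk lands in between, keeping the array sorted by start_pfn.
	 */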
	printk(KERN_DEBUG "Memory range %08lx to %08lx"
		" in proximity domain %02x %s\n",
		start_pfn, end_pfn,
		pxm,
		((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
		 "enabled and removable" : "enabled" ) );
}
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
}

void acpi_numa_arch_fixup(void)
{
}

/*
 * The SRAT table always lists ascending addresses, so can always
 * assume that the first "start" address that you see is the real
 * start of the node, and that the current "end" address is after
 * the previous one.
 */
static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
{
	/*
	 * Only add present memory as told by the e820.
	 * There is no guarantee from the SRAT that the memory it
	 * enumerates is present at boot time because it represents
	 * *possible* memory hotplug areas the same as normal RAM.
	 */
	if (memory_chunk->start_pfn >= max_pfn) {
		printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
			memory_chunk->start_pfn, memory_chunk->end_pfn);
		return -1;
	}
	if (memory_chunk->nid != nid)
		return -1;
	if (!node_has_online_mem(nid))
		node_start_pfn[nid] = memory_chunk->start_pfn;
	if (node_start_pfn[nid] > memory_chunk->start_pfn)
		node_start_pfn[nid] = memory_chunk->start_pfn;

	if (node_end_pfn[nid] < memory_chunk->end_pfn)
		node_end_pfn[nid] = memory_chunk->end_pfn;

	return 0;
}
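/*
 * Note: node_read_chunk() only ever widens a node's [node_start_pfn,
 * node_end_pfn) span.  For example, chunks covering pfns [0x10000, 0x20000)
 * and [0x40000, 0x50000) on the same nid leave that node spanning
 * 0x10000 - 0x50000; the hole in between is skipped because active regions
 * are registered per chunk in get_memcfg_from_srat().
 */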
int __init get_memcfg_from_srat(void)
{
	int i, j, nid;

	if (srat_disabled())
		goto out_fail;

	if (num_memory_chunks == 0) {
		printk(KERN_DEBUG
			 "could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}
	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */

	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
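	/*
	 * Example: if the SRAT only advertises PXM domains 0 and 5,
	 * acpi_map_pxm_to_node() assigns them the sequential node IDs 0 and
	 * 1, and only those two nodes are set online below.
	 */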
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);
	printk(KERN_DEBUG "pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk(KERN_CONT "%02x ", pxm_bitmap[i]);
	}
	printk(KERN_CONT "\n");
	printk(KERN_DEBUG "Number of logical nodes in system = %d\n",
			 num_online_nodes());
	printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
			 num_memory_chunks);
	for (i = 0; i < MAX_APICID; i++)
		apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);
	for (j = 0; j < num_memory_chunks; j++) {
		struct node_memory_chunk_s *chunk = &node_memory_chunk[j];

		printk(KERN_DEBUG
			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
			j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		if (node_read_chunk(chunk->nid, chunk))
			continue;

		memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn,
						     min(chunk->end_pfn, max_pfn));
	}
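	/*
	 * Note: clamping with min(chunk->end_pfn, max_pfn) above keeps SRAT
	 * entries that describe possible hotplug memory beyond the
	 * e820-reported end of RAM from registering absent pages.
	 */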
	/* for out of order entries in SRAT */
	sort_node_map();
	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = min(node_end_pfn[nid], max_pfn);

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
			" table\n");
	return 0;
}