/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu/sysemu.h"
#include "exec/cpu-common.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "include/exec/cpu-common.h" /* for RAM_ADDR_FMT */
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/qmp/qerror.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "qmp-commands.h"
#include "hw/mem/pc-dimm.h"

QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static int have_memdevs = -1;

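/*
 * Parse one -numa "node" option into numa_info[].
 *
 * Illustrative invocations (hypothetical IDs and sizes, not taken from this
 * file):
 *   -numa node,nodeid=0,cpus=0-3,mem=2G
 *   -numa node,nodeid=1,cpus=4-7,memdev=ram1
 * mem= and memdev= are mutually exclusive, and memdev= (the id of a
 * memory-backend-* object) must be used for either all nodes or none.
 */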
static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %" PRIu16,
                   nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        if (cpus->value >= MAX_CPUMASK_BITS) {
            error_setg(errp, "CPU number %" PRIu16 " is too large (max %d)",
                       cpus->value, MAX_CPUMASK_BITS - 1);
            return;
        }
        bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        uint64_t mem_size = node->mem;
        const char *mem_str = qemu_opt_get(opts, "mem");
        /* Fix up legacy suffix-less format */
        if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
            mem_size <<= 20;
        }
        numa_info[nodenr].node_mem = mem_size;
    }
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_int(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
}

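/*
 * Per-option callback for -numa (intended for qemu_opts_foreach()): convert
 * one QemuOpts into a NumaOptions QAPI object via the OptsVisitor and apply
 * it.  Returns 0 on success, -1 (after reporting the error) on failure.
 */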
int numa_init_func(QemuOpts *opts, void *opaque)
{
    NumaOptions *object = NULL;
    Error *err = NULL;

    {
        OptsVisitor *ov = opts_visitor_new(opts);
        visit_type_NumaOptions(opts_get_visitor(ov), &object, NULL, &err);
        opts_visitor_cleanup(ov);
    }

    if (err) {
        goto error;
    }

    switch (object->kind) {
    case NUMA_OPTIONS_KIND_NODE:
        numa_node_parse(object->node, opts, &err);
        if (err) {
            goto error;
        }
        nb_numa_nodes++;
        break;
    default:
        abort();
    }

    return 0;

error:
    qerror_report_err(err);
    error_free(err);

    if (object) {
        QapiDeallocVisitor *dv = qapi_dealloc_visitor_new();
        visit_type_NumaOptions(qapi_dealloc_get_visitor(dv),
                               &object, NULL, NULL);
        qapi_dealloc_visitor_cleanup(dv);
    }

    return -1;
}

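/*
 * Called once all -numa options have been parsed: reject sparse node IDs,
 * spread RAM across the nodes (8MB-aligned per node, remainder to the last
 * node) when no per-node size was given, check that the per-node sizes add
 * up to ram_size, and fall back to round-robin VCPU assignment when no node
 * lists any CPUs.
 */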
void set_numa_nodes(void)
{
    int i;

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            uint64_t usedmem = 0;

            /* On Linux, each node's border has to be 8MB aligned,
             * the final node gets the rest.
             */
            for (i = 0; i < nb_numa_nodes - 1; i++) {
                numa_info[i].node_mem = (ram_size / nb_numa_nodes) &
                                        ~((1 << 23UL) - 1);
                usedmem += numa_info[i].node_mem;
            }
            numa_info[i].node_mem = ram_size - usedmem;
        }

        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        for (i = 0; i < nb_numa_nodes; i++) {
            if (!bitmap_empty(numa_info[i].node_cpu, MAX_CPUMASK_BITS)) {
                break;
            }
        }
        /* assigning the VCPUs round-robin is easier to implement, guest OSes
         * must cope with this anyway, because there are BIOSes out there in
         * real machines which also use this scheme.
         */
        if (i == nb_numa_nodes) {
            for (i = 0; i < max_cpus; i++) {
                set_bit(i, numa_info[i % nb_numa_nodes].node_cpu);
            }
        }
    }
}

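/*
 * Propagate the node_cpu bitmaps into each CPUState: a VCPU ends up with the
 * numa_node index of the last node whose bitmap contains its cpu_index.
 */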
void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) {
                cpu->numa_node = i;
            }
        }
    }
}

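/*
 * Non-NUMA path: back the whole RAM region with a single allocation, either
 * file-backed (-mem-path, Linux only) or anonymous memory.
 */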
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);

        /* Legacy behavior: if allocation failed, fall back to
         * regular RAM allocation.
         */
        if (err) {
            qerror_report_err(err);
            error_free(err);
            memory_region_init_ram(mr, owner, name, ram_size, &error_abort);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram(mr, owner, name, ram_size, &error_abort);
    }
    vmstate_register_ram_global(mr);
}

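/*
 * Allocate guest RAM.  With memdev-backed NUMA nodes the region becomes a
 * container with one subregion per node, laid out contiguously in node
 * order; otherwise everything is delegated to the non-NUMA path above.
 */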
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        Error *local_err = NULL;
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend, &local_err);
        if (local_err) {
            qerror_report_err(local_err);
            exit(1);
        }

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}

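/*
 * Add the sizes of hotplugged DIMM devices to the per-node totals in
 * node_mem[], based on the QMP memory device list.
 */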
static void numa_stat_memory_devices(uint64_t node_mem[])
{
    MemoryDeviceInfoList *info_list = NULL;
    MemoryDeviceInfoList **prev = &info_list;
    MemoryDeviceInfoList *info;

    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->kind) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
                node_mem[value->dimm->node] += value->dimm->size;
                break;
            default:
                break;
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}

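/*
 * Fill node_mem[] with each node's total memory: the statically configured
 * node size plus any hotplugged DIMMs.  The array is only accumulated into,
 * so the caller is expected to zero it first.
 */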
void query_numa_node_mem(uint64_t node_mem[])
{
    int i;

    if (nb_numa_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < nb_numa_nodes; i++) {
        node_mem[i] += numa_info[i].node_mem;
    }
}

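/*
 * object_child_foreach() callback for qmp_query_memdev(): if the child is a
 * host memory backend, read its properties into a new MemdevList element and
 * prepend it to the list passed in via opaque.
 */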
static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;
    Error *err = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->size = object_property_get_int(obj, "size",
                                                 &err);
        if (err) {
            goto error;
        }

        m->value->merge = object_property_get_bool(obj, "merge",
                                                   &err);
        if (err) {
            goto error;
        }

        m->value->dump = object_property_get_bool(obj, "dump",
                                                  &err);
        if (err) {
            goto error;
        }

        m->value->prealloc = object_property_get_bool(obj,
                                                      "prealloc", &err);
        if (err) {
            goto error;
        }

        m->value->policy = object_property_get_enum(obj,
                                                    "policy",
                                                    HostMemPolicy_lookup,
                                                    &err);
        if (err) {
            goto error;
        }

        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes, &err);
        if (err) {
            goto error;
        }

        m->next = *list;
        *list = m;
    }

    return 0;
error:
    g_free(m->value);
    g_free(m);

    return -1;
}

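/*
 * QMP query-memdev: walk the children of /objects and return the list of
 * host memory backends, or NULL (without setting errp) on failure.
 */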
MemdevList *qmp_query_memdev(Error **errp)
{
    Object *obj;
    MemdevList *list = NULL;

    obj = object_resolve_path("/objects", NULL);
    if (obj == NULL) {
        return NULL;
    }

    if (object_child_foreach(obj, query_memdev, &list) != 0) {
        goto error;
    }

    return list;

error:
    qapi_free_MemdevList(list);
    return NULL;
}