/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"

#include <sys/resource.h>

#include "hw/xen/xen-legacy-backend.h"
#include "qemu/bitmap.h"

#include "sysemu/runstate.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"

//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * Empirical tests show that QEMU uses about 75MB more than
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * MiB)

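/*
 * A MapCacheEntry describes one contiguous, bucket-aligned chunk of guest
 * memory currently mapped into QEMU's address space; entries whose bucket
 * index collides are chained through 'next'.  A MapCacheRev records a locked
 * mapping so it can later be found again from the host pointer that was
 * returned to the caller.
 */
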
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint32_t lock;
#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
    uint8_t flags;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
    bool dma;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}

static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}

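/*
 * Allocate and initialise the global mapcache.  When running as root the
 * cache is sized to MCACHE_MAX_SIZE and RLIMIT_AS is lifted; otherwise the
 * size is derived from the process's RLIMIT_AS, leaving
 * NON_MCACHE_MEMORY_SIZE for QEMU's other allocations.
 */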
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            warn_report("QEMU's maximum size of virtual"
                        " memory is not infinity");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}

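/*
 * (Re)create the mapping backing one cache entry: map the bucket's guest
 * frames at address_index into QEMU's address space (at vaddr when the
 * caller requires a fixed address), or create an anonymous "dummy" mapping
 * when a real foreign mapping cannot be established yet.
 */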
static void xen_remap_bucket(MapCacheEntry *entry,
                             void *vaddr,
                             hwaddr size,
                             hwaddr address_index,
                             bool dummy)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
            ram_block_notify_remove(entry->vaddr_base, entry->size,
                                    entry->size);
        }

        /*
         * If an entry is being replaced by another mapping and we're using
         * MAP_FIXED flag for it - there is a possibility of a race for vaddr
         * address with another thread doing an mmap call itself
         * (see man 2 mmap). To avoid that we skip explicit unmapping here
         * and allow the kernel to destroy the previous mappings by replacing
         * them in the mmap call later.
         *
         * Non-identical replacements are therefore not allowed.
         */
        assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size));

        if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    /*
     * If the caller has requested the mapping at a specific address use
     * MAP_FIXED to make sure it's honored.
     */
    if (!dummy) {
        vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
                                           PROT_READ | PROT_WRITE,
                                           vaddr ? MAP_FIXED : 0,
                                           nb_pfn, pfns, err);
        if (vaddr_base == NULL) {
            perror("xenforeignmemory_map2");
            exit(-1);
        }
    } else {
        /*
         * We create dummy mappings where we are unable to create a foreign
         * mapping immediately due to certain circumstances (e.g. on resume).
         */
        vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE,
                          MAP_ANON | MAP_SHARED | (vaddr ? MAP_FIXED : 0),
                          -1, 0);
        if (vaddr_base == MAP_FAILED) {
            perror("mmap");
            exit(-1);
        }
    }

    if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
        ram_block_notify_add(vaddr_base, size, size);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    if (dummy) {
        entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY;
    } else {
        entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY);
    }

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}

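/*
 * Return a host pointer for the guest physical range starting at phys_addr,
 * creating or remapping the bucket entry as needed.  A non-zero lock pins
 * the entry and records it in locked_entries so it can later be released
 * with xen_invalidate_map_cache_entry().
 */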
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock, bool dma)
{
    MapCacheEntry *entry, *pentry = NULL,
                  *free_entry = NULL, *free_pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated G_GNUC_UNUSED = false;
    bool dummy = false;

tryagain:
    address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && (lock || entry->lock) && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        if (!free_entry && !entry->lock) {
            free_entry = entry;
            free_pentry = pentry;
        }
        pentry = entry;
        entry = entry->next;
    }
    if (!entry && free_entry) {
        entry = free_entry;
        pentry = free_pentry;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, NULL, cache_size, address_index, dummy);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, NULL, cache_size, address_index, dummy);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                test_bit_size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_entry = NULL;
#ifdef XEN_COMPAT_PHYSMAP
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size);
            translated = true;
            goto tryagain;
        }
#endif
        if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) {
            dummy = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->dma = dma;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}

uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock, bool dma)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
    mapcache_unlock();
    return p;
}

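/*
 * Translate a host pointer previously returned by xen_map_cache() with a
 * non-zero lock back to the corresponding guest physical address, using the
 * reverse-mapping list of locked entries.
 */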
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}

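/*
 * Drop the lock taken by xen_map_cache() on the entry backing 'buffer'.
 * Once the entry's lock count reaches zero and it is not the head of its
 * bucket chain, the mapping is unmapped and the entry freed.
 */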
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}

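/*
 * Throw away every unlocked mapping in the cache, e.g. when the guest's
 * physmap changes.  Locked DMA mappings are reported but left in place.
 */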
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (!reventry->dma) {
            continue;
        }
        fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
                " "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}

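/*
 * Remap the entry that currently maps old_phys_addr (typically a dummy
 * mapping created during migration) so that it maps new_phys_addr instead,
 * reusing the same virtual address and size.
 */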
static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr,
                                                 hwaddr new_phys_addr,
                                                 hwaddr size)
{
    MapCacheEntry *entry;
    hwaddr address_index, address_offset;
    hwaddr test_bit_size, cache_size = size;

    address_index  = old_phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1);

    assert(size);
    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1));
    if (test_bit_size % XC_PAGE_SIZE) {
        test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
    }
    cache_size = size + address_offset;
    if (cache_size % MCACHE_BUCKET_SIZE) {
        cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];
    while (entry && !(entry->paddr_index == address_index &&
                      entry->size == cache_size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to update an entry for "TARGET_FMT_plx \
                " that is not in the mapcache!\n", old_phys_addr);
        return NULL;
    }

    address_index  = new_phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1);

    fprintf(stderr, "Replacing a dummy mapcache entry for "TARGET_FMT_plx \
            " with "TARGET_FMT_plx"\n", old_phys_addr, new_phys_addr);

    xen_remap_bucket(entry, entry->vaddr_base,
                     cache_size, address_index, false);
    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                test_bit_size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        DPRINTF("Unable to update a mapcache entry for "TARGET_FMT_plx"!\n",
                old_phys_addr);
        return NULL;
    }

    return entry->vaddr_base + address_offset;
}

uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
                                 hwaddr new_phys_addr,
                                 hwaddr size)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_replace_cache_entry_unlocked(old_phys_addr, new_phys_addr, size);
    mapcache_unlock();
    return p;
}