/******************************************************************************
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#include <asm/xen/cpuid.h>
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
/*
 * Handling of free grants:
 *
 * Free grants are in a simple list anchored in gnttab_free_head. They are
 * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
 * of free entries is stored in gnttab_free_count.
 * Additionally there is a bitmap of free entries anchored in
 * gnttab_free_bitmap. It is used to simplify allocation of multiple
 * consecutive grants, which is needed e.g. for virtio support.
 * gnttab_last_free is used to add free entries of new frames at the end of
 * the free list.
 * gnttab_free_tail_ptr specifies the variable which references the start
 * of consecutive free grants ending with gnttab_last_free. This pointer is
 * updated in a rather defensive way, in order to avoid performance hits in
 * hot paths.
 * All those variables are protected by gnttab_list_lock.
 */
static int gnttab_free_count;
static unsigned int gnttab_size;
static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
static grant_ref_t *gnttab_free_tail_ptr;
static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);
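
/*
 * Illustrative sketch, not from the original source: with three free refs
 * 8, 9 and 10, the bookkeeping above would look roughly like
 *
 *	gnttab_free_head  = 8
 *	gnttab_entry(8)   = 9
 *	gnttab_entry(9)   = 10
 *	gnttab_entry(10)  = GNTTAB_LIST_END
 *	gnttab_last_free  = 10
 *	gnttab_free_count = 3
 *	bits 8..10 set in gnttab_free_bitmap
 */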

struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter carries the grant table addresses while the grant table
	 * is being set up, and nr_gframes is the number of frames to map.
	 * Returning GNTST_okay means success; a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames which were mapped in map_frames for
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this grant entry to a domain for access. The ref parameter is
	 * the reference of the introduced grant entry, domid is the id of
	 * the granted domain, frame is the page frame to be granted, and
	 * flags is the status to be written to the grant entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain. The ref parameter is the
	 * reference of the grant entry whose access will be stopped.
	 * If the grant entry is currently mapped for reading or writing,
	 * return failure (== 0) directly and don't tear down the grant
	 * access. Otherwise, stop grant access for this entry and return
	 * success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}

/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
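
/*
 * Illustrative sketch, not from the original source: assuming 4 KiB pages
 * and 4-byte grant refs, RPP is 1024, so e.g. ref 5000 lives in
 * gnttab_list[4][904] (5000 / 1024 == 4, 5000 % 1024 == 904).
 */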

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count--) {
		bitmap_clear(gnttab_free_bitmap, head, 1);
		if (gnttab_free_tail_ptr == __gnttab_entry(head))
			gnttab_free_tail_ptr = &gnttab_free_head;
		if (count)
			head = gnttab_entry(head);
	}
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	if (!gnttab_free_count) {
		gnttab_last_free = GNTTAB_LIST_END;
		gnttab_free_tail_ptr = NULL;
	}

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static int get_seq_entry_count(void)
{
	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
		return 0;

	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
}

/* Rebuilds the free grant list and tries to find count consecutive entries. */
static int get_free_seq(unsigned int count)
{
	int ret = -ENOSPC;
	unsigned int from, to;
	grant_ref_t *last;

	gnttab_free_tail_ptr = &gnttab_free_head;
	last = &gnttab_free_head;

	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
	     from < gnttab_size;
	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
					from + 1);
		if (ret < 0 && to - from >= count) {
			ret = from;
			bitmap_clear(gnttab_free_bitmap, ret, count);
			from += count;
			gnttab_free_count -= count;
			if (from == to)
				continue;
		}

		/*
		 * Recreate the free list in order to have it properly sorted.
		 * This is needed to make sure that the free tail has the maximum
		 * possible size.
		 */
		while (from < to) {
			*last = from;
			last = __gnttab_entry(from);
			gnttab_last_free = from;
			from++;
		}
		if (to < gnttab_size)
			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
	}

	*last = GNTTAB_LIST_END;
	if (gnttab_last_free != gnttab_size - 1)
		gnttab_free_tail_ptr = NULL;

	return ret;
}

static int get_free_entries_seq(unsigned int count)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if (gnttab_free_count < count) {
		ret = gnttab_expand(count - gnttab_free_count);
		if (ret < 0)
			goto out;
	}

	if (get_seq_entry_count() < count) {
		ret = get_free_seq(count);
		if (ret >= 0)
			goto out;
		ret = gnttab_expand(count - get_seq_entry_count());
		if (ret < 0)
			goto out;
	}

	ret = *gnttab_free_tail_ptr;
	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
	gnttab_free_count -= count;
	if (!gnttab_free_count)
		gnttab_free_tail_ptr = NULL;
	bitmap_clear(gnttab_free_bitmap, ret, count);

out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ret;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry_locked(grant_ref_t ref)
{
	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
		return;

	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	if (!gnttab_free_count)
		gnttab_last_free = ref;
	if (gnttab_free_tail_ptr == &gnttab_free_head)
		gnttab_free_tail_ptr = __gnttab_entry(ref);
	gnttab_free_count++;
	bitmap_set(gnttab_free_bitmap, ref, 1);
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	put_free_entry_locked(ref);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = GNTTAB_LIST_END;
	if (!gnttab_free_count) {
		gnttab_free_head = start;
		gnttab_free_tail_ptr = &gnttab_free_head;
	} else {
		gnttab_entry(gnttab_last_free) = start;
	}
	gnttab_free_count += n;
	gnttab_last_free = i;

	bitmap_set(gnttab_free_bitmap, start, n);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame: Frame to which access is permitted.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb(); /* Hypervisor concurrent accesses. */
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb(); /* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
	u16 *pflags = &gnttab_shared.v1[ref].flags;
	u16 flags;

	flags = *pflags;
	do {
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while (!sync_try_cmpxchg(pflags, &flags, 0));

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb(); /* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_access_ref(ref);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	if (_gnttab_end_foreign_access_ref(ref))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static atomic64_t deferred_count;
static atomic64_t leaked_count;
static unsigned int free_per_iteration = 10;
module_param(free_per_iteration, uint, 0600);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = READ_ONCE(free_per_iteration);
	const bool ignore_limit = nr == 0;
	struct deferred_entry *first = NULL;
	unsigned long flags;
	size_t freed = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while ((ignore_limit || nr--) && !list_empty(&deferred_list)) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref)) {
			uint64_t ret = atomic64_dec_return(&deferred_count);

			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n",
				 entry->ref, page_to_pfn(entry->page),
				 (unsigned long long)ret);
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
			freed++;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
	}
	if (list_empty(&deferred_list))
		WARN_ON(atomic64_read(&deferred_count));
	else if (!timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
	pr_debug("Freed %zu references", freed);
}

static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	uint64_t leaked, deferred;

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		deferred = atomic64_inc_return(&deferred_count);
		leaked = atomic64_read(&leaked_count);
		pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
			 ref, page ? page_to_pfn(page) : -1, deferred, leaked);
	} else {
		deferred = atomic64_read(&deferred_count);
		leaked = atomic64_inc_return(&leaked_count);
		pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
			ref, page ? page_to_pfn(page) : -1, deferred, leaked);
	}
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page)
			put_page(page);
	} else
		gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (head != GNTTAB_LIST_END) {
		ref = gnttab_entry(head);
		put_free_entry_locked(head);
		head = ref;
	}
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (i = count; i > 0; i--)
		put_free_entry_locked(head + i - 1);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
{
	int h;

	if (count == 1)
		h = get_free_entries(1);
	else
		h = get_free_entries_seq(count);

	if (h < 0)
		return -ENOSPC;

	*first = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	gnttab_set_free(gnttab_size, extra_entries);

	if (!gnttab_free_tail_ptr)
		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);

	nr_grant_frames = new_nr_grant_frames;
	gnttab_size += extra_entries;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		memunmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	memunmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}
*map_ops
,
1246 struct gnttab_map_grant_ref
*kmap_ops
,
1247 struct page
**pages
, unsigned int count
)
1251 ret
= HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref
, map_ops
, count
);
1255 for (i
= 0; i
< count
; i
++) {
1256 switch (map_ops
[i
].status
) {
1259 struct xen_page_foreign
*foreign
;
1261 SetPageForeign(pages
[i
]);
1262 foreign
= xen_page_foreign(pages
[i
]);
1263 foreign
->domid
= map_ops
[i
].dom
;
1264 foreign
->gref
= map_ops
[i
].ref
;
1268 case GNTST_no_device_space
:
1269 pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1273 /* Retry eagain maps */
1274 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref
,
1276 &map_ops
[i
].status
, __func__
);
1277 /* Test status in next loop iteration. */
1286 return set_foreign_p2m_mapping(map_ops
, kmap_ops
, pages
, count
);
1288 EXPORT_SYMBOL_GPL(gnttab_map_refs
);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}
* item
)
1328 for (pc
= 0; pc
< item
->count
; pc
++) {
1329 if (page_count(item
->pages
[pc
]) > 1) {
1330 unsigned long delay
= GNTTAB_UNMAP_REFS_DELAY
* (item
->age
+ 1);
1331 schedule_delayed_work(&item
->gnttab_work
,
1332 msecs_to_jiffies(delay
));
1337 ret
= gnttab_unmap_refs(item
->unmap_ops
, item
->kunmap_ops
,
1338 item
->pages
, item
->count
);
1339 item
->done(ret
, item
);
1342 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data
* item
)
1344 INIT_DELAYED_WORK(&item
->gnttab_work
, gnttab_unmap_work
);
1347 __gnttab_unmap_refs_async(item
);
1349 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async
);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}
)
1599 unsigned int cur
, extra
;
1601 cur
= nr_grant_frames
;
1602 extra
= ((req_entries
+ gnttab_interface
->grefs_per_grant_frame
- 1) /
1603 gnttab_interface
->grefs_per_grant_frame
);
1604 if (cur
+ extra
> gnttab_max_grant_frames()) {
1605 pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1606 " cur=%u extra=%u limit=%u"
1607 " gnttab_free_count=%u req_entries=%u\n",
1608 cur
, extra
, gnttab_max_grant_frames(),
1609 gnttab_free_count
, req_entries
);
1613 rc
= gnttab_map(cur
, cur
+ extra
- 1);
1615 rc
= grow_gnttab_list(extra
);
1620 int gnttab_init(void)
1623 unsigned long max_nr_grant_frames
, max_nr_grefs
;
1624 unsigned int max_nr_glist_frames
, nr_glist_frames
;
1627 gnttab_request_version();
1628 max_nr_grant_frames
= gnttab_max_grant_frames();
1629 max_nr_grefs
= max_nr_grant_frames
*
1630 gnttab_interface
->grefs_per_grant_frame
;
1631 nr_grant_frames
= 1;
1633 /* Determine the maximum number of frames required for the
1634 * grant reference free list on the current hypervisor.
1636 max_nr_glist_frames
= max_nr_grefs
/ RPP
;
1638 gnttab_list
= kmalloc_array(max_nr_glist_frames
,
1639 sizeof(grant_ref_t
*),
1641 if (gnttab_list
== NULL
)
1644 nr_glist_frames
= gnttab_frames(nr_grant_frames
, RPP
);
1645 for (i
= 0; i
< nr_glist_frames
; i
++) {
1646 gnttab_list
[i
] = (grant_ref_t
*)__get_free_page(GFP_KERNEL
);
1647 if (gnttab_list
[i
] == NULL
) {
1653 gnttab_free_bitmap
= bitmap_zalloc(max_nr_grefs
, GFP_KERNEL
);
1654 if (!gnttab_free_bitmap
) {
1659 ret
= arch_gnttab_init(max_nr_grant_frames
,
1660 nr_status_frames(max_nr_grant_frames
));
1664 if (gnttab_setup() < 0) {
1669 gnttab_size
= nr_grant_frames
* gnttab_interface
->grefs_per_grant_frame
;
1671 gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES
,
1672 gnttab_size
- GNTTAB_NR_RESERVED_ENTRIES
);
1674 printk("Grant table initialized\n");
1678 for (i
--; i
>= 0; i
--)
1679 free_page((unsigned long)gnttab_list
[i
]);
1681 bitmap_free(gnttab_free_bitmap
);
1684 EXPORT_SYMBOL_GPL(gnttab_init
);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);