/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static union {
	struct grant_entry_v1 *v1;
	void *addr;
} gnttab_shared;
/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter is used to store the grant table address while the grant
	 * table is being set up, and nr_gframes is the number of frames to
	 * map. Returning GNTST_okay means success; a negative value means
	 * failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which were mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for accessing or transferring. The ref
	 * parameter is the reference of the introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be
	 * granted, and flags is the status to set on the grant entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for accessing. The ref
	 * parameter is the reference of a grant entry whose grant access will
	 * be stopped; readonly is not used by this function. If the grant
	 * entry is currently mapped for reading or writing, just return
	 * failure (==0) directly and don't tear down the grant access.
	 * Otherwise, stop grant access for this entry and return success
	 * (==1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of a grant entry whose grant transfer
	 * will be stopped. If the transfer has not started, just reclaim the
	 * grant entry and return failure (==0). Otherwise, wait for the
	 * transfer to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried grant entry; the return value is the
	 * status of that entry. The detailed status (writing/reading) can be
	 * obtained from the return value by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};
struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};
static struct gnttab_ops *gnttab_interface;

static int grant_table_version;
static int grefs_per_grant_frame;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}

/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
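
/*
 * Worked example (illustrative, not part of the original file): with 4 KiB
 * pages and a 4-byte grant_ref_t, RPP is 1024, so free-list entry 2500
 * lives at gnttab_list[2][452] (2500 / 1024 == 2, 2500 % 1024 == 452).
 * The free list is thus a two-level array grown one page at a time.
 */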
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}
static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}
static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
/*
 * Following applies to gnttab_update_entry_v1.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();	/* Step 3 above: flags must be written last. */
	gnttab_shared.v1[ref].flags = flags;
}
/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
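
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * backend driver grants one of its pages to a frontend domain. The
 * identifiers frontend_domid and page are hypothetical.
 *
 *	int ref = gnttab_grant_foreign_access(frontend_domid,
 *					      page_to_pfn(page), 0);
 *	if (ref < 0)
 *		return ref;	// free list exhausted and could not expand
 *	// ref is then passed to the frontend, e.g. via xenstore
 */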
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);

static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
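
/*
 * Continuing the sketch above (illustrative): when the backend is done
 * with the grant it revokes it. Passing the page's address lets the
 * deferred path free the page later if the peer still has it mapped:
 *
 *	gnttab_end_foreign_access(ref, 0,
 *				  (unsigned long)page_address(page));
 *
 * If the remote domain still holds a mapping, the entry is queued on
 * deferred_list and retried from deferred_timer roughly once a second.
 */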
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
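
/*
 * Sketch of the batch-allocation pattern built from the helpers above
 * (illustrative; nr_bufs is hypothetical):
 *
 *	grant_ref_t head;
 *	int ref, err = gnttab_alloc_grant_references(nr_bufs, &head);
 *	if (err)
 *		return err;
 *	while ((ref = gnttab_claim_grant_reference(&head)) >= 0) {
 *		// grant one buffer via gnttab_grant_foreign_access_ref(ref, ...)
 *	}
 *	...
 *	gnttab_free_grant_references(head);	// return whatever is left
 */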
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
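
/*
 * Illustrative use of the callback (not part of the original file): a
 * driver whose get_free_entries() failed can ask to be notified once at
 * least "count" entries are free again; my_callback, my_retry_fn and
 * my_dev are hypothetical:
 *
 *	static struct gnttab_free_callback my_callback;
 *	...
 *	gnttab_request_free_callback(&my_callback, my_retry_fn, my_dev, 16);
 *
 * Note that the callback fires from check_free_callbacks() with
 * gnttab_list_lock held, so my_retry_fn must not sleep.
 */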
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	BUG_ON(grefs_per_grant_frame == 0);

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_grant_frame;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_grant_frame * nr_grant_frames;
	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}
unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
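
/*
 * Sizing note (illustrative): with 4 KiB pages, a v1 grant entry is
 * 8 bytes, so each grant frame holds 512 entries. The legacy fallback of
 * 4 frames above therefore yields 2048 grant entries, and a typical
 * 32-frame maximum yields 16384.
 */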
int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
/**
 * gnttab_alloc_pages - alloc pages suitable for mapping foreign grants into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int i;
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages, false);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign) {
			gnttab_free_pages(nr_pages, pages);
			return -ENOMEM;
		}
		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL(gnttab_alloc_pages);
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL(gnttab_free_pages);
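
/*
 * These ballooned pages are the usual targets for gnttab_map_refs(): they
 * are backed by valid struct pages but not by host memory, so a foreign
 * frame can be mapped into them. A minimal sketch (illustrative; N is
 * hypothetical):
 *
 *	struct page *pages[N];
 *	if (gnttab_alloc_pages(N, pages))
 *		return -ENOMEM;
 *	// map foreign grants into pages[], use them, unmap them...
 *	gnttab_free_pages(N, pages);
 */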
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);
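
/*
 * Illustrative single-slot use of the batch interface (not part of the
 * original file); gnttab_set_map_op() from xen/grant_table.h fills in the
 * hypercall arguments, and vaddr, gref and otherend_id are hypothetical:
 *
 *	struct gnttab_map_grant_ref op;
 *
 *	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
 *			  gref, otherend_id);
 *	gnttab_batch_map(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EINVAL;
 *	// op.handle is needed later for the matching unmap
 */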
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}
* item
)
848 for (pc
= 0; pc
< item
->count
; pc
++) {
849 if (page_count(item
->pages
[pc
]) > 1) {
850 unsigned long delay
= GNTTAB_UNMAP_REFS_DELAY
* (item
->age
+ 1);
851 schedule_delayed_work(&item
->gnttab_work
,
852 msecs_to_jiffies(delay
));
857 ret
= gnttab_unmap_refs(item
->unmap_ops
, item
->kunmap_ops
,
858 item
->pages
, item
->count
);
859 item
->done(ret
, item
);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}
*item
)
882 struct unmap_refs_callback_data data
;
884 init_completion(&data
.completion
);
886 item
->done
= &unmap_refs_callback
;
887 gnttab_unmap_refs_async(item
);
888 wait_for_completion(&data
.completion
);
892 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync
);
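
/*
 * Illustrative caller of the synchronous variant (not part of the
 * original file): fill in the unmap ops and page list, then block until
 * no page is held elsewhere and the unmap hypercall has completed.
 * unmap_ops, pages and count are hypothetical:
 *
 *	struct gntab_unmap_queue_data unmap_data;
 *
 *	unmap_data.unmap_ops = unmap_ops;
 *	unmap_data.kunmap_ops = NULL;
 *	unmap_data.pages = pages;
 *	unmap_data.count = count;
 *	err = gnttab_unmap_refs_sync(&unmap_data);
 */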
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
static struct gnttab_ops gnttab_v1_ops = {
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};

static void gnttab_request_version(void)
{
	/* Only version 1 is used, which will always be available. */
	grant_table_version = 1;
	grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
	gnttab_interface = &gnttab_v1_ops;

	pr_info("Grant tables using version %d layout\n", grant_table_version);
}
static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}
int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}
static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(grefs_per_grant_frame == 0);
	cur = nr_grant_frames;
	extra = ((req_entries + (grefs_per_grant_frame-1)) /
		 grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}
int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(grefs_per_grant_frame == 0);
	max_nr_glist_frames = (max_nr_grant_frames *
			       grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames);
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head  = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);
static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);