/*P:300 The I/O mechanism in lguest is simple yet flexible, allowing the Guest
 * to talk to the Launcher or directly to another Guest.  It uses familiar
 * concepts of DMA and interrupts, plus some neat code stolen from
 * futexes... :*/
/* Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/types.h>
#include <linux/futex.h>
#include <linux/jhash.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include "lg.h"
/*L:300
 * I/O
 *
 * Getting data in and out of the Guest is quite an art.  There are numerous
 * ways to do it, and they all suck differently.  We try to keep things fairly
 * close to "real" hardware so our Guest's drivers don't look like an alien
 * visitation in the middle of the Linux code, and yet make sure that Guests
 * can talk directly to other Guests, not just the Launcher.
 *
 * To do this, the Guest gives us a key when it binds or sends DMA buffers.
 * The key corresponds to a "physical" address inside the Guest (ie. a virtual
 * address inside the Launcher process).  We don't, however, use this key
 * directly.
 *
 * We want Guests which share memory to be able to DMA to each other: two
 * Launchers can mmap memory the same file, then the Guests can communicate.
 * Fortunately, the futex code provides us with a way to get a "union
 * futex_key" corresponding to the memory lying at a virtual address: if the
 * two processes share memory, the "union futex_key" for that memory will match
 * even if the memory is mapped at different addresses in each.  So we always
 * convert the keys to "union futex_key"s to compare them.
 *
 * Before we dive into this though, we need to look at another set of helper
 * routines used throughout the Host kernel code to access Guest memory.
 :*/
55 static struct list_head dma_hash
[61];
57 /* An unfortunate side effect of the Linux double-linked list implementation is
58 * that there's no good way to statically initialize an array of linked
60 void lguest_io_init(void)
64 for (i
= 0; i
< ARRAY_SIZE(dma_hash
); i
++)
65 INIT_LIST_HEAD(&dma_hash
[i
]);
68 /* FIXME: allow multi-page lengths. */
69 static int check_dma_list(struct lguest
*lg
, const struct lguest_dma
*dma
)
73 for (i
= 0; i
< LGUEST_MAX_DMA_SECTIONS
; i
++) {
76 if (!lguest_address_ok(lg
, dma
->addr
[i
], dma
->len
[i
]))
78 if (dma
->len
[i
] > PAGE_SIZE
)
80 /* We could do over a page, but is it worth it? */
81 if ((dma
->addr
[i
] % PAGE_SIZE
) + dma
->len
[i
] > PAGE_SIZE
)
87 kill_guest(lg
, "bad DMA entry: %u@%#lx", dma
->len
[i
], dma
->addr
[i
]);
91 /*L:330 This is our hash function, using the wonderful Jenkins hash.
93 * The futex key is a union with three parts: an unsigned long word, a pointer,
94 * and an int "offset". We could use jhash_2words() which takes three u32s.
95 * (Ok, the hash functions are great: the naming sucks though).
97 * It's nice to be portable to 64-bit platforms, so we use the more generic
98 * jhash2(), which takes an array of u32, the number of u32s, and an initial
99 * u32 to roll in. This is uglier, but breaks down to almost the same code on
100 * 32-bit platforms like this one.
102 * We want a position in the array, so we modulo ARRAY_SIZE(dma_hash) (ie. 61).
104 static unsigned int hash(const union futex_key
*key
)
106 return jhash2((u32
*)&key
->both
.word
,
107 (sizeof(key
->both
.word
)+sizeof(key
->both
.ptr
))/4,
109 % ARRAY_SIZE(dma_hash
);
112 /* This is a convenience routine to compare two keys. It's a much bemoaned C
113 * weakness that it doesn't allow '==' on structures or unions, so we have to
114 * open-code it like this. */
115 static inline int key_eq(const union futex_key
*a
, const union futex_key
*b
)
117 return (a
->both
.word
== b
->both
.word
118 && a
->both
.ptr
== b
->both
.ptr
119 && a
->both
.offset
== b
->both
.offset
);
122 /*L:360 OK, when we need to actually free up a Guest's DMA array we do several
123 * things, so we have a convenient function to do it.
125 * The caller must hold a read lock on dmainfo owner's current->mm->mmap_sem
126 * for the drop_futex_key_refs(). */
127 static void unlink_dma(struct lguest_dma_info
*dmainfo
)
129 /* You locked this too, right? */
130 BUG_ON(!mutex_is_locked(&lguest_lock
));
131 /* This is how we know that the entry is free. */
132 dmainfo
->interrupt
= 0;
133 /* Remove it from the hash table. */
134 list_del(&dmainfo
->list
);
135 /* Drop the references we were holding (to the inode or mm). */
136 drop_futex_key_refs(&dmainfo
->key
);
139 /*L:350 This is the routine which we call when the Guest asks to unregister a
140 * DMA array attached to a given key. Returns true if the array was found. */
141 static int unbind_dma(struct lguest
*lg
,
142 const union futex_key
*key
,
147 /* We don't bother with the hash table, just look through all this
148 * Guest's DMA arrays. */
149 for (i
= 0; i
< LGUEST_MAX_DMA
; i
++) {
150 /* In theory it could have more than one array on the same key,
151 * or one array on multiple keys, so we check both */
152 if (key_eq(key
, &lg
->dma
[i
].key
) && dmas
== lg
->dma
[i
].dmas
) {
153 unlink_dma(&lg
->dma
[i
]);
161 /*L:340 BIND_DMA: this is the hypercall which sets up an array of "struct
162 * lguest_dma" for receiving I/O.
164 * The Guest wants to bind an array of "struct lguest_dma"s to a particular key
165 * to receive input. This only happens when the Guest is setting up a new
166 * device, so it doesn't have to be very fast.
168 * It returns 1 on a successful registration (it can fail if we hit the limit
169 * of registrations for this Guest).
171 int bind_dma(struct lguest
*lg
,
172 unsigned long ukey
, unsigned long dmas
, u16 numdmas
, u8 interrupt
)
177 /* Futex code needs the mmap_sem. */
178 struct rw_semaphore
*fshared
= ¤t
->mm
->mmap_sem
;
180 /* Invalid interrupt? (We could kill the guest here). */
181 if (interrupt
>= LGUEST_IRQS
)
184 /* We need to grab the Big Lguest Lock, because other Guests may be
185 * trying to look through this Guest's DMAs to send something while
186 * we're doing this. */
187 mutex_lock(&lguest_lock
);
189 if (get_futex_key(lg
->mem_base
+ ukey
, fshared
, &key
) != 0) {
190 kill_guest(lg
, "bad dma key %#lx", ukey
);
194 /* We want to keep this key valid once we drop mmap_sem, so we have to
195 * hold a reference. */
196 get_futex_key_refs(&key
);
198 /* If the Guest specified an interrupt of 0, that means they want to
199 * unregister this array of "struct lguest_dma"s. */
201 ret
= unbind_dma(lg
, &key
, dmas
);
203 /* Look through this Guest's dma array for an unused entry. */
204 for (i
= 0; i
< LGUEST_MAX_DMA
; i
++) {
205 /* If the interrupt is non-zero, the entry is already
207 if (lg
->dma
[i
].interrupt
)
210 /* OK, a free one! Fill on our details. */
211 lg
->dma
[i
].dmas
= dmas
;
212 lg
->dma
[i
].num_dmas
= numdmas
;
213 lg
->dma
[i
].next_dma
= 0;
214 lg
->dma
[i
].key
= key
;
215 lg
->dma
[i
].owner
= lg
;
216 lg
->dma
[i
].interrupt
= interrupt
;
218 /* Now we add it to the hash table: the position
219 * depends on the futex key that we got. */
220 list_add(&lg
->dma
[i
].list
, &dma_hash
[hash(&key
)]);
226 /* If we didn't find a slot to put the key in, drop the reference
228 drop_futex_key_refs(&key
);
230 /* Unlock and out. */
232 mutex_unlock(&lguest_lock
);
236 /*L:385 Note that our routines to access a different Guest's memory are called
237 * lgread_other() and lgwrite_other(): these names emphasize that they are only
238 * used when the Guest is *not* the current Guest.
240 * The interface for copying from another process's memory is called
241 * access_process_vm(), with a final argument of 0 for a read, and 1 for a
244 * We need lgread_other() to read the destination Guest's "struct lguest_dma"
246 static int lgread_other(struct lguest
*lg
,
247 void *buf
, u32 addr
, unsigned bytes
)
249 if (!lguest_address_ok(lg
, addr
, bytes
)
250 || access_process_vm(lg
->tsk
, (unsigned long)lg
->mem_base
+ addr
,
251 buf
, bytes
, 0) != bytes
) {
252 memset(buf
, 0, bytes
);
253 kill_guest(lg
, "bad address in registered DMA struct");
259 /* "lgwrite()" to another Guest: used to update the destination "used_len" once
260 * we've transferred data into the buffer. */
261 static int lgwrite_other(struct lguest
*lg
, u32 addr
,
262 const void *buf
, unsigned bytes
)
264 if (!lguest_address_ok(lg
, addr
, bytes
)
265 || access_process_vm(lg
->tsk
, (unsigned long)lg
->mem_base
+ addr
,
266 (void *)buf
, bytes
, 1) != bytes
) {
267 kill_guest(lg
, "bad address writing to registered DMA");
273 /*L:400 This is the generic engine which copies from a source "struct
274 * lguest_dma" from this Guest into another Guest's "struct lguest_dma". The
275 * destination Guest's pages have already been mapped, as contained in the
278 * If you're wondering if there's a nice "copy from one process to another"
279 * routine, so was I. But Linux isn't really set up to copy between two
280 * unrelated processes, so we have to write it ourselves.
282 static u32
copy_data(struct lguest
*srclg
,
283 const struct lguest_dma
*src
,
284 const struct lguest_dma
*dst
,
285 struct page
*pages
[])
287 unsigned int totlen
, si
, di
, srcoff
, dstoff
;
290 /* We return the total length transferred. */
293 /* We keep indexes into the source and destination "struct lguest_dma",
294 * and an offset within each region. */
298 /* We loop until the source or destination is exhausted. */
299 while (si
< LGUEST_MAX_DMA_SECTIONS
&& src
->len
[si
]
300 && di
< LGUEST_MAX_DMA_SECTIONS
&& dst
->len
[di
]) {
301 /* We can only transfer the rest of the src buffer, or as much
302 * as will fit into the destination buffer. */
303 u32 len
= min(src
->len
[si
] - srcoff
, dst
->len
[di
] - dstoff
);
305 /* For systems using "highmem" we need to use kmap() to access
306 * the page we want. We often use the same page over and over,
307 * so rather than kmap() it on every loop, we set the maddr
308 * pointer to NULL when we need to move to the next
309 * destination page. */
311 maddr
= kmap(pages
[di
]);
313 /* Copy directly from (this Guest's) source address to the
314 * destination Guest's kmap()ed buffer. Note that maddr points
315 * to the start of the page: we need to add the offset of the
316 * destination address and offset within the buffer. */
318 /* FIXME: This is not completely portable. I looked at
319 * copy_to_user_page(), and some arch's seem to need special
320 * flushes. x86 is fine. */
321 if (copy_from_user(maddr
+ (dst
->addr
[di
] + dstoff
)%PAGE_SIZE
,
322 srclg
->mem_base
+src
->addr
[si
], len
) != 0) {
323 /* If a copy failed, it's the source's fault. */
324 kill_guest(srclg
, "bad address in sending DMA");
329 /* Increment the total and src & dst offsets */
334 /* Presumably we reached the end of the src or dest buffers: */
335 if (srcoff
== src
->len
[si
]) {
336 /* Move to the next buffer at offset 0 */
340 if (dstoff
== dst
->len
[di
]) {
341 /* We need to unmap that destination page and reset
342 * maddr ready for the next one. */
350 /* If we still had a page mapped at the end, unmap now. */
357 /*L:390 This is how we transfer a "struct lguest_dma" from the source Guest
358 * (the current Guest which called SEND_DMA) to another Guest. */
359 static u32
do_dma(struct lguest
*srclg
, const struct lguest_dma
*src
,
360 struct lguest
*dstlg
, const struct lguest_dma
*dst
)
364 struct page
*pages
[LGUEST_MAX_DMA_SECTIONS
];
366 /* We check that both source and destination "struct lguest_dma"s are
367 * within the bounds of the source and destination Guests */
368 if (!check_dma_list(dstlg
, dst
) || !check_dma_list(srclg
, src
))
371 /* We need to map the pages which correspond to each parts of
372 * destination buffer. */
373 for (i
= 0; i
< LGUEST_MAX_DMA_SECTIONS
; i
++) {
374 if (dst
->len
[i
] == 0)
376 /* get_user_pages() is a complicated function, especially since
377 * we only want a single page. But it works, and returns the
378 * number of pages. Note that we're holding the destination's
379 * mmap_sem, as get_user_pages() requires. */
380 if (get_user_pages(dstlg
->tsk
, dstlg
->mm
,
381 (unsigned long)dstlg
->mem_base
+dst
->addr
[i
],
382 1, 1, 1, pages
+i
, NULL
)
384 /* This means the destination gave us a bogus buffer */
385 kill_guest(dstlg
, "Error mapping DMA pages");
391 /* Now copy the data until we run out of src or dst. */
392 ret
= copy_data(srclg
, src
, dst
, pages
);
400 /*L:380 Transferring data from one Guest to another is not as simple as I'd
401 * like. We've found the "struct lguest_dma_info" bound to the same address as
402 * the send, we need to copy into it.
404 * This function returns true if the destination array was empty. */
405 static int dma_transfer(struct lguest
*srclg
,
407 struct lguest_dma_info
*dst
)
409 struct lguest_dma dst_dma
, src_dma
;
410 struct lguest
*dstlg
;
413 /* From the "struct lguest_dma_info" we found in the hash, grab the
416 /* Read in the source "struct lguest_dma" handed to SEND_DMA. */
417 lgread(srclg
, &src_dma
, udma
, sizeof(src_dma
));
419 /* We need the destination's mmap_sem, and we already hold the source's
420 * mmap_sem for the futex key lookup. Normally this would suggest that
421 * we could deadlock if the destination Guest was trying to send to
422 * this source Guest at the same time, which is another reason that all
423 * I/O is done under the big lguest_lock. */
424 down_read(&dstlg
->mm
->mmap_sem
);
426 /* Look through the destination DMA array for an available buffer. */
427 for (i
= 0; i
< dst
->num_dmas
; i
++) {
428 /* We keep a "next_dma" pointer which often helps us avoid
429 * looking at lots of previously-filled entries. */
430 dma
= (dst
->next_dma
+ i
) % dst
->num_dmas
;
431 if (!lgread_other(dstlg
, &dst_dma
,
432 dst
->dmas
+ dma
* sizeof(struct lguest_dma
),
436 if (!dst_dma
.used_len
)
440 /* If we found a buffer, we do the actual data copy. */
441 if (i
!= dst
->num_dmas
) {
442 unsigned long used_lenp
;
445 ret
= do_dma(srclg
, &src_dma
, dstlg
, &dst_dma
);
446 /* Put used length in the source "struct lguest_dma"'s used_len
447 * field. It's a little tricky to figure out where that is,
450 udma
+offsetof(struct lguest_dma
, used_len
), ret
);
451 /* Tranferring 0 bytes is OK if the source buffer was empty. */
452 if (ret
== 0 && src_dma
.len
[0] != 0)
455 /* The destination Guest might be running on a different CPU:
456 * we have to make sure that it will see the "used_len" field
457 * change to non-zero *after* it sees the data we copied into
458 * the buffer. Hence a write memory barrier. */
460 /* Figuring out where the destination's used_len field for this
461 * "struct lguest_dma" in the array is also a little ugly. */
462 used_lenp
= dst
->dmas
463 + dma
* sizeof(struct lguest_dma
)
464 + offsetof(struct lguest_dma
, used_len
);
465 lgwrite_other(dstlg
, used_lenp
, &ret
, sizeof(ret
));
466 /* Move the cursor for next time. */
469 up_read(&dstlg
->mm
->mmap_sem
);
471 /* We trigger the destination interrupt, even if the destination was
472 * empty and we didn't transfer anything: this gives them a chance to
473 * wake up and refill. */
474 set_bit(dst
->interrupt
, dstlg
->irqs_pending
);
475 /* Wake up the destination process. */
476 wake_up_process(dstlg
->tsk
);
477 /* If we passed the last "struct lguest_dma", the receive had no
479 return i
== dst
->num_dmas
;
482 up_read(&dstlg
->mm
->mmap_sem
);
486 /*L:370 This is the counter-side to the BIND_DMA hypercall; the SEND_DMA
487 * hypercall. We find out who's listening, and send to them. */
488 void send_dma(struct lguest
*lg
, unsigned long ukey
, unsigned long udma
)
492 struct rw_semaphore
*fshared
= ¤t
->mm
->mmap_sem
;
495 mutex_lock(&lguest_lock
);
497 /* Get the futex key for the key the Guest gave us */
498 if (get_futex_key(lg
->mem_base
+ ukey
, fshared
, &key
) != 0) {
499 kill_guest(lg
, "bad sending DMA key");
502 /* Since the key must be a multiple of 4, the futex key uses the lower
503 * bit of the "offset" field (which would always be 0) to indicate a
504 * mapping which is shared with other processes (ie. Guests). */
505 if (key
.shared
.offset
& 1) {
506 struct lguest_dma_info
*i
;
507 /* Look through the hash for other Guests. */
508 list_for_each_entry(i
, &dma_hash
[hash(&key
)], list
) {
509 /* Don't send to ourselves (would deadlock). */
510 if (i
->owner
->mm
== lg
->mm
)
512 if (!key_eq(&key
, &i
->key
))
515 /* If dma_transfer() tells us the destination has no
516 * available buffers, we increment "empty". */
517 empty
+= dma_transfer(lg
, udma
, i
);
520 /* If the destination is empty, we release our locks and
521 * give the destination Guest a brief chance to restock. */
523 /* Give any recipients one chance to restock. */
524 up_read(¤t
->mm
->mmap_sem
);
525 mutex_unlock(&lguest_lock
);
526 /* Next time, we won't try again. */
531 /* Private mapping: Guest is sending to its Launcher. We set
532 * the "dma_is_pending" flag so that the main loop will exit
533 * and the Launcher's read() from /dev/lguest will return. */
534 lg
->dma_is_pending
= 1;
535 lg
->pending_dma
= udma
;
536 lg
->pending_key
= ukey
;
540 mutex_unlock(&lguest_lock
);
544 void release_all_dma(struct lguest
*lg
)
548 BUG_ON(!mutex_is_locked(&lguest_lock
));
550 down_read(&lg
->mm
->mmap_sem
);
551 for (i
= 0; i
< LGUEST_MAX_DMA
; i
++) {
552 if (lg
->dma
[i
].interrupt
)
553 unlink_dma(&lg
->dma
[i
]);
555 up_read(&lg
->mm
->mmap_sem
);
/*M:007 We only return a single DMA buffer to the Launcher, but it would be
 * more efficient to return a pointer to the entire array of DMA buffers, which
 * it can cache and choose one whenever it wants.
 *
 * Currently the Launcher uses a write to /dev/lguest, and the return value is
 * the address of the DMA structure with the interrupt number placed in
 * dma->used_len.  If we wanted to return the entire array, we need to return
 * the address, array size and interrupt number: this seems to require an
 * interface change. :*/
568 /*L:320 This routine looks for a DMA buffer registered by the Guest on the
569 * given key (using the BIND_DMA hypercall). */
570 unsigned long get_dma_buffer(struct lguest
*lg
,
571 unsigned long ukey
, unsigned long *interrupt
)
573 unsigned long ret
= 0;
575 struct lguest_dma_info
*i
;
576 struct rw_semaphore
*fshared
= ¤t
->mm
->mmap_sem
;
578 /* Take the Big Lguest Lock to stop other Guests sending this Guest DMA
579 * at the same time. */
580 mutex_lock(&lguest_lock
);
581 /* To match between Guests sharing the same underlying memory we steal
582 * code from the futex infrastructure. This requires that we hold the
583 * "mmap_sem" for our process (the Launcher), and pass it to the futex
587 /* This can fail if it's not a valid address, or if the address is not
588 * divisible by 4 (the futex code needs that, we don't really). */
589 if (get_futex_key(lg
->mem_base
+ ukey
, fshared
, &key
) != 0) {
590 kill_guest(lg
, "bad registered DMA buffer");
593 /* Search the hash table for matching entries (the Launcher can only
594 * send to its own Guest for the moment, so the entry must be for this
596 list_for_each_entry(i
, &dma_hash
[hash(&key
)], list
) {
597 if (key_eq(&key
, &i
->key
) && i
->owner
== lg
) {
599 /* Look through the registered DMA array for an
600 * available buffer. */
601 for (j
= 0; j
< i
->num_dmas
; j
++) {
602 struct lguest_dma dma
;
604 ret
= i
->dmas
+ j
* sizeof(struct lguest_dma
);
605 lgread(lg
, &dma
, ret
, sizeof(dma
));
606 if (dma
.used_len
== 0)
609 /* Store the interrupt the Guest wants when the buffer
611 *interrupt
= i
->interrupt
;
617 mutex_unlock(&lguest_lock
);
/*L:410 This really has completed the Launcher.  Not only have we now finished
 * the longest chapter in our journey, but this also means we are over halfway
 * through!
 *
 * Enough prevaricating around the bush: it is time for us to dive into the
 * core of the Host, in "make Host". :*/