/*
 * access.h -  access guest memory
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
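/*
 * __guestaddr_to_user() below turns a guest real address into the host
 * user-space address that backs it.  The first two guest pages
 * (2 * PAGE_SIZE) and the prefix area designated by the SIE block trade
 * places, mirroring the CPU's prefixing, before gmap_fault() walks the
 * guest mapping (gmap).  A failed translation is reported as an
 * ERR_PTR-style value that callers unwrap with IS_ERR()/PTR_ERR().
 */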
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               unsigned long guestaddr)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}
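/*
 * The get_guest_uXX()/put_guest_uXX() helpers below access naturally aligned
 * guest operands: translate the address, propagate a translation failure as a
 * negative errno, and otherwise defer to get_user()/put_user().  The BUG_ON()s
 * assert the alignment the callers must already guarantee.
 *
 * Minimal usage sketch (hypothetical caller, not part of this header):
 *
 *      u64 val;
 *      int rc = get_guest_u64(vcpu, addr, &val);
 *      if (rc)
 *              return rc;      rc is e.g. -EFAULT for an unmapped address
 */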
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (unsigned long __user *) uptr);
}
static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}
static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u16 __user *) uptr);
}
static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}
static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}
static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}
static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}
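/*
 * The fast copy path below works segment-wise: one gmap_fault() lookup per
 * 1 MB segment (PMD_SIZE on s390), then copy_to_user()/copy_from_user() for
 * up to a whole segment at a time.  A buffer that crosses segment boundaries
 * is split into a partial head, full segments, and a partial tail.
 */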
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestdest + n < guestdest)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestdest & ~PMD_MASK);

        r = copy_to_user(uptr, from, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        from += size;
        n -= size;
        guestdest += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                from += PMD_SIZE;
                n -= PMD_SIZE;
                guestdest += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}
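/*
 * copy_to_guest_absolute() takes guest absolute addresses and may always use
 * the fast path.  copy_to_guest() takes prefix-relative addresses: a range
 * that lies entirely inside or entirely outside the low two pages and the
 * prefix area is adjusted and copied fast, while a range straddling one of
 * those boundaries falls back to the byte-wise slow path, because no single
 * contiguous copy can honour the prefix swap.
 */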
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                         unsigned long guestdest,
                                         void *from, unsigned long n)
{
        return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                void *from, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestsrc & ~PMD_MASK);

        r = copy_from_user(to, uptr, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        to += size;
        n -= size;
        guestsrc += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                to += PMD_SIZE;
                n -= PMD_SIZE;
                guestsrc += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           unsigned long guestsrc,
                                           unsigned long n)
{
        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  unsigned long guestsrc, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

#endif