/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
13 #ifndef __KVM_S390_GACCESS_H
14 #define __KVM_S390_GACCESS_H
16 #include <linux/compiler.h>
17 #include <linux/kvm_host.h>
18 #include <asm/uaccess.h>
20 static inline void __user
*__guestaddr_to_user(struct kvm_vcpu
*vcpu
,
23 u64 prefix
= vcpu
->arch
.sie_block
->prefix
;
24 u64 origin
= vcpu
->kvm
->arch
.guest_origin
;
25 u64 memsize
= vcpu
->kvm
->arch
.guest_memsize
;
27 if (guestaddr
< 2 * PAGE_SIZE
)
29 else if ((guestaddr
>= prefix
) && (guestaddr
< prefix
+ 2 * PAGE_SIZE
))
32 if (guestaddr
> memsize
)
33 return (void __user __force
*) ERR_PTR(-EFAULT
);
37 return (void __user
*) guestaddr
;
40 static inline int get_guest_u64(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
43 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
45 BUG_ON(guestaddr
& 7);
47 if (IS_ERR((void __force
*) uptr
))
48 return PTR_ERR((void __force
*) uptr
);
50 return get_user(*result
, (u64 __user
*) uptr
);
53 static inline int get_guest_u32(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
56 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
58 BUG_ON(guestaddr
& 3);
60 if (IS_ERR((void __force
*) uptr
))
61 return PTR_ERR((void __force
*) uptr
);
63 return get_user(*result
, (u32 __user
*) uptr
);
66 static inline int get_guest_u16(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
69 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
71 BUG_ON(guestaddr
& 1);
76 return get_user(*result
, (u16 __user
*) uptr
);
79 static inline int get_guest_u8(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
82 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
84 if (IS_ERR((void __force
*) uptr
))
85 return PTR_ERR((void __force
*) uptr
);
87 return get_user(*result
, (u8 __user
*) uptr
);
90 static inline int put_guest_u64(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
93 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
95 BUG_ON(guestaddr
& 7);
97 if (IS_ERR((void __force
*) uptr
))
98 return PTR_ERR((void __force
*) uptr
);
100 return put_user(value
, (u64 __user
*) uptr
);
103 static inline int put_guest_u32(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
106 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
108 BUG_ON(guestaddr
& 3);
110 if (IS_ERR((void __force
*) uptr
))
111 return PTR_ERR((void __force
*) uptr
);
113 return put_user(value
, (u32 __user
*) uptr
);
116 static inline int put_guest_u16(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
119 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
121 BUG_ON(guestaddr
& 1);
123 if (IS_ERR((void __force
*) uptr
))
124 return PTR_ERR((void __force
*) uptr
);
126 return put_user(value
, (u16 __user
*) uptr
);
129 static inline int put_guest_u8(struct kvm_vcpu
*vcpu
, u64 guestaddr
,
132 void __user
*uptr
= __guestaddr_to_user(vcpu
, guestaddr
);
134 if (IS_ERR((void __force
*) uptr
))
135 return PTR_ERR((void __force
*) uptr
);
137 return put_user(value
, (u8 __user
*) uptr
);
141 static inline int __copy_to_guest_slow(struct kvm_vcpu
*vcpu
, u64 guestdest
,
142 const void *from
, unsigned long n
)
146 const u8
*data
= from
;
148 for (i
= 0; i
< n
; i
++) {
149 rc
= put_guest_u8(vcpu
, guestdest
++, *(data
++));
156 static inline int copy_to_guest(struct kvm_vcpu
*vcpu
, u64 guestdest
,
157 const void *from
, unsigned long n
)
159 u64 prefix
= vcpu
->arch
.sie_block
->prefix
;
160 u64 origin
= vcpu
->kvm
->arch
.guest_origin
;
161 u64 memsize
= vcpu
->kvm
->arch
.guest_memsize
;
163 if ((guestdest
< 2 * PAGE_SIZE
) && (guestdest
+ n
> 2 * PAGE_SIZE
))
166 if ((guestdest
< prefix
) && (guestdest
+ n
> prefix
))
169 if ((guestdest
< prefix
+ 2 * PAGE_SIZE
)
170 && (guestdest
+ n
> prefix
+ 2 * PAGE_SIZE
))
173 if (guestdest
< 2 * PAGE_SIZE
)
175 else if ((guestdest
>= prefix
) && (guestdest
< prefix
+ 2 * PAGE_SIZE
))
178 if (guestdest
+ n
> memsize
)
181 if (guestdest
+ n
< guestdest
)
186 return copy_to_user((void __user
*) guestdest
, from
, n
);
188 return __copy_to_guest_slow(vcpu
, guestdest
, from
, n
);
191 static inline int __copy_from_guest_slow(struct kvm_vcpu
*vcpu
, void *to
,
192 u64 guestsrc
, unsigned long n
)
198 for (i
= 0; i
< n
; i
++) {
199 rc
= get_guest_u8(vcpu
, guestsrc
++, data
++);
206 static inline int copy_from_guest(struct kvm_vcpu
*vcpu
, void *to
,
207 u64 guestsrc
, unsigned long n
)
209 u64 prefix
= vcpu
->arch
.sie_block
->prefix
;
210 u64 origin
= vcpu
->kvm
->arch
.guest_origin
;
211 u64 memsize
= vcpu
->kvm
->arch
.guest_memsize
;
213 if ((guestsrc
< 2 * PAGE_SIZE
) && (guestsrc
+ n
> 2 * PAGE_SIZE
))
216 if ((guestsrc
< prefix
) && (guestsrc
+ n
> prefix
))
219 if ((guestsrc
< prefix
+ 2 * PAGE_SIZE
)
220 && (guestsrc
+ n
> prefix
+ 2 * PAGE_SIZE
))
223 if (guestsrc
< 2 * PAGE_SIZE
)
225 else if ((guestsrc
>= prefix
) && (guestsrc
< prefix
+ 2 * PAGE_SIZE
))
228 if (guestsrc
+ n
> memsize
)
231 if (guestsrc
+ n
< guestsrc
)
236 return copy_from_user(to
, (void __user
*) guestsrc
, n
);
238 return __copy_from_guest_slow(vcpu
, to
, guestsrc
, n
);
241 static inline int copy_to_guest_absolute(struct kvm_vcpu
*vcpu
, u64 guestdest
,
242 const void *from
, unsigned long n
)
244 u64 origin
= vcpu
->kvm
->arch
.guest_origin
;
245 u64 memsize
= vcpu
->kvm
->arch
.guest_memsize
;
247 if (guestdest
+ n
> memsize
)
250 if (guestdest
+ n
< guestdest
)
255 return copy_to_user((void __user
*) guestdest
, from
, n
);
258 static inline int copy_from_guest_absolute(struct kvm_vcpu
*vcpu
, void *to
,
259 u64 guestsrc
, unsigned long n
)
261 u64 origin
= vcpu
->kvm
->arch
.guest_origin
;
262 u64 memsize
= vcpu
->kvm
->arch
.guest_memsize
;
264 if (guestsrc
+ n
> memsize
)
267 if (guestsrc
+ n
< guestsrc
)
272 return copy_from_user(to
, (void __user
*) guestsrc
, n
);