/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>

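/*
 * These helpers translate a guest address into a pointer into the
 * user address space that backs guest memory.  On s390, "prefixing"
 * swaps the first two pages (8k) of absolute storage with the two
 * pages at the CPU's prefix address, so accesses to either region
 * must be redirected before the flat guest_origin offset is applied.
 */
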
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       u64 guestaddr)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	/* apply prefixing: swap the low 8k with the prefix area */
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	/* valid guest addresses are [0, memsize), so memsize itself is out */
	if (guestaddr >= memsize)
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}

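/*
 * Fixed-size accessors.  The BUG_ON in each enforces the natural
 * alignment of the access, so a single get_user/put_user can never
 * straddle the 8k prefix boundary or the end of guest memory.
 *
 * Hypothetical usage sketch (instr_addr is illustrative, not from
 * this file):
 *
 *	u16 opcode;
 *	int rc = get_guest_u16(vcpu, instr_addr, &opcode);
 *	if (rc)
 *		return rc;
 */
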
static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	/* byte accesses need no alignment check */
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

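/*
 * Byte-wise fallback for copies that cross a prefixing boundary:
 * every byte is translated through __guestaddr_to_user individually,
 * so the parts of the range on either side of the boundary end up in
 * the right place.
 */
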
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
				       const void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	const u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

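/*
 * copy_to_guest/copy_from_guest handle the common case with a single
 * copy_{to,from}_user.  The three checks at the top catch ranges that
 * cross 8k, the start of the prefix area, or the end of the prefix
 * area: those cannot be translated with one linear offset and are
 * bounced to the byte-wise slow path.
 */
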
static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
				const void *from, unsigned long n)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	/* catch wraparound of guestdest + n */
	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	/* copy_to_user returns the number of bytes left uncopied */
	if (copy_to_user((void __user *) guestdest, from, n))
		return -EFAULT;
	return 0;
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 u64 guestsrc, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  u64 guestsrc, unsigned long n)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	/* catch wraparound of guestsrc + n */
	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	if (copy_from_user(to, (void __user *) guestsrc, n))
		return -EFAULT;
	return 0;
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

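/*
 * The _absolute variants skip prefixing entirely; they are for
 * addresses that are already guest absolute (for example, low-core
 * fields that must be reached regardless of the current prefix).
 */
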
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
					 const void *from, unsigned long n)
{
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	if (copy_to_user((void __user *) guestdest, from, n))
		return -EFAULT;
	return 0;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   u64 guestsrc, unsigned long n)
{
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	if (copy_from_user(to, (void __user *) guestsrc, n))
		return -EFAULT;
	return 0;
}

#endif