/*
 * access.h - access guest memory
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               unsigned long guestaddr)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

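/*
 * Note on the prefix handling above: on s390 the first two pages (8k)
 * of guest real storage are subject to low-address prefixing.  For
 * example, with a prefix of 0x20000 a guest access to address 0x0
 * resolves to absolute address 0x20000 and an access to 0x20000
 * resolves to absolute address 0x0; all other addresses map 1:1.
 */
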
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}

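/*
 * Usage sketch for the fetch accessors (illustrative only; "addr" and
 * the surrounding error handling are hypothetical, not part of this
 * header):
 *
 *      u32 val;
 *      int rc;
 *
 *      rc = get_guest_u32(vcpu, addr, &val); // addr must be 4-byte aligned
 *      if (rc)
 *              return rc;  // -EFAULT or an error from gmap_fault()
 */
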
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}

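/*
 * Store side mirrors the fetch side (illustrative only; "addr" is
 * hypothetical).  Alignment is enforced by the BUG_ON in each helper:
 *
 *      rc = put_guest_u16(vcpu, addr, 0x0001); // addr must be 2-byte aligned
 *      if (rc)
 *              return rc;
 */
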
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}

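/*
 * The byte-wise loop above is the slow path: put_guest_u8() re-applies
 * low-address prefixing for every single byte, which is what keeps
 * copies that cross a prefix boundary correct.
 */
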
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestdest + n < guestdest)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestdest & ~PMD_MASK);

        r = copy_to_user(uptr, from, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        from += size;
        n -= size;
        guestdest += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                from += PMD_SIZE;
                n -= PMD_SIZE;
                guestdest += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}

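/*
 * Note on the chunking above: gmap_fault() resolves one host mapping at
 * a time, and on s390 a segment table entry covers PMD_SIZE (1 MB).  A
 * copy of, say, 3.5 MB starting 256k into a segment is therefore split
 * into a 768k head, two full 1 MB segments and a 768k tail, with the
 * guest address re-translated before each chunk.
 */
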
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                         unsigned long guestdest,
                                         void *from, unsigned long n)
{
        return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                void *from, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        /* take the slow path if the copy crosses a prefix boundary */
        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

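/*
 * Usage sketch (illustrative only; "buf" and "gaddr" are hypothetical):
 *
 *      u8 buf[512];
 *
 *      rc = copy_to_guest(vcpu, gaddr, buf, sizeof(buf));
 *      if (rc)             // -EFAULT on overflow or unmapped guest memory
 *              return rc;
 *
 * copy_to_guest() honours the vcpu's prefix; copy_to_guest_absolute()
 * writes to the address as-is.
 */
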
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestsrc & ~PMD_MASK);

        r = copy_from_user(to, uptr, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        to += size;
        n -= size;
        guestsrc += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                to += PMD_SIZE;
                n -= PMD_SIZE;
                guestsrc += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           unsigned long guestsrc,
                                           unsigned long n)
{
        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  unsigned long guestsrc, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        /* take the slow path if the copy crosses a prefix boundary */
        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

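/*
 * Usage sketch (illustrative only; "parm" and "gaddr" are hypothetical):
 *
 *      u32 parm;
 *
 *      rc = copy_from_guest(vcpu, &parm, gaddr, sizeof(parm));
 *      if (rc)
 *              return rc;
 *
 * copy_from_guest() honours the vcpu's prefix; use
 * copy_from_guest_absolute() when the address is already absolute.
 */
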
#endif