/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/export.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>

/*
 * Note that the native side is already converted to a timespec, because
 * that's what we want anyway.
 */

static int compat_get_timeval(struct timespec *o,
                struct compat_timeval __user *i)
{
        long usec;

        if (get_user(o->tv_sec, &i->tv_sec) ||
            get_user(usec, &i->tv_usec))
                return -EFAULT;
        o->tv_nsec = usec * 1000;
        return 0;
}

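/*
 * Illustrative note (not part of the original source): beyond copying, the
 * helper above only has to scale microseconds to nanoseconds, e.g. a
 * compat_timeval of { .tv_sec = 5, .tv_usec = 250000 } becomes a native
 * timespec of { .tv_sec = 5, .tv_nsec = 250000000 }.
 */
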
static int compat_put_timeval(struct compat_timeval __user *o,
                struct timeval *i)
{
        return (put_user(i->tv_sec, &o->tv_sec) ||
                put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
{
        memset(txc, 0, sizeof(struct timex));

        if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
            __get_user(txc->modes, &utp->modes) ||
            __get_user(txc->offset, &utp->offset) ||
            __get_user(txc->freq, &utp->freq) ||
            __get_user(txc->maxerror, &utp->maxerror) ||
            __get_user(txc->esterror, &utp->esterror) ||
            __get_user(txc->status, &utp->status) ||
            __get_user(txc->constant, &utp->constant) ||
            __get_user(txc->precision, &utp->precision) ||
            __get_user(txc->tolerance, &utp->tolerance) ||
            __get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
            __get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
            __get_user(txc->tick, &utp->tick) ||
            __get_user(txc->ppsfreq, &utp->ppsfreq) ||
            __get_user(txc->jitter, &utp->jitter) ||
            __get_user(txc->shift, &utp->shift) ||
            __get_user(txc->stabil, &utp->stabil) ||
            __get_user(txc->jitcnt, &utp->jitcnt) ||
            __get_user(txc->calcnt, &utp->calcnt) ||
            __get_user(txc->errcnt, &utp->errcnt) ||
            __get_user(txc->stbcnt, &utp->stbcnt))
                return -EFAULT;

        return 0;
}

static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
{
        if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
            __put_user(txc->modes, &utp->modes) ||
            __put_user(txc->offset, &utp->offset) ||
            __put_user(txc->freq, &utp->freq) ||
            __put_user(txc->maxerror, &utp->maxerror) ||
            __put_user(txc->esterror, &utp->esterror) ||
            __put_user(txc->status, &utp->status) ||
            __put_user(txc->constant, &utp->constant) ||
            __put_user(txc->precision, &utp->precision) ||
            __put_user(txc->tolerance, &utp->tolerance) ||
            __put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
            __put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
            __put_user(txc->tick, &utp->tick) ||
            __put_user(txc->ppsfreq, &utp->ppsfreq) ||
            __put_user(txc->jitter, &utp->jitter) ||
            __put_user(txc->shift, &utp->shift) ||
            __put_user(txc->stabil, &utp->stabil) ||
            __put_user(txc->jitcnt, &utp->jitcnt) ||
            __put_user(txc->calcnt, &utp->calcnt) ||
            __put_user(txc->errcnt, &utp->errcnt) ||
            __put_user(txc->stbcnt, &utp->stbcnt) ||
            __put_user(txc->tai, &utp->tai))
                return -EFAULT;
        return 0;
}

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
                struct timezone __user *tz)
{
        if (tv) {
                struct timeval ktv;
                do_gettimeofday(&ktv);
                if (compat_put_timeval(tv, &ktv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
                struct timezone __user *tz)
{
        struct timespec kts;
        struct timezone ktz;

        if (tv) {
                if (compat_get_timeval(&kts, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_from_user(&ktz, tz, sizeof(ktz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
                __get_user(ts->tv_sec, &cts->tv_sec) ||
                __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
                __put_user(ts->tv_sec, &cts->tv_sec) ||
                __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_compat_timespec);

static long compat_nanosleep_restart(struct restart_block *restart)
{
        struct compat_timespec __user *rmtp;
        struct timespec rmt;
        mm_segment_t oldfs;
        long ret;

        restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep_restart(restart);
        set_fs(oldfs);

        if (ret) {
                rmtp = restart->nanosleep.compat_rmtp;

                if (rmtp && put_compat_timespec(&rmt, rmtp))
                        return -EFAULT;
        }

        return ret;
}

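/*
 * Illustrative sketch (not part of the original source): the get_fs()/set_fs()
 * sequence above is the pattern this file uses whenever it reuses a native
 * syscall that expects a __user pointer but is handed a kernel buffer;
 * some_native_syscall() below is only a placeholder:
 *
 *      mm_segment_t oldfs = get_fs();
 *
 *      set_fs(KERNEL_DS);      // let uaccess checks accept kernel addresses
 *      ret = some_native_syscall((type __user *) &kernel_buf);
 *      set_fs(oldfs);          // restore the caller's address limit
 *
 * The result is then converted back into the 32-bit layout by hand.
 */
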
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
                                     struct compat_timespec __user *rmtp)
{
        struct timespec tu, rmt;
        mm_segment_t oldfs;
        long ret;

        if (get_compat_timespec(&tu, rqtp))
                return -EFAULT;

        if (!timespec_valid(&tu))
                return -EINVAL;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep(&tu,
                                rmtp ? (struct timespec __user *)&rmt : NULL,
                                HRTIMER_MODE_REL, CLOCK_MONOTONIC);
        set_fs(oldfs);

        if (ret) {
                struct restart_block *restart
                        = &current_thread_info()->restart_block;

                restart->fn = compat_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;

                if (rmtp && put_compat_timespec(&rmt, rmtp))
                        return -EFAULT;
        }

        return ret;
}

static inline long get_compat_itimerval(struct itimerval *o,
                struct compat_itimerval __user *i)
{
        return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
                (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
                 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
                 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
                 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
                struct itimerval *i)
{
        return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
                (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
                 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
                 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
                 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
                struct compat_itimerval __user *it)
{
        struct itimerval kit;
        int error;

        error = do_getitimer(which, &kit);
        if (!error && put_compat_itimerval(it, &kit))
                error = -EFAULT;
        return error;
}

asmlinkage long compat_sys_setitimer(int which,
                struct compat_itimerval __user *in,
                struct compat_itimerval __user *out)
{
        struct itimerval kin, kout;
        int error;

        if (in) {
                if (get_compat_itimerval(&kin, in))
                        return -EFAULT;
        } else
                memset(&kin, 0, sizeof(kin));

        error = do_setitimer(which, &kin, out ? &kout : NULL);
        if (error || !out)
                return error;
        if (put_compat_itimerval(out, &kout))
                return -EFAULT;
        return 0;
}

static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
        return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
        if (tbuf) {
                struct tms tms;
                struct compat_tms tmp;

                do_sys_times(&tms);
                /* Convert our struct tms to the compat version. */
                tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
                tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
                tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
                tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return compat_jiffies_to_clock_t(jiffies);
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
        old_sigset_t s;
        long ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sigpending((old_sigset_t __user *) &s);
        set_fs(old_fs);
        if (ret == 0)
                ret = put_user(s, set);
        return ret;
}

#endif

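/*
 * Illustrative sketch (typical definitions assumed, not part of the original
 * source): the assumption above holds because both types are plain scalars,
 * e.g. on most 64-bit architectures roughly
 *
 *      typedef unsigned long   old_sigset_t;           // native, 64 bits
 *      typedef u32             compat_old_sigset_t;    // 32-bit ABI
 *
 * so get_user()/put_user() can move them directly; put_user(s, set) above
 * stores only the low 32 bits, which is all the old ABI can represent.
 */
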
#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/*
 * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
 * blocked set of signals to the supplied signal set
 */
static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
{
        memcpy(blocked->sig, &set, sizeof(set));
}

asmlinkage long compat_sys_sigprocmask(int how,
                                       compat_old_sigset_t __user *nset,
                                       compat_old_sigset_t __user *oset)
{
        old_sigset_t old_set, new_set;
        sigset_t new_blocked;

        old_set = current->blocked.sig[0];

        if (nset) {
                if (get_user(new_set, nset))
                        return -EFAULT;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

                new_blocked = current->blocked;

                switch (how) {
                case SIG_BLOCK:
                        sigaddsetmask(&new_blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&new_blocked, new_set);
                        break;
                case SIG_SETMASK:
                        compat_sig_setmask(&new_blocked, new_set);
                        break;
                default:
                        return -EINVAL;
                }

                set_current_blocked(&new_blocked);
        }

        if (oset) {
                if (put_user(old_set, oset))
                        return -EFAULT;
        }

        return 0;
}

#endif

asmlinkage long compat_sys_setrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;

        if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
            __get_user(r.rlim_cur, &rlim->rlim_cur) ||
            __get_user(r.rlim_max, &rlim->rlim_max))
                return -EFAULT;

        if (r.rlim_cur == COMPAT_RLIM_INFINITY)
                r.rlim_cur = RLIM_INFINITY;
        if (r.rlim_max == COMPAT_RLIM_INFINITY)
                r.rlim_max = RLIM_INFINITY;
        return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_old_getrlimit(resource, &r);
        set_fs(old_fs);

        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;

        ret = do_prlimit(current, resource, NULL, &r);
        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
        if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
            __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
            __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
            __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
            __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
            __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
            __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
            __put_user(r->ru_idrss, &ru->ru_idrss) ||
            __put_user(r->ru_isrss, &ru->ru_isrss) ||
            __put_user(r->ru_minflt, &ru->ru_minflt) ||
            __put_user(r->ru_majflt, &ru->ru_majflt) ||
            __put_user(r->ru_nswap, &ru->ru_nswap) ||
            __put_user(r->ru_inblock, &ru->ru_inblock) ||
            __put_user(r->ru_oublock, &ru->ru_oublock) ||
            __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
            __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
            __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
            __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
            __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
                return -EFAULT;
        return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
        struct rusage r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getrusage(who, (struct rusage __user *) &r);
        set_fs(old_fs);

        if (ret)
                return ret;

        if (put_compat_rusage(&r, ru))
                return -EFAULT;

        return 0;
}

asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
        struct compat_rusage __user *ru)
{
        if (!ru) {
                return sys_wait4(pid, stat_addr, options, NULL);
        } else {
                struct rusage r;
                int ret;
                unsigned int status;
                mm_segment_t old_fs = get_fs();

                set_fs(KERNEL_DS);
                ret = sys_wait4(pid,
                                (stat_addr ?
                                 (unsigned int __user *) &status : NULL),
                                options, (struct rusage __user *) &r);
                set_fs(old_fs);

                if (ret > 0) {
                        if (put_compat_rusage(&r, ru))
                                return -EFAULT;
                        if (stat_addr && put_user(status, stat_addr))
                                return -EFAULT;
                }
                return ret;
        }
}

asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
                struct compat_siginfo __user *uinfo, int options,
                struct compat_rusage __user *uru)
{
        siginfo_t info;
        struct rusage ru;
        long ret;
        mm_segment_t old_fs = get_fs();

        memset(&info, 0, sizeof(info));

        set_fs(KERNEL_DS);
        ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
                         uru ? (struct rusage __user *)&ru : NULL);
        set_fs(old_fs);

        if ((ret < 0) || (info.si_signo == 0))
                return ret;

        if (uru) {
                ret = put_compat_rusage(&ru, uru);
                if (ret)
                        return ret;
        }

        BUG_ON(info.si_code & __SI_MASK);
        info.si_code |= __SI_CHLD;
        return copy_siginfo_to_user32(uinfo, &info);
}

static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
                                    unsigned len, struct cpumask *new_mask)
{
        unsigned long *k;

        if (len < cpumask_size())
                memset(new_mask, 0, cpumask_size());
        else if (len > cpumask_size())
                len = cpumask_size();

        k = cpumask_bits(new_mask);
        return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
                                             unsigned int len,
                                             compat_ulong_t __user *user_mask_ptr)
{
        cpumask_var_t new_mask;
        int retval;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
        if (retval)
                goto out;

        retval = sched_setaffinity(pid, new_mask);
out:
        free_cpumask_var(new_mask);
        return retval;
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
                                             compat_ulong_t __user *user_mask_ptr)
{
        int ret;
        cpumask_var_t mask;

        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
                return -EINVAL;
        if (len & (sizeof(compat_ulong_t)-1))
                return -EINVAL;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
                size_t retlen = min_t(size_t, len, cpumask_size());

                if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
                        ret = -EFAULT;
                else
                        ret = retlen;
        }
        free_cpumask_var(mask);

        return ret;
}

int get_compat_itimerspec(struct itimerspec *dst,
                          const struct compat_itimerspec __user *src)
{
        if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
            get_compat_timespec(&dst->it_value, &src->it_value))
                return -EFAULT;
        return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
                          const struct itimerspec *src)
{
        if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
            put_compat_timespec(&src->it_value, &dst->it_value))
                return -EFAULT;
        return 0;
}

long compat_sys_timer_create(clockid_t which_clock,
                        struct compat_sigevent __user *timer_event_spec,
                        timer_t __user *created_timer_id)
{
        struct sigevent __user *event = NULL;

        if (timer_event_spec) {
                struct sigevent kevent;

                event = compat_alloc_user_space(sizeof(*event));
                if (get_compat_sigevent(&kevent, timer_event_spec) ||
                    copy_to_user(event, &kevent, sizeof(*event)))
                        return -EFAULT;
        }

        return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
                          struct compat_itimerspec __user *new,
                          struct compat_itimerspec __user *old)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec newts, oldts;

        if (!new)
                return -EINVAL;
        if (get_compat_itimerspec(&newts, new))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_settime(timer_id, flags,
                                (struct itimerspec __user *) &newts,
                                (struct itimerspec __user *) &oldts);
        set_fs(oldfs);
        if (!err && old && put_compat_itimerspec(old, &oldts))
                return -EFAULT;
        return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
                struct compat_itimerspec __user *setting)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_gettime(timer_id,
                                (struct itimerspec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_itimerspec(setting, &ts))
                return -EFAULT;
        return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        if (get_compat_timespec(&ts, tp))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_settime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_gettime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

long compat_sys_clock_adjtime(clockid_t which_clock,
                struct compat_timex __user *utp)
{
        struct timex txc;
        mm_segment_t oldfs;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
        set_fs(oldfs);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}

long compat_sys_clock_getres(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_getres(which_clock,
                               (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && tp && put_compat_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
        long err;
        mm_segment_t oldfs;
        struct timespec tu;
        struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;

        restart->nanosleep.rmtp = (struct timespec __user *) &tu;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = clock_nanosleep_restart(restart);
        set_fs(oldfs);

        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            put_compat_timespec(&tu, rmtp))
                return -EFAULT;

        if (err == -ERESTART_RESTARTBLOCK) {
                restart->fn = compat_clock_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;
        }
        return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
                            struct compat_timespec __user *rqtp,
                            struct compat_timespec __user *rmtp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec in, out;
        struct restart_block *restart;

        if (get_compat_timespec(&in, rqtp))
                return -EFAULT;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_nanosleep(which_clock, flags,
                                  (struct timespec __user *) &in,
                                  (struct timespec __user *) &out);
        set_fs(oldfs);

        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            put_compat_timespec(&out, rmtp))
                return -EFAULT;

        if (err == -ERESTART_RESTARTBLOCK) {
                restart = &current_thread_info()->restart_block;
                restart->fn = compat_clock_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;
        }
        return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
                const struct compat_sigevent __user *u_event)
{
        memset(event, 0, sizeof(*event));
        return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
                __get_user(event->sigev_value.sival_int,
                        &u_event->sigev_value.sival_int) ||
                __get_user(event->sigev_signo, &u_event->sigev_signo) ||
                __get_user(event->sigev_notify, &u_event->sigev_notify) ||
                __get_user(event->sigev_notify_thread_id,
                        &u_event->sigev_notify_thread_id))
                ? -EFAULT : 0;
}

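/*
 * Illustrative sketch (layout assumption, not part of the original source):
 * in the 32-bit ABI the sigev_value union overlays a 32-bit pointer on the
 * int member, roughly
 *
 *      typedef union compat_sigval {
 *              compat_int_t    sival_int;
 *              compat_uptr_t   sival_ptr;      // 32 bits wide
 *      } compat_sigval_t;
 *
 * which is why copying sival_int is enough to preserve every bit of
 * sival_ptr, as the comment above assumes.
 */
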
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = 0;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        /*
                         * We don't want to read past the end of the userspace
                         * bitmap. We must however ensure the end of the
                         * kernel bitmap is zeroed.
                         */
                        if (nr_compat_longs-- > 0) {
                                if (__get_user(um, umask))
                                        return -EFAULT;
                        } else {
                                um = 0;
                        }

                        umask++;
                        m |= (long)um << (j * BITS_PER_COMPAT_LONG);
                }
                *mask++ = m;
        }

        return 0;
}

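/*
 * Illustrative worked example (not part of the original source): on a 64-bit
 * kernel BITS_PER_COMPAT_LONG is 32, so each native word is assembled from
 * two 32-bit user words.  With bitmap_size = 40:
 *
 *      bitmap_size = ALIGN(40, 32) = 64        ->  nr_compat_longs = 2
 *      m = um[0] | ((long)um[1] << 32)         ->  one native word stored
 *
 * Compat words beyond the user-supplied length are treated as zero, so the
 * tail of the kernel bitmap is always cleared.
 */
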
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = *mask++;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        um = m;

                        /*
                         * We don't want to write past the end of the userspace
                         * bitmap.
                         */
                        if (nr_compat_longs-- > 0) {
                                if (__put_user(um, umask))
                                        return -EFAULT;
                        }

                        umask++;
                        m >>= 4*sizeof(um);
                        m >>= 4*sizeof(um);
                }
        }

        return 0;
}

void
sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
{
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
        case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
        case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
        case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
        }
}
EXPORT_SYMBOL_GPL(sigset_from_compat);

asmlinkage long
compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
                struct compat_siginfo __user *uinfo,
                struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
        compat_sigset_t s32;
        sigset_t s;
        struct timespec t;
        siginfo_t info;
        long ret;

        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&s, &s32);

        if (uts) {
                if (get_compat_timespec(&t, uts))
                        return -EFAULT;
        }

        ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

        if (ret > 0 && uinfo) {
                if (copy_siginfo_to_user32(uinfo, &info))
                        ret = -EFAULT;
        }

        return ret;
}

asmlinkage long
compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
                             struct compat_siginfo __user *uinfo)
{
        siginfo_t info;

        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
        return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user * tloc)
{
        compat_time_t i;
        struct timeval tv;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i,tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
        sigset_t newset;
        compat_sigset_t newset32;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&newset, &newset32);
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        current->saved_sigmask = current->blocked;
        set_current_blocked(&newset);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}

#ifdef CONFIG_NUMA
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
                compat_uptr_t __user *pages32,
                const int __user *nodes,
                int __user *status,
                int flags)
{
        const void __user * __user *pages;
        int i;

        pages = compat_alloc_user_space(nr_pages * sizeof(void *));
        for (i = 0; i < nr_pages; i++) {
                compat_uptr_t p;

                if (get_user(p, pages32 + i) ||
                        put_user(compat_ptr(p), pages + i))
                        return -EFAULT;
        }
        return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
                        compat_ulong_t maxnode,
                        const compat_ulong_t __user *old_nodes,
                        const compat_ulong_t __user *new_nodes)
{
        unsigned long __user *old = NULL;
        unsigned long __user *new = NULL;
        nodemask_t tmp_mask;
        unsigned long nr_bits;
        unsigned long size;

        nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
        size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
        if (old_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
                        return -EFAULT;
                old = compat_alloc_user_space(new_nodes ? size * 2 : size);
                if (new_nodes)
                        new = old + size / sizeof(unsigned long);
                if (copy_to_user(old, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        if (new_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
                        return -EFAULT;
                if (new == NULL)
                        new = compat_alloc_user_space(size);
                if (copy_to_user(new, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif

struct compat_sysinfo {
        s32 uptime;
        u32 loads[3];
        u32 totalram;
        u32 freeram;
        u32 sharedram;
        u32 bufferram;
        u32 totalswap;
        u32 freeswap;
        u16 procs;
        u16 pad;
        u32 totalhigh;
        u32 freehigh;
        u32 mem_unit;
        char _f[20-2*sizeof(u32)-sizeof(int)];
};

asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
        struct sysinfo s;

        do_sysinfo(&s);

        /* Check to see if any memory value is too large for 32-bit and scale
         *  down if needed
         */
        if ((s.totalram >> 32) || (s.totalswap >> 32)) {
                int bitcount = 0;

                while (s.mem_unit < PAGE_SIZE) {
                        s.mem_unit <<= 1;
                        bitcount++;
                }

                s.totalram >>= bitcount;
                s.freeram >>= bitcount;
                s.sharedram >>= bitcount;
                s.bufferram >>= bitcount;
                s.totalswap >>= bitcount;
                s.freeswap >>= bitcount;
                s.totalhigh >>= bitcount;
                s.freehigh >>= bitcount;
        }

        if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
            __put_user (s.uptime, &info->uptime) ||
            __put_user (s.loads[0], &info->loads[0]) ||
            __put_user (s.loads[1], &info->loads[1]) ||
            __put_user (s.loads[2], &info->loads[2]) ||
            __put_user (s.totalram, &info->totalram) ||
            __put_user (s.freeram, &info->freeram) ||
            __put_user (s.sharedram, &info->sharedram) ||
            __put_user (s.bufferram, &info->bufferram) ||
            __put_user (s.totalswap, &info->totalswap) ||
            __put_user (s.freeswap, &info->freeswap) ||
            __put_user (s.procs, &info->procs) ||
            __put_user (s.totalhigh, &info->totalhigh) ||
            __put_user (s.freehigh, &info->freehigh) ||
            __put_user (s.mem_unit, &info->mem_unit))
                return -EFAULT;

        return 0;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
        void __user *ptr;

        /* If len would occupy more than half of the entire compat space... */
        if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
                return NULL;

        ptr = arch_compat_alloc_user_space(len);

        if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
                return NULL;

        return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);

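/*
 * Illustrative usage sketch (not part of the original source): callers such
 * as compat_sys_timer_create() above use this helper to build a native-layout
 * argument in user-accessible memory and then hand it to the regular syscall:
 *
 *      struct sigevent __user *event = compat_alloc_user_space(sizeof(*event));
 *
 *      if (!event || copy_to_user(event, &kevent, sizeof(*event)))
 *              return -EFAULT;
 *      return sys_timer_create(which_clock, event, created_timer_id);
 */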