/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/export.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <linux/uaccess.h>

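/*
 * Fetch a 32-bit userspace struct compat_timex and expand it, field by
 * field, into the kernel's native struct timex.
 */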
static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
{
        memset(txc, 0, sizeof(struct timex));

        if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
                        __get_user(txc->modes, &utp->modes) ||
                        __get_user(txc->offset, &utp->offset) ||
                        __get_user(txc->freq, &utp->freq) ||
                        __get_user(txc->maxerror, &utp->maxerror) ||
                        __get_user(txc->esterror, &utp->esterror) ||
                        __get_user(txc->status, &utp->status) ||
                        __get_user(txc->constant, &utp->constant) ||
                        __get_user(txc->precision, &utp->precision) ||
                        __get_user(txc->tolerance, &utp->tolerance) ||
                        __get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
                        __get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
                        __get_user(txc->tick, &utp->tick) ||
                        __get_user(txc->ppsfreq, &utp->ppsfreq) ||
                        __get_user(txc->jitter, &utp->jitter) ||
                        __get_user(txc->shift, &utp->shift) ||
                        __get_user(txc->stabil, &utp->stabil) ||
                        __get_user(txc->jitcnt, &utp->jitcnt) ||
                        __get_user(txc->calcnt, &utp->calcnt) ||
                        __get_user(txc->errcnt, &utp->errcnt) ||
                        __get_user(txc->stbcnt, &utp->stbcnt))
                return -EFAULT;

        return 0;
}

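/*
 * Mirror of compat_get_timex(): copy a kernel struct timex back out to a
 * userspace struct compat_timex, again one field at a time.
 */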
static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
{
        if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
                        __put_user(txc->modes, &utp->modes) ||
                        __put_user(txc->offset, &utp->offset) ||
                        __put_user(txc->freq, &utp->freq) ||
                        __put_user(txc->maxerror, &utp->maxerror) ||
                        __put_user(txc->esterror, &utp->esterror) ||
                        __put_user(txc->status, &utp->status) ||
                        __put_user(txc->constant, &utp->constant) ||
                        __put_user(txc->precision, &utp->precision) ||
                        __put_user(txc->tolerance, &utp->tolerance) ||
                        __put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
                        __put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
                        __put_user(txc->tick, &utp->tick) ||
                        __put_user(txc->ppsfreq, &utp->ppsfreq) ||
                        __put_user(txc->jitter, &utp->jitter) ||
                        __put_user(txc->shift, &utp->shift) ||
                        __put_user(txc->stabil, &utp->stabil) ||
                        __put_user(txc->jitcnt, &utp->jitcnt) ||
                        __put_user(txc->calcnt, &utp->calcnt) ||
                        __put_user(txc->errcnt, &utp->errcnt) ||
                        __put_user(txc->stbcnt, &utp->stbcnt) ||
                        __put_user(txc->tai, &utp->tai))
                return -EFAULT;

        return 0;
}

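/*
 * 32-bit gettimeofday()/settimeofday(): convert between the compat timeval
 * layout and the kernel's native time representation before calling the
 * common implementation.
 */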
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        if (tv) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (compat_put_timeval(&ktv, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (compat_get_timeval(&user_tv, tv))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

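/*
 * Low-level helpers that copy a single compat_timeval/compat_timespec
 * between kernel and user space with __get_user()/__put_user().
 */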
static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
{
        return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
                        __get_user(tv->tv_sec, &ctv->tv_sec) ||
                        __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}

static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
{
        return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
                        __put_user(tv->tv_sec, &ctv->tv_sec) ||
                        __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}

static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
                        __get_user(ts->tv_sec, &cts->tv_sec) ||
                        __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
                        __put_user(ts->tv_sec, &cts->tv_sec) ||
                        __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

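/*
 * Exported wrappers: when COMPAT_USE_64BIT_TIME is set the userspace layout
 * already matches the native one, so a plain copy suffices; otherwise fall
 * back to the field-by-field helpers above.
 */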
int compat_get_timeval(struct timeval *tv, const void __user *utv)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_from_user(tv, utv, sizeof(*tv)) ? -EFAULT : 0;
        else
                return __compat_get_timeval(tv, utv);
}
EXPORT_SYMBOL_GPL(compat_get_timeval);

int compat_put_timeval(const struct timeval *tv, void __user *utv)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_to_user(utv, tv, sizeof(*tv)) ? -EFAULT : 0;
        else
                return __compat_put_timeval(tv, utv);
}
EXPORT_SYMBOL_GPL(compat_put_timeval);

int compat_get_timespec(struct timespec *ts, const void __user *uts)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
        else
                return __compat_get_timespec(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_get_timespec);

int compat_put_timespec(const struct timespec *ts, void __user *uts)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
        else
                return __compat_put_timespec(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_put_timespec);

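/*
 * Turn a userspace compat_timespec into a native struct timespec staged in
 * compat_alloc_user_space(), so native syscall code can use it unchanged.
 */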
int compat_convert_timespec(struct timespec __user **kts,
                            const void __user *cts)
{
        struct timespec ts;
        struct timespec __user *uts;

        if (!cts || COMPAT_USE_64BIT_TIME) {
                *kts = (struct timespec __user *)cts;
                return 0;
        }

        uts = compat_alloc_user_space(sizeof(ts));
        if (!uts)
                return -EFAULT;
        if (compat_get_timespec(&ts, cts))
                return -EFAULT;
        if (copy_to_user(uts, &ts, sizeof(ts)))
                return -EFAULT;

        *kts = uts;
        return 0;
}

static long compat_nanosleep_restart(struct restart_block *restart)
{
        struct compat_timespec __user *rmtp;
        struct timespec rmt;
        mm_segment_t oldfs;
        long ret;

        restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep_restart(restart);
        set_fs(oldfs);

        if (ret == -ERESTART_RESTARTBLOCK) {
                rmtp = restart->nanosleep.compat_rmtp;

                if (rmtp && compat_put_timespec(&rmt, rmtp))
                        return -EFAULT;
        }

        return ret;
}

COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
                       struct compat_timespec __user *, rmtp)
{
        struct timespec tu, rmt;
        struct timespec64 tu64;
        mm_segment_t oldfs;
        long ret;

        if (compat_get_timespec(&tu, rqtp))
                return -EFAULT;

        tu64 = timespec_to_timespec64(tu);
        if (!timespec64_valid(&tu64))
                return -EINVAL;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep(&tu64,
                                rmtp ? (struct timespec __user *)&rmt : NULL,
                                HRTIMER_MODE_REL, CLOCK_MONOTONIC);
        set_fs(oldfs);

        /*
         * hrtimer_nanosleep() can only return 0 or
         * -ERESTART_RESTARTBLOCK here because:
         *
         * - we call it with HRTIMER_MODE_REL and therefore exclude the
         *   -ERESTARTNOHAND return path.
         *
         * - we supply the rmtp argument from the task stack (due to
         *   the necessary compat conversion), so the update cannot
         *   fail, which excludes the -EFAULT return path as well. If
         *   it fails nevertheless we have a bigger problem and won't
         *   reach this place anymore.
         *
         * - if the return value is 0, we do not have to update rmtp
         *   because there is no remaining time.
         *
         * We check for -ERESTART_RESTARTBLOCK nevertheless, in case the
         * core implementation decides to return random nonsense.
         */
        if (ret == -ERESTART_RESTARTBLOCK) {
                struct restart_block *restart = &current->restart_block;

                restart->fn = compat_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;

                if (rmtp && compat_put_timespec(&rmt, rmtp))
                        return -EFAULT;
        }
        return ret;
}

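/*
 * getitimer()/setitimer() compat support: struct compat_itimerval differs
 * from the native struct itimerval only in the width of its time fields.
 */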
static inline long get_compat_itimerval(struct itimerval *o,
                struct compat_itimerval __user *i)
{
        return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
                (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
                 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
                 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
                 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
                const struct itimerval *i)
{
        return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
                (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
                 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
                 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
                 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long sys_ni_posix_timers(void);

COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
                struct compat_itimerval __user *, it)
{
        struct itimerval kit;
        int error;

        if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
                return sys_ni_posix_timers();

        error = do_getitimer(which, &kit);
        if (!error && put_compat_itimerval(it, &kit))
                error = -EFAULT;
        return error;
}

COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
                struct compat_itimerval __user *, in,
                struct compat_itimerval __user *, out)
{
        struct itimerval kin, kout;
        int error;

        if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
                return sys_ni_posix_timers();

        if (in) {
                if (get_compat_itimerval(&kin, in))
                        return -EFAULT;
        } else {
                memset(&kin, 0, sizeof(kin));
        }

        error = do_setitimer(which, &kin, out ? &kout : NULL);
        if (error || !out)
                return error;
        if (put_compat_itimerval(out, &kout))
                return -EFAULT;
        return 0;
}

static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
        return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
        if (tbuf) {
                struct tms tms;
                struct compat_tms tmp;

                do_sys_times(&tms);
                /* Convert our struct tms to the compat version. */
                tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
                tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
                tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
                tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return compat_jiffies_to_clock_t(jiffies);
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
{
        old_sigset_t s;
        long ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sigpending((old_sigset_t __user *) &s);
        set_fs(old_fs);
        if (ret == 0)
                ret = put_user(s, set);
        return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/*
 * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
 * blocked set of signals to the supplied signal set
 */
static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
{
        memcpy(blocked->sig, &set, sizeof(set));
}

COMPAT_SYSCALL_DEFINE3(sigprocmask, int, how,
                       compat_old_sigset_t __user *, nset,
                       compat_old_sigset_t __user *, oset)
{
        old_sigset_t old_set, new_set;
        sigset_t new_blocked;

        old_set = current->blocked.sig[0];

        if (nset) {
                if (get_user(new_set, nset))
                        return -EFAULT;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

                new_blocked = current->blocked;

                switch (how) {
                case SIG_BLOCK:
                        sigaddsetmask(&new_blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&new_blocked, new_set);
                        break;
                case SIG_SETMASK:
                        compat_sig_setmask(&new_blocked, new_set);
                        break;
                default:
                        return -EINVAL;
                }

                set_current_blocked(&new_blocked);
        }

        if (oset) {
                if (put_user(old_set, oset))
                        return -EFAULT;
        }

        return 0;
}

#endif

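/*
 * Resource limits: COMPAT_RLIM_INFINITY is narrower than RLIM_INFINITY, so
 * limit values are translated when crossing the 32/64-bit boundary.
 */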
COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
                       struct compat_rlimit __user *, rlim)
{
        struct rlimit r;

        if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
            __get_user(r.rlim_cur, &rlim->rlim_cur) ||
            __get_user(r.rlim_max, &rlim->rlim_max))
                return -EFAULT;

        if (r.rlim_cur == COMPAT_RLIM_INFINITY)
                r.rlim_cur = RLIM_INFINITY;
        if (r.rlim_max == COMPAT_RLIM_INFINITY)
                r.rlim_max = RLIM_INFINITY;
        return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
                       struct compat_rlimit __user *, rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
        set_fs(old_fs);

        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

#endif

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
                       struct compat_rlimit __user *, rlim)
{
        struct rlimit r;
        int ret;

        ret = do_prlimit(current, resource, NULL, &r);
        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

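/* Copy a kernel struct rusage out to userspace in its 32-bit compat layout. */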
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
        if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
            __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
            __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
            __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
            __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
            __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
            __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
            __put_user(r->ru_idrss, &ru->ru_idrss) ||
            __put_user(r->ru_isrss, &ru->ru_isrss) ||
            __put_user(r->ru_minflt, &ru->ru_minflt) ||
            __put_user(r->ru_majflt, &ru->ru_majflt) ||
            __put_user(r->ru_nswap, &ru->ru_nswap) ||
            __put_user(r->ru_inblock, &ru->ru_inblock) ||
            __put_user(r->ru_oublock, &ru->ru_oublock) ||
            __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
            __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
            __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
            __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
            __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
                return -EFAULT;
        return 0;
}

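/*
 * wait4()/waitid(): run the native syscall against kernel buffers under
 * KERNEL_DS, then translate status and rusage back to the compat layout.
 */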
COMPAT_SYSCALL_DEFINE4(wait4,
        compat_pid_t, pid,
        compat_uint_t __user *, stat_addr,
        int, options,
        struct compat_rusage __user *, ru)
{
        if (!ru) {
                return sys_wait4(pid, stat_addr, options, NULL);
        } else {
                struct rusage r;
                int ret;
                unsigned int status;
                mm_segment_t old_fs = get_fs();

                set_fs(KERNEL_DS);
                ret = sys_wait4(pid,
                                (stat_addr ?
                                 (unsigned int __user *) &status : NULL),
                                options, (struct rusage __user *) &r);
                set_fs(old_fs);

                if (ret > 0) {
                        if (put_compat_rusage(&r, ru))
                                return -EFAULT;
                        if (stat_addr && put_user(status, stat_addr))
                                return -EFAULT;
                }
                return ret;
        }
}

COMPAT_SYSCALL_DEFINE5(waitid,
                int, which, compat_pid_t, pid,
                struct compat_siginfo __user *, uinfo, int, options,
                struct compat_rusage __user *, uru)
{
        siginfo_t info;
        struct rusage ru;
        long ret;
        mm_segment_t old_fs = get_fs();

        memset(&info, 0, sizeof(info));

        set_fs(KERNEL_DS);
        ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
                         uru ? (struct rusage __user *)&ru : NULL);
        set_fs(old_fs);

        if ((ret < 0) || (info.si_signo == 0))
                return ret;

        if (uru) {
                /* sys_waitid() overwrites everything in ru */
                if (COMPAT_USE_64BIT_TIME)
                        ret = copy_to_user(uru, &ru, sizeof(ru));
                else
                        ret = put_compat_rusage(&ru, uru);
                if (ret)
                        return -EFAULT;
        }

        BUG_ON(info.si_code & __SI_MASK);
        info.si_code |= __SI_CHLD;
        return copy_siginfo_to_user32(uinfo, &info);
}

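/*
 * sched_{set,get}affinity(): translate between the compat_ulong_t bitmap
 * passed by 32-bit userspace and the kernel's struct cpumask.
 */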
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
                                     unsigned len, struct cpumask *new_mask)
{
        unsigned long *k;

        if (len < cpumask_size())
                memset(new_mask, 0, cpumask_size());
        else if (len > cpumask_size())
                len = cpumask_size();

        k = cpumask_bits(new_mask);
        return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid,
                       unsigned int, len,
                       compat_ulong_t __user *, user_mask_ptr)
{
        cpumask_var_t new_mask;
        int retval;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
        if (retval)
                goto out;

        retval = sched_setaffinity(pid, new_mask);
out:
        free_cpumask_var(new_mask);
        return retval;
}

COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
                       compat_ulong_t __user *, user_mask_ptr)
{
        int ret;
        cpumask_var_t mask;

        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
                return -EINVAL;
        if (len & (sizeof(compat_ulong_t)-1))
                return -EINVAL;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
                size_t retlen = min_t(size_t, len, cpumask_size());

                if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
                        ret = -EFAULT;
                else
                        ret = retlen;
        }
        free_cpumask_var(mask);

        return ret;
}

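/* itimerspec conversion helpers used by the POSIX timer compat syscalls below. */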
int get_compat_itimerspec(struct itimerspec *dst,
                          const struct compat_itimerspec __user *src)
{
        if (__compat_get_timespec(&dst->it_interval, &src->it_interval) ||
            __compat_get_timespec(&dst->it_value, &src->it_value))
                return -EFAULT;
        return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
                          const struct itimerspec *src)
{
        if (__compat_put_timespec(&src->it_interval, &dst->it_interval) ||
            __compat_put_timespec(&src->it_value, &dst->it_value))
                return -EFAULT;
        return 0;
}

COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
                       struct compat_sigevent __user *, timer_event_spec,
                       timer_t __user *, created_timer_id)
{
        struct sigevent __user *event = NULL;

        if (timer_event_spec) {
                struct sigevent kevent;

                event = compat_alloc_user_space(sizeof(*event));
                if (get_compat_sigevent(&kevent, timer_event_spec) ||
                    copy_to_user(event, &kevent, sizeof(*event)))
                        return -EFAULT;
        }

        return sys_timer_create(which_clock, event, created_timer_id);
}

COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                       struct compat_itimerspec __user *, new,
                       struct compat_itimerspec __user *, old)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec newts, oldts;

        if (!new)
                return -EINVAL;
        if (get_compat_itimerspec(&newts, new))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_settime(timer_id, flags,
                                (struct itimerspec __user *) &newts,
                                (struct itimerspec __user *) &oldts);
        set_fs(oldfs);
        if (!err && old && put_compat_itimerspec(old, &oldts))
                return -EFAULT;
        return err;
}

COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                       struct compat_itimerspec __user *, setting)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_gettime(timer_id,
                                (struct itimerspec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_itimerspec(setting, &ts))
                return -EFAULT;
        return err;
}

COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
                       struct compat_timespec __user *, tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        if (compat_get_timespec(&ts, tp))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_settime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        return err;
}

COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
                       struct compat_timespec __user *, tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_gettime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && compat_put_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
                       struct compat_timex __user *, utp)
{
        struct timex txc;
        mm_segment_t oldfs;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
        set_fs(oldfs);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}

COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
                       struct compat_timespec __user *, tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_getres(which_clock,
                               (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && tp && compat_put_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
        long err;
        mm_segment_t oldfs;
        struct timespec tu;
        struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;

        restart->nanosleep.rmtp = (struct timespec __user *) &tu;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = clock_nanosleep_restart(restart);
        set_fs(oldfs);

        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            compat_put_timespec(&tu, rmtp))
                return -EFAULT;

        if (err == -ERESTART_RESTARTBLOCK) {
                restart->fn = compat_clock_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;
        }
        return err;
}

COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
                       struct compat_timespec __user *, rqtp,
                       struct compat_timespec __user *, rmtp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec in, out;
        struct restart_block *restart;

        if (compat_get_timespec(&in, rqtp))
                return -EFAULT;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_nanosleep(which_clock, flags,
                                  (struct timespec __user *) &in,
                                  (struct timespec __user *) &out);
        set_fs(oldfs);

        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            compat_put_timespec(&out, rmtp))
                return -EFAULT;

        if (err == -ERESTART_RESTARTBLOCK) {
                restart = &current->restart_block;
                restart->fn = compat_clock_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;
        }
        return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
                const const struct compat_sigevent __user *u_event)
{
        memset(event, 0, sizeof(*event));
        return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
                __get_user(event->sigev_value.sival_int,
                        &u_event->sigev_value.sival_int) ||
                __get_user(event->sigev_signo, &u_event->sigev_signo) ||
                __get_user(event->sigev_notify, &u_event->sigev_notify) ||
                __get_user(event->sigev_notify_thread_id,
                        &u_event->sigev_notify_thread_id))
                ? -EFAULT : 0;
}

long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = 0;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        /*
                         * We don't want to read past the end of the userspace
                         * bitmap. We must however ensure the end of the
                         * kernel bitmap is zeroed.
                         */
                        if (nr_compat_longs) {
                                nr_compat_longs--;
                                if (__get_user(um, umask))
                                        return -EFAULT;
                        } else {
                                um = 0;
                        }

                        umask++;
                        m |= (long)um << (j * BITS_PER_COMPAT_LONG);
                }
                *mask++ = m;
        }

        return 0;
}

long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = *mask++;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        um = m;

                        /*
                         * We don't want to write past the end of the userspace
                         * bitmap.
                         */
                        if (nr_compat_longs) {
                                nr_compat_longs--;
                                if (__put_user(um, umask))
                                        return -EFAULT;
                        }

                        umask++;
                        m >>= 4*sizeof(um);
                        m >>= 4*sizeof(um);
                }
        }

        return 0;
}

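/*
 * Each 64-bit sigset_t word is split across two consecutive 32-bit words of
 * a compat_sigset_t; these helpers merge and split them accordingly.
 */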
void
sigset_from_compat(sigset_t *set, const compat_sigset_t *compat)
{
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
        case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
        case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
        case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
        }
}
EXPORT_SYMBOL_GPL(sigset_from_compat);

void
sigset_to_compat(compat_sigset_t *compat, const sigset_t *set)
{
        switch (_NSIG_WORDS) {
        case 4: compat->sig[7] = (set->sig[3] >> 32); compat->sig[6] = set->sig[3];
        case 3: compat->sig[5] = (set->sig[2] >> 32); compat->sig[4] = set->sig[2];
        case 2: compat->sig[3] = (set->sig[1] >> 32); compat->sig[2] = set->sig[1];
        case 1: compat->sig[1] = (set->sig[0] >> 32); compat->sig[0] = set->sig[0];
        }
}

COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
                struct compat_siginfo __user *, uinfo,
                struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
        compat_sigset_t s32;
        sigset_t s;
        struct timespec t;
        siginfo_t info;
        long ret;

        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&s, &s32);

        if (uts) {
                if (compat_get_timespec(&t, uts))
                        return -EFAULT;
        }

        ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

        if (ret > 0 && uinfo) {
                if (copy_siginfo_to_user32(uinfo, &info))
                        ret = -EFAULT;
        }

        return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
        compat_time_t i;
        struct timeval tv;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}

COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
                       compat_uptr_t __user *, pages32,
                       const int __user *, nodes,
                       int __user *, status,
                       int, flags)
{
        const void __user * __user *pages;
        int i;

        pages = compat_alloc_user_space(nr_pages * sizeof(void *));
        for (i = 0; i < nr_pages; i++) {
                compat_uptr_t p;

                if (get_user(p, pages32 + i) ||
                        put_user(compat_ptr(p), pages + i))
                        return -EFAULT;
        }
        return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
                       compat_ulong_t, maxnode,
                       const compat_ulong_t __user *, old_nodes,
                       const compat_ulong_t __user *, new_nodes)
{
        unsigned long __user *old = NULL;
        unsigned long __user *new = NULL;
        nodemask_t tmp_mask;
        unsigned long nr_bits;
        unsigned long size;

        nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
        size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
        if (old_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
                        return -EFAULT;
                old = compat_alloc_user_space(new_nodes ? size * 2 : size);
                if (new_nodes)
                        new = old + size / sizeof(unsigned long);
                if (copy_to_user(old, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        if (new_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
                        return -EFAULT;
                if (new == NULL)
                        new = compat_alloc_user_space(size);
                if (copy_to_user(new, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        return sys_migrate_pages(pid, nr_bits + 1, old, new);
}

COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
                       compat_pid_t, pid,
                       struct compat_timespec __user *, interval)
{
        struct timespec t;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
        set_fs(old_fs);
        if (compat_put_timespec(&t, interval))
                return -EFAULT;
        return ret;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
        void __user *ptr;

        /* If len would occupy more than half of the entire compat space... */
        if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
                return NULL;

        ptr = arch_compat_alloc_user_space(len);

        if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
                return NULL;

        return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);