/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_unlock;
	get_file(file);
	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(newfd, fdt->close_on_exec);
	else
		FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return newfd;

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		long retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;	/* signed local keeps the error negative */
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

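/*
 * Illustrative sketch (not kernel code): dup3() is dup2() plus a flags
 * argument, so userspace can duplicate a descriptor and mark it
 * close-on-exec in one step:
 *
 *	int copy = dup3(oldfd, newfd, O_CLOEXEC);	// hypothetical fds
 *
 * avoiding the race window of dup2() followed by a separate
 * fcntl(newfd, F_SETFD, FD_CLOEXEC) in threaded programs that fork/exec.
 */
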
asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!is_owner_or_cap(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	return error;
}

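/*
 * Illustrative sketch (not kernel code): since setfl() only honours the
 * bits in SETFL_MASK, the usual userspace pattern for F_SETFL is a
 * read-modify-write that leaves the creation-time flags alone:
 *
 *	int fl = fcntl(fd, F_GETFL);		// hypothetical fd
 *	fcntl(fd, F_SETFL, fl | O_NONBLOCK);
 *
 * Access-mode bits such as O_RDWR pass through unchanged because they are
 * masked out of both arg and the old f_flags above.
 */
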
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
			uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	const struct cred *cred = current_cred();
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, cred->uid, cred->euid, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

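/*
 * Illustrative sketch (not kernel code): the sign convention implemented
 * by f_setown()/f_getown() means F_SETOWN's argument selects either a
 * process or a process group as the SIGIO/SIGURG target:
 *
 *	fcntl(fd, F_SETOWN, getpid());		// signal this process
 *	fcntl(fd, F_SETOWN, -getpgrp());	// signal the whole group
 *
 * F_GETOWN reports a group back as a negative value, hence the XXX note
 * in do_fcntl() below about it colliding with error returns.
 */
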
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
		if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
			break;
		err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
		if (err >= 0) {
			get_file(filp);
			fd_install(err, filp);
		}
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

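/*
 * Illustrative sketch (not kernel code): with F_SETSIG set to a realtime
 * signal, a userspace handler installed with SA_SIGINFO can read the band
 * bitmap built from this table out of siginfo (handler name hypothetical):
 *
 *	void on_io(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_band & (POLLIN | POLLRDNORM))
 *			read(si->si_fd, buf, sizeof(buf));
 *	}
 */
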
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((fown->euid == 0 ||
		fown->euid == cred->suid || fown->euid == cred->uid ||
		fown->uid  == cred->suid || fown->uid  == cred->uid) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!group_send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);

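/*
 * Illustrative sketch (not code from this file): a character driver
 * typically wires its file_operations->fasync method straight to
 * fasync_helper() with a driver-private queue head, and later reports
 * readiness with kill_fasync(). Names here are hypothetical:
 *
 *	static struct fasync_struct *mydev_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_queue);
 *	}
 *
 *	// on new data:  kill_fasync(&mydev_queue, SIGIO, POLL_IN);
 */
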
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fasync_init)