1 /*
2 * linux/fs/locks.c
4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5 * Doug Evans (dje@spiff.uucp), August 07, 1992
7 * Deadlock detection added.
8 * FIXME: one thing isn't handled yet:
9 * - mandatory locks (requires lots of changes elsewhere)
10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
15 * Converted file_lock_table to a linked list from an array, which eliminates
16 * the limits on how many active file locks are open.
17 * Chad Page (pageone@netcom.com), November 27, 1994
19 * Removed dependency on file descriptors. dup()'ed file descriptors now
20 * get the same locks as the original file descriptors, and a close() on
21 * any file descriptor removes ALL the locks on the file for the current
22 * process. Since locks still depend on the process id, locks are inherited
23 * after an exec() but not after a fork(). This agrees with POSIX, and both
24 * BSD and SVR4 practice.
25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
27 * Scrapped free list which is redundant now that we allocate locks
28 * dynamically with kmalloc()/kfree().
29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
34 * fcntl() system call. They have the semantics described above.
36 * FL_FLOCK locks are created with calls to flock(), through the flock()
37 * system call, which is new. Old C libraries implement flock() via fcntl()
38 * and will continue to use the old, broken implementation.
40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41 * with a file pointer (filp). As a result they can be shared by a parent
42 * process and its children after a fork(). They are removed when the last
43 * file descriptor referring to the file pointer is closed (unless explicitly
44 * unlocked).
46 * FL_FLOCK locks never deadlock; an existing lock is always removed before
47 * upgrading from shared to exclusive (or vice versa). When this happens
48 * any processes blocked by the current lock are woken up and allowed to
49 * run before the new lock is applied.
50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
52 * Removed some race conditions in flock_lock_file(), marked other possible
53 * races. Just grep for FIXME to see them.
54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58 * once we've checked for blocking and deadlocking.
59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
61 * Initial implementation of mandatory locks. SunOS turned out to be
62 * a rotten model, so I implemented the "obvious" semantics.
63 * See 'Documentation/filesystems/mandatory-locking.txt' for details.
64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67 * check if a file has mandatory locks, used by mmap(), open() and creat() to
68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69 * Manual, Section 2.
70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
72 * Tidied up block list handling. Added '/proc/locks' interface.
73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
75 * Fixed deadlock condition for pathological code that mixes calls to
76 * flock() and fcntl().
77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81 * guarantee sensible behaviour in the case where file system modules might
82 * be compiled with different options than the kernel itself.
83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90 * locks. Changed process synchronisation to avoid dereferencing locks that
91 * have already been freed.
92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
94 * Made the block list a circular list to minimise searching in the list.
95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
97 * Made mandatory locking a mount option. Default is not to allow mandatory
98 * locking.
99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
101 * Some adaptations for NFS support.
102 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
107 * Use slab allocator instead of kmalloc/kfree.
108 * Use generic list implementation from <linux/list.h>.
109 * Sped up posix_locks_deadlock by only considering blocked locks.
110 * Matthew Wilcox <willy@debian.org>, March, 2000.
112 * Leases and LOCK_MAND
113 * Matthew Wilcox <willy@debian.org>, June, 2000.
114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
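/*
 * Illustrative userspace sketch of the two lock personalities described
 * above (assumes an already-open descriptor fd on a regular file):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 10 };
 *	fcntl(fd, F_SETLKW, &fl);	fcntl: FL_POSIX lock on bytes 0..9
 *	flock(fd, LOCK_EX);		flock: FL_FLOCK lock on the whole file
 *
 * A fork()ed child shares the flock() lock (it shares the struct file) but
 * not the fcntl() lock; a close() of any descriptor for the file drops all
 * of this process's fcntl() locks, while the flock() lock lasts until the
 * last file descriptor referring to that open file is closed.
 */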
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
130 #include <asm/uaccess.h>
132 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
136 static bool lease_breaking(struct file_lock *fl)
138 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
141 static int target_leasetype(struct file_lock *fl)
143 if (fl->fl_flags & FL_UNLOCK_PENDING)
144 return F_UNLCK;
145 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
146 return F_RDLCK;
147 return fl->fl_type;
150 int leases_enable = 1;
151 int lease_break_time = 45;
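/*
 * Both variables above are exposed as fs sysctls (leases-enable and
 * lease-break-time under /proc/sys/fs/); lease_break_time is in seconds.
 */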
153 #define for_each_lock(inode, lockp) \
154 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
156 static LIST_HEAD(file_lock_list);
157 static LIST_HEAD(blocked_list);
158 static DEFINE_SPINLOCK(file_lock_lock);
161 * Protects the two list heads above, plus the inode->i_flock list
163 void lock_flocks(void)
165 spin_lock(&file_lock_lock);
167 EXPORT_SYMBOL_GPL(lock_flocks);
169 void unlock_flocks(void)
171 spin_unlock(&file_lock_lock);
173 EXPORT_SYMBOL_GPL(unlock_flocks);
175 static struct kmem_cache *filelock_cache __read_mostly;
177 static void locks_init_lock_heads(struct file_lock *fl)
179 INIT_LIST_HEAD(&fl->fl_link);
180 INIT_LIST_HEAD(&fl->fl_block);
181 init_waitqueue_head(&fl->fl_wait);
184 /* Allocate an empty lock structure. */
185 struct file_lock *locks_alloc_lock(void)
187 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
189 if (fl)
190 locks_init_lock_heads(fl);
192 return fl;
194 EXPORT_SYMBOL_GPL(locks_alloc_lock);
196 void locks_release_private(struct file_lock *fl)
198 if (fl->fl_ops) {
199 if (fl->fl_ops->fl_release_private)
200 fl->fl_ops->fl_release_private(fl);
201 fl->fl_ops = NULL;
203 fl->fl_lmops = NULL;
206 EXPORT_SYMBOL_GPL(locks_release_private);
208 /* Free a lock which is not in use. */
209 void locks_free_lock(struct file_lock *fl)
211 BUG_ON(waitqueue_active(&fl->fl_wait));
212 BUG_ON(!list_empty(&fl->fl_block));
213 BUG_ON(!list_empty(&fl->fl_link));
215 locks_release_private(fl);
216 kmem_cache_free(filelock_cache, fl);
218 EXPORT_SYMBOL(locks_free_lock);
220 void locks_init_lock(struct file_lock *fl)
222 memset(fl, 0, sizeof(struct file_lock));
223 locks_init_lock_heads(fl);
226 EXPORT_SYMBOL(locks_init_lock);
228 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
230 if (fl->fl_ops) {
231 if (fl->fl_ops->fl_copy_lock)
232 fl->fl_ops->fl_copy_lock(new, fl);
233 new->fl_ops = fl->fl_ops;
235 if (fl->fl_lmops)
236 new->fl_lmops = fl->fl_lmops;
240 * Initialize a new lock from an existing file_lock structure.
242 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
244 new->fl_owner = fl->fl_owner;
245 new->fl_pid = fl->fl_pid;
246 new->fl_file = NULL;
247 new->fl_flags = fl->fl_flags;
248 new->fl_type = fl->fl_type;
249 new->fl_start = fl->fl_start;
250 new->fl_end = fl->fl_end;
251 new->fl_ops = NULL;
252 new->fl_lmops = NULL;
254 EXPORT_SYMBOL(__locks_copy_lock);
256 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
258 locks_release_private(new);
260 __locks_copy_lock(new, fl);
261 new->fl_file = fl->fl_file;
262 new->fl_ops = fl->fl_ops;
263 new->fl_lmops = fl->fl_lmops;
265 locks_copy_private(new, fl);
268 EXPORT_SYMBOL(locks_copy_lock);
270 static inline int flock_translate_cmd(int cmd) {
271 if (cmd & LOCK_MAND)
272 return cmd & (LOCK_MAND | LOCK_RW);
273 switch (cmd) {
274 case LOCK_SH:
275 return F_RDLCK;
276 case LOCK_EX:
277 return F_WRLCK;
278 case LOCK_UN:
279 return F_UNLCK;
281 return -EINVAL;
284 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
285 static int flock_make_lock(struct file *filp, struct file_lock **lock,
286 unsigned int cmd)
288 struct file_lock *fl;
289 int type = flock_translate_cmd(cmd);
290 if (type < 0)
291 return type;
293 fl = locks_alloc_lock();
294 if (fl == NULL)
295 return -ENOMEM;
297 fl->fl_file = filp;
298 fl->fl_pid = current->tgid;
299 fl->fl_flags = FL_FLOCK;
300 fl->fl_type = type;
301 fl->fl_end = OFFSET_MAX;
303 *lock = fl;
304 return 0;
307 static int assign_type(struct file_lock *fl, long type)
309 switch (type) {
310 case F_RDLCK:
311 case F_WRLCK:
312 case F_UNLCK:
313 fl->fl_type = type;
314 break;
315 default:
316 return -EINVAL;
318 return 0;
321 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
322 * style lock.
324 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
325 struct flock *l)
327 off_t start, end;
329 switch (l->l_whence) {
330 case SEEK_SET:
331 start = 0;
332 break;
333 case SEEK_CUR:
334 start = filp->f_pos;
335 break;
336 case SEEK_END:
337 start = i_size_read(file_inode(filp));
338 break;
339 default:
340 return -EINVAL;
343 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
344 POSIX-2001 defines it. */
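	/*
	 * Worked example (illustrative): with l_whence = SEEK_SET,
	 * l_start = 100 and l_len = 10 the lock covers bytes 100..109;
	 * with l_len = -10 it covers bytes 90..99 (the l_len < 0 case
	 * handled below); with l_len = 0 it covers l_start through the
	 * end of the file (fl_end stays at OFFSET_MAX).
	 */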
345 start += l->l_start;
346 if (start < 0)
347 return -EINVAL;
348 fl->fl_end = OFFSET_MAX;
349 if (l->l_len > 0) {
350 end = start + l->l_len - 1;
351 fl->fl_end = end;
352 } else if (l->l_len < 0) {
353 end = start - 1;
354 fl->fl_end = end;
355 start += l->l_len;
356 if (start < 0)
357 return -EINVAL;
359 fl->fl_start = start; /* we record the absolute position */
360 if (fl->fl_end < fl->fl_start)
361 return -EOVERFLOW;
363 fl->fl_owner = current->files;
364 fl->fl_pid = current->tgid;
365 fl->fl_file = filp;
366 fl->fl_flags = FL_POSIX;
367 fl->fl_ops = NULL;
368 fl->fl_lmops = NULL;
370 return assign_type(fl, l->l_type);
373 #if BITS_PER_LONG == 32
374 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
375 struct flock64 *l)
377 loff_t start;
379 switch (l->l_whence) {
380 case SEEK_SET:
381 start = 0;
382 break;
383 case SEEK_CUR:
384 start = filp->f_pos;
385 break;
386 case SEEK_END:
387 start = i_size_read(file_inode(filp));
388 break;
389 default:
390 return -EINVAL;
393 start += l->l_start;
394 if (start < 0)
395 return -EINVAL;
396 fl->fl_end = OFFSET_MAX;
397 if (l->l_len > 0) {
398 fl->fl_end = start + l->l_len - 1;
399 } else if (l->l_len < 0) {
400 fl->fl_end = start - 1;
401 start += l->l_len;
402 if (start < 0)
403 return -EINVAL;
405 fl->fl_start = start; /* we record the absolute position */
406 if (fl->fl_end < fl->fl_start)
407 return -EOVERFLOW;
409 fl->fl_owner = current->files;
410 fl->fl_pid = current->tgid;
411 fl->fl_file = filp;
412 fl->fl_flags = FL_POSIX;
413 fl->fl_ops = NULL;
414 fl->fl_lmops = NULL;
416 return assign_type(fl, l->l_type);
418 #endif
420 /* default lease lock manager operations */
421 static void lease_break_callback(struct file_lock *fl)
423 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
426 static const struct lock_manager_operations lease_manager_ops = {
427 .lm_break = lease_break_callback,
428 .lm_change = lease_modify,
432 * Initialize a lease, use the default lock manager operations
434 static int lease_init(struct file *filp, long type, struct file_lock *fl)
436 if (assign_type(fl, type) != 0)
437 return -EINVAL;
439 fl->fl_owner = current->files;
440 fl->fl_pid = current->tgid;
442 fl->fl_file = filp;
443 fl->fl_flags = FL_LEASE;
444 fl->fl_start = 0;
445 fl->fl_end = OFFSET_MAX;
446 fl->fl_ops = NULL;
447 fl->fl_lmops = &lease_manager_ops;
448 return 0;
451 /* Allocate a file_lock initialised to this type of lease */
452 static struct file_lock *lease_alloc(struct file *filp, long type)
454 struct file_lock *fl = locks_alloc_lock();
455 int error = -ENOMEM;
457 if (fl == NULL)
458 return ERR_PTR(error);
460 error = lease_init(filp, type, fl);
461 if (error) {
462 locks_free_lock(fl);
463 return ERR_PTR(error);
465 return fl;
468 /* Check if two locks overlap each other.
470 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
472 return ((fl1->fl_end >= fl2->fl_start) &&
473 (fl2->fl_end >= fl1->fl_start));
477 * Check whether two locks have the same owner.
479 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
481 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
482 return fl2->fl_lmops == fl1->fl_lmops &&
483 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
484 return fl1->fl_owner == fl2->fl_owner;
487 /* Remove waiter from blocker's block list.
488 * When blocker ends up pointing to itself then the list is empty.
490 static void __locks_delete_block(struct file_lock *waiter)
492 list_del_init(&waiter->fl_block);
493 list_del_init(&waiter->fl_link);
494 waiter->fl_next = NULL;
499 void locks_delete_block(struct file_lock *waiter)
501 lock_flocks();
502 __locks_delete_block(waiter);
503 unlock_flocks();
505 EXPORT_SYMBOL(locks_delete_block);
507 /* Insert waiter into blocker's block list.
508 * We use a circular list so that processes can be easily woken up in
509 * the order they blocked. The documentation doesn't require this but
510 * it seems like the reasonable thing to do.
512 static void locks_insert_block(struct file_lock *blocker,
513 struct file_lock *waiter)
515 BUG_ON(!list_empty(&waiter->fl_block));
516 list_add_tail(&waiter->fl_block, &blocker->fl_block);
517 waiter->fl_next = blocker;
518 if (IS_POSIX(blocker))
519 list_add(&waiter->fl_link, &blocked_list);
522 /* Wake up processes blocked waiting for blocker.
523 * If told to wait then schedule the processes until the block list
524 * is empty, otherwise empty the block list ourselves.
526 static void locks_wake_up_blocks(struct file_lock *blocker)
528 while (!list_empty(&blocker->fl_block)) {
529 struct file_lock *waiter;
531 waiter = list_first_entry(&blocker->fl_block,
532 struct file_lock, fl_block);
533 __locks_delete_block(waiter);
534 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
535 waiter->fl_lmops->lm_notify(waiter);
536 else
537 wake_up(&waiter->fl_wait);
541 /* Insert file lock fl into an inode's lock list at the position indicated
542 * by pos. At the same time add the lock to the global file lock list.
544 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
546 list_add(&fl->fl_link, &file_lock_list);
548 fl->fl_nspid = get_pid(task_tgid(current));
550 /* insert into file's list */
551 fl->fl_next = *pos;
552 *pos = fl;
556 * Delete a lock and then free it.
557 * Wake up processes that are blocked waiting for this lock,
558 * notify the FS that the lock has been cleared and
559 * finally free the lock.
561 static void locks_delete_lock(struct file_lock **thisfl_p)
563 struct file_lock *fl = *thisfl_p;
565 *thisfl_p = fl->fl_next;
566 fl->fl_next = NULL;
567 list_del_init(&fl->fl_link);
569 if (fl->fl_nspid) {
570 put_pid(fl->fl_nspid);
571 fl->fl_nspid = NULL;
574 locks_wake_up_blocks(fl);
575 locks_free_lock(fl);
578 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
579 * checks for shared/exclusive status of overlapping locks.
581 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
583 if (sys_fl->fl_type == F_WRLCK)
584 return 1;
585 if (caller_fl->fl_type == F_WRLCK)
586 return 1;
587 return 0;
590 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
591 * checking before calling locks_conflict().
593 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
595 /* POSIX locks owned by the same process do not conflict with
596 * each other.
598 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
599 return (0);
601 /* Check whether they overlap */
602 if (!locks_overlap(caller_fl, sys_fl))
603 return 0;
605 return (locks_conflict(caller_fl, sys_fl));
608 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
609 * checking before calling locks_conflict().
611 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
613 /* FLOCK locks referring to the same filp do not conflict with
614 * each other.
616 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
617 return (0);
618 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
619 return 0;
621 return (locks_conflict(caller_fl, sys_fl));
624 void
625 posix_test_lock(struct file *filp, struct file_lock *fl)
627 struct file_lock *cfl;
629 lock_flocks();
630 for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
631 if (!IS_POSIX(cfl))
632 continue;
633 if (posix_locks_conflict(fl, cfl))
634 break;
636 if (cfl) {
637 __locks_copy_lock(fl, cfl);
638 if (cfl->fl_nspid)
639 fl->fl_pid = pid_vnr(cfl->fl_nspid);
640 } else
641 fl->fl_type = F_UNLCK;
642 unlock_flocks();
643 return;
645 EXPORT_SYMBOL(posix_test_lock);
648 * Deadlock detection:
650 * We attempt to detect deadlocks that are due purely to posix file
651 * locks.
653 * We assume that a task can be waiting for at most one lock at a time.
654 * So for any acquired lock, the process holding that lock may be
655 * waiting on at most one other lock. That lock in turn may be held by
656 * someone waiting for at most one other lock. Given a requested lock
657 * caller_fl which is about to wait for a conflicting lock block_fl, we
658 * follow this chain of waiters to ensure we are not about to create a
659 * cycle.
661 * Since we do this before we ever put a process to sleep on a lock, we
662 * are ensured that there is never a cycle; that is what guarantees that
663 * the while() loop in posix_locks_deadlock() eventually completes.
665 * Note: the above assumption may not be true when handling lock
666 * requests from a broken NFS client. It may also fail in the presence
667 * of tasks (such as posix threads) sharing the same open file table.
669 * To handle those cases, we just bail out after a few iterations.
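/*
 * Worked example (illustrative): two processes blocking on the same file,
 *
 *	process A				process B
 *	  F_SETLKW bytes  0..9   granted	  F_SETLKW bytes 10..19  granted
 *	  F_SETLKW bytes 10..19  blocks		  F_SETLKW bytes  0..9   -> EDEADLK
 *
 * B's request conflicts with A's 0..9 lock; walking the chain from that
 * lock shows that its owner (A) is itself waiting on a lock owned by B,
 * so putting B to sleep would create a cycle and the fcntl() fails with
 * EDEADLK instead of sleeping.
 */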
672 #define MAX_DEADLK_ITERATIONS 10
674 /* Find a lock that the owner of the given block_fl is blocking on. */
675 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
677 struct file_lock *fl;
679 list_for_each_entry(fl, &blocked_list, fl_link) {
680 if (posix_same_owner(fl, block_fl))
681 return fl->fl_next;
683 return NULL;
686 static int posix_locks_deadlock(struct file_lock *caller_fl,
687 struct file_lock *block_fl)
689 int i = 0;
691 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
692 if (i++ > MAX_DEADLK_ITERATIONS)
693 return 0;
694 if (posix_same_owner(caller_fl, block_fl))
695 return 1;
697 return 0;
700 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
701 * after any leases, but before any posix locks.
703 * Note that if called with an FL_EXISTS argument, the caller may determine
704 * whether or not a lock was successfully freed by testing the return
705 * value for -ENOENT.
707 static int flock_lock_file(struct file *filp, struct file_lock *request)
709 struct file_lock *new_fl = NULL;
710 struct file_lock **before;
711 struct inode * inode = file_inode(filp);
712 int error = 0;
713 int found = 0;
715 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
716 new_fl = locks_alloc_lock();
717 if (!new_fl)
718 return -ENOMEM;
721 lock_flocks();
722 if (request->fl_flags & FL_ACCESS)
723 goto find_conflict;
725 for_each_lock(inode, before) {
726 struct file_lock *fl = *before;
727 if (IS_POSIX(fl))
728 break;
729 if (IS_LEASE(fl))
730 continue;
731 if (filp != fl->fl_file)
732 continue;
733 if (request->fl_type == fl->fl_type)
734 goto out;
735 found = 1;
736 locks_delete_lock(before);
737 break;
740 if (request->fl_type == F_UNLCK) {
741 if ((request->fl_flags & FL_EXISTS) && !found)
742 error = -ENOENT;
743 goto out;
747 * If a higher-priority process was blocked on the old file lock,
748 * give it the opportunity to lock the file.
750 if (found) {
751 unlock_flocks();
752 cond_resched();
753 lock_flocks();
756 find_conflict:
757 for_each_lock(inode, before) {
758 struct file_lock *fl = *before;
759 if (IS_POSIX(fl))
760 break;
761 if (IS_LEASE(fl))
762 continue;
763 if (!flock_locks_conflict(request, fl))
764 continue;
765 error = -EAGAIN;
766 if (!(request->fl_flags & FL_SLEEP))
767 goto out;
768 error = FILE_LOCK_DEFERRED;
769 locks_insert_block(fl, request);
770 goto out;
772 if (request->fl_flags & FL_ACCESS)
773 goto out;
774 locks_copy_lock(new_fl, request);
775 locks_insert_lock(before, new_fl);
776 new_fl = NULL;
777 error = 0;
779 out:
780 unlock_flocks();
781 if (new_fl)
782 locks_free_lock(new_fl);
783 return error;
786 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
788 struct file_lock *fl;
789 struct file_lock *new_fl = NULL;
790 struct file_lock *new_fl2 = NULL;
791 struct file_lock *left = NULL;
792 struct file_lock *right = NULL;
793 struct file_lock **before;
794 int error, added = 0;
797 * We may need two file_lock structures for this operation,
798 * so we get them in advance to avoid races.
800 * In some cases we can be sure, that no new locks will be needed
802 if (!(request->fl_flags & FL_ACCESS) &&
803 (request->fl_type != F_UNLCK ||
804 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
805 new_fl = locks_alloc_lock();
806 new_fl2 = locks_alloc_lock();
809 lock_flocks();
810 if (request->fl_type != F_UNLCK) {
811 for_each_lock(inode, before) {
812 fl = *before;
813 if (!IS_POSIX(fl))
814 continue;
815 if (!posix_locks_conflict(request, fl))
816 continue;
817 if (conflock)
818 __locks_copy_lock(conflock, fl);
819 error = -EAGAIN;
820 if (!(request->fl_flags & FL_SLEEP))
821 goto out;
822 error = -EDEADLK;
823 if (posix_locks_deadlock(request, fl))
824 goto out;
825 error = FILE_LOCK_DEFERRED;
826 locks_insert_block(fl, request);
827 goto out;
831 /* If we're just looking for a conflict, we're done. */
832 error = 0;
833 if (request->fl_flags & FL_ACCESS)
834 goto out;
837 * Find the first old lock with the same owner as the new lock.
840 before = &inode->i_flock;
842 /* First skip locks owned by other processes. */
843 while ((fl = *before) && (!IS_POSIX(fl) ||
844 !posix_same_owner(request, fl))) {
845 before = &fl->fl_next;
848 /* Process locks with this owner. */
849 while ((fl = *before) && posix_same_owner(request, fl)) {
850 /* Detect adjacent or overlapping regions (if same lock type)
852 if (request->fl_type == fl->fl_type) {
853 /* In all comparisons of start vs end, use
854 * "start - 1" rather than "end + 1". If end
855 * is OFFSET_MAX, end + 1 will become negative.
857 if (fl->fl_end < request->fl_start - 1)
858 goto next_lock;
859 /* If the next lock in the list has entirely bigger
860 * addresses than the new one, insert the lock here.
862 if (fl->fl_start - 1 > request->fl_end)
863 break;
865 /* If we come here, the new and old lock are of the
866 * same type and adjacent or overlapping. Make one
867 * lock yielding from the lower start address of both
868 * locks to the higher end address.
870 if (fl->fl_start > request->fl_start)
871 fl->fl_start = request->fl_start;
872 else
873 request->fl_start = fl->fl_start;
874 if (fl->fl_end < request->fl_end)
875 fl->fl_end = request->fl_end;
876 else
877 request->fl_end = fl->fl_end;
878 if (added) {
879 locks_delete_lock(before);
880 continue;
882 request = fl;
883 added = 1;
885 else {
886 /* Processing for different lock types is a bit
887 * more complex.
889 if (fl->fl_end < request->fl_start)
890 goto next_lock;
891 if (fl->fl_start > request->fl_end)
892 break;
893 if (request->fl_type == F_UNLCK)
894 added = 1;
895 if (fl->fl_start < request->fl_start)
896 left = fl;
897 /* If the next lock in the list has a higher end
898 * address than the new one, insert the new one here.
900 if (fl->fl_end > request->fl_end) {
901 right = fl;
902 break;
904 if (fl->fl_start >= request->fl_start) {
905 /* The new lock completely replaces an old
906 * one (This may happen several times).
908 if (added) {
909 locks_delete_lock(before);
910 continue;
912 /* Replace the old lock with the new one.
913 * Wake up anybody waiting for the old one,
914 * as the change in lock type might satisfy
915 * their needs.
917 locks_wake_up_blocks(fl);
918 fl->fl_start = request->fl_start;
919 fl->fl_end = request->fl_end;
920 fl->fl_type = request->fl_type;
921 locks_release_private(fl);
922 locks_copy_private(fl, request);
923 request = fl;
924 added = 1;
927 /* Go on to next lock.
929 next_lock:
930 before = &fl->fl_next;
934 * The above code only modifies existing locks in case of
935 * merging or replacing. If new lock(s) need to be inserted
936 * all modifications are done below this point, so it is still
937 * safe to bail out.
939 error = -ENOLCK; /* "no luck" */
940 if (right && left == right && !new_fl2)
941 goto out;
943 error = 0;
944 if (!added) {
945 if (request->fl_type == F_UNLCK) {
946 if (request->fl_flags & FL_EXISTS)
947 error = -ENOENT;
948 goto out;
951 if (!new_fl) {
952 error = -ENOLCK;
953 goto out;
955 locks_copy_lock(new_fl, request);
956 locks_insert_lock(before, new_fl);
957 new_fl = NULL;
959 if (right) {
960 if (left == right) {
961 /* The new lock breaks the old one in two pieces,
962 * so we have to use the second new lock.
964 left = new_fl2;
965 new_fl2 = NULL;
966 locks_copy_lock(left, right);
967 locks_insert_lock(before, left);
969 right->fl_start = request->fl_end + 1;
970 locks_wake_up_blocks(right);
972 if (left) {
973 left->fl_end = request->fl_start - 1;
974 locks_wake_up_blocks(left);
976 out:
977 unlock_flocks();
979 * Free any unused locks.
981 if (new_fl)
982 locks_free_lock(new_fl);
983 if (new_fl2)
984 locks_free_lock(new_fl2);
985 return error;
989 * posix_lock_file - Apply a POSIX-style lock to a file
990 * @filp: The file to apply the lock to
991 * @fl: The lock to be applied
992 * @conflock: Place to return a copy of the conflicting lock, if found.
994 * Add a POSIX style lock to a file.
995 * We merge adjacent & overlapping locks whenever possible.
996 * POSIX locks are sorted by owner task, then by starting address
998 * Note that if called with an FL_EXISTS argument, the caller may determine
999 * whether or not a lock was successfully freed by testing the return
1000 * value for -ENOENT.
1002 int posix_lock_file(struct file *filp, struct file_lock *fl,
1003 struct file_lock *conflock)
1005 return __posix_lock_file(file_inode(filp), fl, conflock);
1007 EXPORT_SYMBOL(posix_lock_file);
1010 * posix_lock_file_wait - Apply a POSIX-style lock to a file
1011 * @filp: The file to apply the lock to
1012 * @fl: The lock to be applied
1014 * Add a POSIX style lock to a file.
1015 * We merge adjacent & overlapping locks whenever possible.
1016 * POSIX locks are sorted by owner task, then by starting address
1018 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1020 int error;
1021 might_sleep ();
1022 for (;;) {
1023 error = posix_lock_file(filp, fl, NULL);
1024 if (error != FILE_LOCK_DEFERRED)
1025 break;
1026 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1027 if (!error)
1028 continue;
1030 locks_delete_block(fl);
1031 break;
1033 return error;
1035 EXPORT_SYMBOL(posix_lock_file_wait);
1038 * locks_mandatory_locked - Check for an active lock
1039 * @inode: the file to check
1041 * Searches the inode's list of locks to find any POSIX locks which conflict.
1042 * This function is called from locks_verify_locked() only.
1044 int locks_mandatory_locked(struct inode *inode)
1046 fl_owner_t owner = current->files;
1047 struct file_lock *fl;
1050 * Search the lock list for this inode for any POSIX locks.
1052 lock_flocks();
1053 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1054 if (!IS_POSIX(fl))
1055 continue;
1056 if (fl->fl_owner != owner)
1057 break;
1059 unlock_flocks();
1060 return fl ? -EAGAIN : 0;
1064 * locks_mandatory_area - Check for a conflicting lock
1065 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1066 * for shared
1067 * @inode: the file to check
1068 * @filp: how the file was opened (if it was)
1069 * @offset: start of area to check
1070 * @count: length of area to check
1072 * Searches the inode's list of locks to find any POSIX locks which conflict.
1073 * This function is called from rw_verify_area() and
1074 * locks_verify_truncate().
1076 int locks_mandatory_area(int read_write, struct inode *inode,
1077 struct file *filp, loff_t offset,
1078 size_t count)
1080 struct file_lock fl;
1081 int error;
1083 locks_init_lock(&fl);
1084 fl.fl_owner = current->files;
1085 fl.fl_pid = current->tgid;
1086 fl.fl_file = filp;
1087 fl.fl_flags = FL_POSIX | FL_ACCESS;
1088 if (filp && !(filp->f_flags & O_NONBLOCK))
1089 fl.fl_flags |= FL_SLEEP;
1090 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1091 fl.fl_start = offset;
1092 fl.fl_end = offset + count - 1;
1094 for (;;) {
1095 error = __posix_lock_file(inode, &fl, NULL);
1096 if (error != FILE_LOCK_DEFERRED)
1097 break;
1098 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1099 if (!error) {
1101 * If we've been sleeping someone might have
1102 * changed the permissions behind our back.
1104 if (__mandatory_lock(inode))
1105 continue;
1108 locks_delete_block(&fl);
1109 break;
1112 return error;
1115 EXPORT_SYMBOL(locks_mandatory_area);
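/*
 * Note (illustrative): the two helpers above only have teeth when mandatory
 * locking is actually in effect, i.e. the filesystem is mounted with the
 * "mand" option and the file has the setgid bit set with group execute
 * clear (chmod g+s,g-x file); only then do conflicting POSIX locks really
 * block read() and write() on the region, as described in
 * Documentation/filesystems/mandatory-locking.txt.
 */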
1117 static void lease_clear_pending(struct file_lock *fl, int arg)
1119 switch (arg) {
1120 case F_UNLCK:
1121 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1122 /* fall through: */
1123 case F_RDLCK:
1124 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1128 /* We already had a lease on this file; just change its type */
1129 int lease_modify(struct file_lock **before, int arg)
1131 struct file_lock *fl = *before;
1132 int error = assign_type(fl, arg);
1134 if (error)
1135 return error;
1136 lease_clear_pending(fl, arg);
1137 locks_wake_up_blocks(fl);
1138 if (arg == F_UNLCK) {
1139 struct file *filp = fl->fl_file;
1141 f_delown(filp);
1142 filp->f_owner.signum = 0;
1143 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1144 if (fl->fl_fasync != NULL) {
1145 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1146 fl->fl_fasync = NULL;
1148 locks_delete_lock(before);
1150 return 0;
1153 EXPORT_SYMBOL(lease_modify);
1155 static bool past_time(unsigned long then)
1157 if (!then)
1158 /* 0 is a special value meaning "this never expires": */
1159 return false;
1160 return time_after(jiffies, then);
1163 static void time_out_leases(struct inode *inode)
1165 struct file_lock **before;
1166 struct file_lock *fl;
1168 before = &inode->i_flock;
1169 while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1170 if (past_time(fl->fl_downgrade_time))
1171 lease_modify(before, F_RDLCK);
1172 if (past_time(fl->fl_break_time))
1173 lease_modify(before, F_UNLCK);
1174 if (fl == *before) /* lease_modify may have freed fl */
1175 before = &fl->fl_next;
1180 * __break_lease - revoke all outstanding leases on file
1181 * @inode: the inode of the file to return
1182 * @mode: the open mode (read or write)
1184 * break_lease (inlined for speed) has checked there already is at least
1185 * some kind of lock (maybe a lease) on this file. Leases are broken on
1186 * a call to open() or truncate(). This function can sleep unless you
1187 * specified %O_NONBLOCK to your open().
1189 int __break_lease(struct inode *inode, unsigned int mode)
1191 int error = 0;
1192 struct file_lock *new_fl, *flock;
1193 struct file_lock *fl;
1194 unsigned long break_time;
1195 int i_have_this_lease = 0;
1196 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1198 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1199 if (IS_ERR(new_fl))
1200 return PTR_ERR(new_fl);
1202 lock_flocks();
1204 time_out_leases(inode);
1206 flock = inode->i_flock;
1207 if ((flock == NULL) || !IS_LEASE(flock))
1208 goto out;
1210 if (!locks_conflict(flock, new_fl))
1211 goto out;
1213 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1214 if (fl->fl_owner == current->files)
1215 i_have_this_lease = 1;
1217 break_time = 0;
1218 if (lease_break_time > 0) {
1219 break_time = jiffies + lease_break_time * HZ;
1220 if (break_time == 0)
1221 break_time++; /* so that 0 means no break time */
1224 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1225 if (want_write) {
1226 if (fl->fl_flags & FL_UNLOCK_PENDING)
1227 continue;
1228 fl->fl_flags |= FL_UNLOCK_PENDING;
1229 fl->fl_break_time = break_time;
1230 } else {
1231 if (lease_breaking(flock))
1232 continue;
1233 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1234 fl->fl_downgrade_time = break_time;
1236 fl->fl_lmops->lm_break(fl);
1239 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1240 error = -EWOULDBLOCK;
1241 goto out;
1244 restart:
1245 break_time = flock->fl_break_time;
1246 if (break_time != 0) {
1247 break_time -= jiffies;
1248 if (break_time == 0)
1249 break_time++;
1251 locks_insert_block(flock, new_fl);
1252 unlock_flocks();
1253 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1254 !new_fl->fl_next, break_time);
1255 lock_flocks();
1256 __locks_delete_block(new_fl);
1257 if (error >= 0) {
1258 if (error == 0)
1259 time_out_leases(inode);
1261 * Wait for the next conflicting lease that has not been
1262 * broken yet
1264 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1265 flock = flock->fl_next) {
1266 if (locks_conflict(new_fl, flock))
1267 goto restart;
1269 error = 0;
1272 out:
1273 unlock_flocks();
1274 locks_free_lock(new_fl);
1275 return error;
1278 EXPORT_SYMBOL(__break_lease);
1281 * lease_get_mtime - get the last modified time of an inode
1282 * @inode: the inode
1283 * @time: pointer to a timespec which will contain the last modified time
1285 * This is to force NFS clients to flush their caches for files with
1286 * exclusive leases. The justification is that if someone has an
1287 * exclusive lease, then they could be modifying it.
1289 void lease_get_mtime(struct inode *inode, struct timespec *time)
1291 struct file_lock *flock = inode->i_flock;
1292 if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
1293 *time = current_fs_time(inode->i_sb);
1294 else
1295 *time = inode->i_mtime;
1298 EXPORT_SYMBOL(lease_get_mtime);
1301 * fcntl_getlease - Enquire what lease is currently active
1302 * @filp: the file
1304 * The value returned by this function will be one of
1305 * (if no lease break is pending):
1307 * %F_RDLCK to indicate a shared lease is held.
1309 * %F_WRLCK to indicate an exclusive lease is held.
1311 * %F_UNLCK to indicate no lease is held.
1313 * (if a lease break is pending):
1315 * %F_RDLCK to indicate an exclusive lease needs to be
1316 * changed to a shared lease (or removed).
1318 * %F_UNLCK to indicate the lease needs to be removed.
1320 * XXX: sfr & willy disagree over whether F_INPROGRESS
1321 * should be returned to userspace.
1323 int fcntl_getlease(struct file *filp)
1325 struct file_lock *fl;
1326 int type = F_UNLCK;
1328 lock_flocks();
1329 time_out_leases(file_inode(filp));
1330 for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
1331 fl = fl->fl_next) {
1332 if (fl->fl_file == filp) {
1333 type = target_leasetype(fl);
1334 break;
1337 unlock_flocks();
1338 return type;
1341 int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
1343 struct file_lock *fl, **before, **my_before = NULL, *lease;
1344 struct dentry *dentry = filp->f_path.dentry;
1345 struct inode *inode = dentry->d_inode;
1346 int error;
1348 lease = *flp;
1350 error = -EAGAIN;
1351 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1352 goto out;
1353 if ((arg == F_WRLCK)
1354 && ((dentry->d_count > 1)
1355 || (atomic_read(&inode->i_count) > 1)))
1356 goto out;
1359 * At this point, we know that if there is an exclusive
1360 * lease on this file, then we hold it on this filp
1361 * (otherwise our open of this file would have blocked).
1362 * And if we are trying to acquire an exclusive lease,
1363 * then the file is not open by anyone (including us)
1364 * except for this filp.
1366 error = -EAGAIN;
1367 for (before = &inode->i_flock;
1368 ((fl = *before) != NULL) && IS_LEASE(fl);
1369 before = &fl->fl_next) {
1370 if (fl->fl_file == filp) {
1371 my_before = before;
1372 continue;
1375 * No exclusive leases if someone else has a lease on
1376 * this file:
1378 if (arg == F_WRLCK)
1379 goto out;
1381 * Modifying our existing lease is OK, but not getting a
1382 * new lease if someone else is opening for write:
1384 if (fl->fl_flags & FL_UNLOCK_PENDING)
1385 goto out;
1388 if (my_before != NULL) {
1389 error = lease->fl_lmops->lm_change(my_before, arg);
1390 if (!error)
1391 *flp = *my_before;
1392 goto out;
1395 error = -EINVAL;
1396 if (!leases_enable)
1397 goto out;
1399 locks_insert_lock(before, lease);
1400 return 0;
1402 out:
1403 return error;
1406 int generic_delete_lease(struct file *filp, struct file_lock **flp)
1408 struct file_lock *fl, **before;
1409 struct dentry *dentry = filp->f_path.dentry;
1410 struct inode *inode = dentry->d_inode;
1412 for (before = &inode->i_flock;
1413 ((fl = *before) != NULL) && IS_LEASE(fl);
1414 before = &fl->fl_next) {
1415 if (fl->fl_file != filp)
1416 continue;
1417 return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
1419 return -EAGAIN;
1423 * generic_setlease - sets a lease on an open file
1424 * @filp: file pointer
1425 * @arg: type of lease to obtain
1426 * @flp: input - file_lock to use, output - file_lock inserted
1428 * The (input) flp->fl_lmops->lm_break function is required
1429 * by break_lease().
1431 * Called with file_lock_lock held.
1433 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1435 struct dentry *dentry = filp->f_path.dentry;
1436 struct inode *inode = dentry->d_inode;
1437 int error;
1439 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1440 return -EACCES;
1441 if (!S_ISREG(inode->i_mode))
1442 return -EINVAL;
1443 error = security_file_lock(filp, arg);
1444 if (error)
1445 return error;
1447 time_out_leases(inode);
1449 BUG_ON(!(*flp)->fl_lmops->lm_break);
1451 switch (arg) {
1452 case F_UNLCK:
1453 return generic_delete_lease(filp, flp);
1454 case F_RDLCK:
1455 case F_WRLCK:
1456 return generic_add_lease(filp, arg, flp);
1457 default:
1458 return -EINVAL;
1461 EXPORT_SYMBOL(generic_setlease);
1463 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1465 if (filp->f_op && filp->f_op->setlease)
1466 return filp->f_op->setlease(filp, arg, lease);
1467 else
1468 return generic_setlease(filp, arg, lease);
1472 * vfs_setlease - sets a lease on an open file
1473 * @filp: file pointer
1474 * @arg: type of lease to obtain
1475 * @lease: file_lock to use
1477 * Call this to establish a lease on the file.
1478 * The (*lease)->fl_lmops->lm_break operation must be set; if not,
1479 * break_lease will oops!
1481 * This will call the filesystem's setlease file method, if
1482 * defined. Note that there is no getlease method; instead, the
1483 * filesystem setlease method should call back to setlease() to
1484 * add a lease to the inode's lease list, where fcntl_getlease() can
1485 * find it. Since fcntl_getlease() only reports whether the current
1486 * task holds a lease, a cluster filesystem need only do this for
1487 * leases held by processes on this node.
1489 * There is also no break_lease method; filesystems that
1490 * handle their own leases should break leases themselves from the
1491 * filesystem's open, create, and (on truncate) setattr methods.
1493 * Warning: the only current setlease methods exist only to disable
1494 * leases in certain cases. More vfs changes may be required to
1495 * allow a full filesystem lease implementation.
1498 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1500 int error;
1502 lock_flocks();
1503 error = __vfs_setlease(filp, arg, lease);
1504 unlock_flocks();
1506 return error;
1508 EXPORT_SYMBOL_GPL(vfs_setlease);
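/*
 * Minimal sketch (hypothetical filesystem, not from this tree): a ->setlease
 * method that simply refuses leases, which is all the current setlease
 * methods do, per the warning above.
 *
 *	static int examplefs_setlease(struct file *filp, long arg,
 *				      struct file_lock **flp)
 *	{
 *		return -EINVAL;		examplefs does not support leases
 *	}
 *
 * A filesystem that wanted real leases would instead call back into
 * generic_setlease() so the lease ends up on the inode's lease list, where
 * fcntl_getlease() and __break_lease() can find it.
 */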
1510 static int do_fcntl_delete_lease(struct file *filp)
1512 struct file_lock fl, *flp = &fl;
1514 lease_init(filp, F_UNLCK, flp);
1516 return vfs_setlease(filp, F_UNLCK, &flp);
1519 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1521 struct file_lock *fl, *ret;
1522 struct fasync_struct *new;
1523 int error;
1525 fl = lease_alloc(filp, arg);
1526 if (IS_ERR(fl))
1527 return PTR_ERR(fl);
1529 new = fasync_alloc();
1530 if (!new) {
1531 locks_free_lock(fl);
1532 return -ENOMEM;
1534 ret = fl;
1535 lock_flocks();
1536 error = __vfs_setlease(filp, arg, &ret);
1537 if (error) {
1538 unlock_flocks();
1539 locks_free_lock(fl);
1540 goto out_free_fasync;
1542 if (ret != fl)
1543 locks_free_lock(fl);
1546 * fasync_insert_entry() returns the old entry if any.
1547 * If there was no old entry, then it used 'new' and
1548 * inserted it into the fasync list. Clear new so that
1549 * we don't release it here.
1551 if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1552 new = NULL;
1554 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1555 unlock_flocks();
1557 out_free_fasync:
1558 if (new)
1559 fasync_free(new);
1560 return error;
1564 * fcntl_setlease - sets a lease on an open file
1565 * @fd: open file descriptor
1566 * @filp: file pointer
1567 * @arg: type of lease to obtain
1569 * Call this fcntl to establish a lease on the file.
1570 * Note that you also need to call %F_SETSIG to
1571 * receive a signal when the lease is broken.
1573 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1575 if (arg == F_UNLCK)
1576 return do_fcntl_delete_lease(filp);
1577 return do_fcntl_add_lease(fd, filp, arg);
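/*
 * Illustrative userspace sketch: taking and releasing a lease via fcntl(),
 * with F_SETSIG selecting the signal used for the lease-break notification.
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		optional; the default is SIGIO
 *	if (fcntl(fd, F_SETLEASE, F_WRLCK) == 0) {
 *		... exclusive lease held; a conflicting open()/truncate()
 *		    delivers the signal and starts the lease_break_time timer ...
 *		fcntl(fd, F_SETLEASE, F_UNLCK);
 *	}
 */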
1581 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1582 * @filp: The file to apply the lock to
1583 * @fl: The lock to be applied
1585 * Add a FLOCK style lock to a file.
1587 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1589 int error;
1590 might_sleep();
1591 for (;;) {
1592 error = flock_lock_file(filp, fl);
1593 if (error != FILE_LOCK_DEFERRED)
1594 break;
1595 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1596 if (!error)
1597 continue;
1599 locks_delete_block(fl);
1600 break;
1602 return error;
1605 EXPORT_SYMBOL(flock_lock_file_wait);
1608 * sys_flock: - flock() system call.
1609 * @fd: the file descriptor to lock.
1610 * @cmd: the type of lock to apply.
1612 * Apply a %FL_FLOCK style lock to an open file descriptor.
1613 * The @cmd can be one of
1615 * %LOCK_SH -- a shared lock.
1617 * %LOCK_EX -- an exclusive lock.
1619 * %LOCK_UN -- remove an existing lock.
1621 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
1623 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1624 * processes read and write access respectively.
1626 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1628 struct fd f = fdget(fd);
1629 struct file_lock *lock;
1630 int can_sleep, unlock;
1631 int error;
1633 error = -EBADF;
1634 if (!f.file)
1635 goto out;
1637 can_sleep = !(cmd & LOCK_NB);
1638 cmd &= ~LOCK_NB;
1639 unlock = (cmd == LOCK_UN);
1641 if (!unlock && !(cmd & LOCK_MAND) &&
1642 !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
1643 goto out_putf;
1645 error = flock_make_lock(f.file, &lock, cmd);
1646 if (error)
1647 goto out_putf;
1648 if (can_sleep)
1649 lock->fl_flags |= FL_SLEEP;
1651 error = security_file_lock(f.file, lock->fl_type);
1652 if (error)
1653 goto out_free;
1655 if (f.file->f_op && f.file->f_op->flock)
1656 error = f.file->f_op->flock(f.file,
1657 (can_sleep) ? F_SETLKW : F_SETLK,
1658 lock);
1659 else
1660 error = flock_lock_file_wait(f.file, lock);
1662 out_free:
1663 locks_free_lock(lock);
1665 out_putf:
1666 fdput(f);
1667 out:
1668 return error;
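/*
 * Illustrative userspace sketch: typical use of the flock() system call
 * implemented above.
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK) {
 *		... someone else holds the lock; retry or give up ...
 *	} else {
 *		... critical section ...
 *		flock(fd, LOCK_UN);
 *	}
 */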
1672 * vfs_test_lock - test file byte range lock
1673 * @filp: The file to test lock for
1674 * @fl: The lock to test; also used to hold result
1676 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
1677 * setting conf->fl_type to something other than F_UNLCK.
1679 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1681 if (filp->f_op && filp->f_op->lock)
1682 return filp->f_op->lock(filp, F_GETLK, fl);
1683 posix_test_lock(filp, fl);
1684 return 0;
1686 EXPORT_SYMBOL_GPL(vfs_test_lock);
1688 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1690 flock->l_pid = fl->fl_pid;
1691 #if BITS_PER_LONG == 32
1693 * Make sure we can represent the posix lock via
1694 * legacy 32bit flock.
1696 if (fl->fl_start > OFFT_OFFSET_MAX)
1697 return -EOVERFLOW;
1698 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1699 return -EOVERFLOW;
1700 #endif
1701 flock->l_start = fl->fl_start;
1702 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1703 fl->fl_end - fl->fl_start + 1;
1704 flock->l_whence = 0;
1705 flock->l_type = fl->fl_type;
1706 return 0;
1709 #if BITS_PER_LONG == 32
1710 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1712 flock->l_pid = fl->fl_pid;
1713 flock->l_start = fl->fl_start;
1714 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1715 fl->fl_end - fl->fl_start + 1;
1716 flock->l_whence = 0;
1717 flock->l_type = fl->fl_type;
1719 #endif
1721 /* Report the first existing lock that would conflict with l.
1722 * This implements the F_GETLK command of fcntl().
1724 int fcntl_getlk(struct file *filp, struct flock __user *l)
1726 struct file_lock file_lock;
1727 struct flock flock;
1728 int error;
1730 error = -EFAULT;
1731 if (copy_from_user(&flock, l, sizeof(flock)))
1732 goto out;
1733 error = -EINVAL;
1734 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1735 goto out;
1737 error = flock_to_posix_lock(filp, &file_lock, &flock);
1738 if (error)
1739 goto out;
1741 error = vfs_test_lock(filp, &file_lock);
1742 if (error)
1743 goto out;
1745 flock.l_type = file_lock.fl_type;
1746 if (file_lock.fl_type != F_UNLCK) {
1747 error = posix_lock_to_flock(&flock, &file_lock);
1748 if (error)
1749 goto out;
1751 error = -EFAULT;
1752 if (!copy_to_user(l, &flock, sizeof(flock)))
1753 error = 0;
1754 out:
1755 return error;
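/*
 * Illustrative userspace sketch: F_GETLK as implemented above reports the
 * first conflicting lock, or sets l_type to F_UNLCK if the requested region
 * could be locked.
 *
 *	struct flock probe = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			       .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_GETLK, &probe);
 *	if (probe.l_type != F_UNLCK)
 *		printf("pid %d holds a conflicting lock at %lld\n",
 *		       probe.l_pid, (long long)probe.l_start);
 */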
1759 * vfs_lock_file - file byte range lock
1760 * @filp: The file to apply the lock to
1761 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1762 * @fl: The lock to be applied
1763 * @conf: Place to return a copy of the conflicting lock, if found.
1765 * A caller that doesn't care about the conflicting lock may pass NULL
1766 * as the final argument.
1768 * If the filesystem defines a private ->lock() method, then @conf will
1769 * be left unchanged; so a caller that cares should initialize it to
1770 * some acceptable default.
1772 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1773 * locks, the ->lock() interface may return asynchronously, before the lock has
1774 * been granted or denied by the underlying filesystem, if (and only if)
1775 * lm_grant is set. Callers expecting ->lock() to return asynchronously
1776 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1777 * the request is for a blocking lock. When ->lock() does return asynchronously,
1778 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
1779 * request completes.
1780 * If the request is for a non-blocking lock the file system should return
1781 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1782 * with the result. If the request timed out, the callback routine will return a
1783 * nonzero return code and the file system should release the lock. The file
1784 * system is also responsible for keeping a corresponding posix lock when it
1785 * grants a lock so the VFS can find out which locks are locally held and do
1786 * the correct lock cleanup when required.
1787 * The underlying filesystem must not drop the kernel lock or call
1788 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1789 * return code.
1791 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1793 if (filp->f_op && filp->f_op->lock)
1794 return filp->f_op->lock(filp, cmd, fl);
1795 else
1796 return posix_lock_file(filp, fl, conf);
1798 EXPORT_SYMBOL_GPL(vfs_lock_file);
1800 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1801 struct file_lock *fl)
1803 int error;
1805 error = security_file_lock(filp, fl->fl_type);
1806 if (error)
1807 return error;
1809 for (;;) {
1810 error = vfs_lock_file(filp, cmd, fl, NULL);
1811 if (error != FILE_LOCK_DEFERRED)
1812 break;
1813 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1814 if (!error)
1815 continue;
1817 locks_delete_block(fl);
1818 break;
1821 return error;
1824 /* Apply the lock described by l to an open file descriptor.
1825 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1827 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1828 struct flock __user *l)
1830 struct file_lock *file_lock = locks_alloc_lock();
1831 struct flock flock;
1832 struct inode *inode;
1833 struct file *f;
1834 int error;
1836 if (file_lock == NULL)
1837 return -ENOLCK;
1840 * This might block, so we do it before checking the inode.
1842 error = -EFAULT;
1843 if (copy_from_user(&flock, l, sizeof(flock)))
1844 goto out;
1846 inode = file_inode(filp);
1848 /* Don't allow mandatory locks on files that may be memory mapped
1849 * and shared.
1851 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1852 error = -EAGAIN;
1853 goto out;
1856 again:
1857 error = flock_to_posix_lock(filp, file_lock, &flock);
1858 if (error)
1859 goto out;
1860 if (cmd == F_SETLKW) {
1861 file_lock->fl_flags |= FL_SLEEP;
1864 error = -EBADF;
1865 switch (flock.l_type) {
1866 case F_RDLCK:
1867 if (!(filp->f_mode & FMODE_READ))
1868 goto out;
1869 break;
1870 case F_WRLCK:
1871 if (!(filp->f_mode & FMODE_WRITE))
1872 goto out;
1873 break;
1874 case F_UNLCK:
1875 break;
1876 default:
1877 error = -EINVAL;
1878 goto out;
1881 error = do_lock_file_wait(filp, cmd, file_lock);
1884 * Attempt to detect a close/fcntl race and recover by
1885 * releasing the lock that was just acquired.
1888 * we need that spin_lock here - it prevents reordering between
1889 * update of inode->i_flock and check for it done in close().
1890 * rcu_read_lock() wouldn't do.
1892 spin_lock(&current->files->file_lock);
1893 f = fcheck(fd);
1894 spin_unlock(&current->files->file_lock);
1895 if (!error && f != filp && flock.l_type != F_UNLCK) {
1896 flock.l_type = F_UNLCK;
1897 goto again;
1900 out:
1901 locks_free_lock(file_lock);
1902 return error;
1905 #if BITS_PER_LONG == 32
1906 /* Report the first existing lock that would conflict with l.
1907 * This implements the F_GETLK command of fcntl().
1909 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1911 struct file_lock file_lock;
1912 struct flock64 flock;
1913 int error;
1915 error = -EFAULT;
1916 if (copy_from_user(&flock, l, sizeof(flock)))
1917 goto out;
1918 error = -EINVAL;
1919 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1920 goto out;
1922 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1923 if (error)
1924 goto out;
1926 error = vfs_test_lock(filp, &file_lock);
1927 if (error)
1928 goto out;
1930 flock.l_type = file_lock.fl_type;
1931 if (file_lock.fl_type != F_UNLCK)
1932 posix_lock_to_flock64(&flock, &file_lock);
1934 error = -EFAULT;
1935 if (!copy_to_user(l, &flock, sizeof(flock)))
1936 error = 0;
1938 out:
1939 return error;
1942 /* Apply the lock described by l to an open file descriptor.
1943 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1945 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1946 struct flock64 __user *l)
1948 struct file_lock *file_lock = locks_alloc_lock();
1949 struct flock64 flock;
1950 struct inode *inode;
1951 struct file *f;
1952 int error;
1954 if (file_lock == NULL)
1955 return -ENOLCK;
1958 * This might block, so we do it before checking the inode.
1960 error = -EFAULT;
1961 if (copy_from_user(&flock, l, sizeof(flock)))
1962 goto out;
1964 inode = file_inode(filp);
1966 /* Don't allow mandatory locks on files that may be memory mapped
1967 * and shared.
1969 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1970 error = -EAGAIN;
1971 goto out;
1974 again:
1975 error = flock64_to_posix_lock(filp, file_lock, &flock);
1976 if (error)
1977 goto out;
1978 if (cmd == F_SETLKW64) {
1979 file_lock->fl_flags |= FL_SLEEP;
1982 error = -EBADF;
1983 switch (flock.l_type) {
1984 case F_RDLCK:
1985 if (!(filp->f_mode & FMODE_READ))
1986 goto out;
1987 break;
1988 case F_WRLCK:
1989 if (!(filp->f_mode & FMODE_WRITE))
1990 goto out;
1991 break;
1992 case F_UNLCK:
1993 break;
1994 default:
1995 error = -EINVAL;
1996 goto out;
1999 error = do_lock_file_wait(filp, cmd, file_lock);
2002 * Attempt to detect a close/fcntl race and recover by
2003 * releasing the lock that was just acquired.
2005 spin_lock(&current->files->file_lock);
2006 f = fcheck(fd);
2007 spin_unlock(&current->files->file_lock);
2008 if (!error && f != filp && flock.l_type != F_UNLCK) {
2009 flock.l_type = F_UNLCK;
2010 goto again;
2013 out:
2014 locks_free_lock(file_lock);
2015 return error;
2017 #endif /* BITS_PER_LONG == 32 */
2020 * This function is called when the file is being removed
2021 * from the task's fd array. POSIX locks belonging to this task
2022 * are deleted at this time.
2024 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2026 struct file_lock lock;
2029 * If there are no locks held on this file, we don't need to call
2030 * posix_lock_file(). Another process could be setting a lock on this
2031 * file at the same time, but we wouldn't remove that lock anyway.
2033 if (!file_inode(filp)->i_flock)
2034 return;
2036 lock.fl_type = F_UNLCK;
2037 lock.fl_flags = FL_POSIX | FL_CLOSE;
2038 lock.fl_start = 0;
2039 lock.fl_end = OFFSET_MAX;
2040 lock.fl_owner = owner;
2041 lock.fl_pid = current->tgid;
2042 lock.fl_file = filp;
2043 lock.fl_ops = NULL;
2044 lock.fl_lmops = NULL;
2046 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2048 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2049 lock.fl_ops->fl_release_private(&lock);
2052 EXPORT_SYMBOL(locks_remove_posix);
2055 * This function is called on the last close of an open file.
2057 void locks_remove_flock(struct file *filp)
2059 struct inode * inode = file_inode(filp);
2060 struct file_lock *fl;
2061 struct file_lock **before;
2063 if (!inode->i_flock)
2064 return;
2066 if (filp->f_op && filp->f_op->flock) {
2067 struct file_lock fl = {
2068 .fl_pid = current->tgid,
2069 .fl_file = filp,
2070 .fl_flags = FL_FLOCK,
2071 .fl_type = F_UNLCK,
2072 .fl_end = OFFSET_MAX,
2074 filp->f_op->flock(filp, F_SETLKW, &fl);
2075 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2076 fl.fl_ops->fl_release_private(&fl);
2079 lock_flocks();
2080 before = &inode->i_flock;
2082 while ((fl = *before) != NULL) {
2083 if (fl->fl_file == filp) {
2084 if (IS_FLOCK(fl)) {
2085 locks_delete_lock(before);
2086 continue;
2088 if (IS_LEASE(fl)) {
2089 lease_modify(before, F_UNLCK);
2090 continue;
2092 /* What? */
2093 BUG();
2095 before = &fl->fl_next;
2097 unlock_flocks();
2101 * posix_unblock_lock - stop waiting for a file lock
2102 * @filp: how the file was opened
2103 * @waiter: the lock which was waiting
2105 * lockd needs to block waiting for locks.
2108 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2110 int status = 0;
2112 lock_flocks();
2113 if (waiter->fl_next)
2114 __locks_delete_block(waiter);
2115 else
2116 status = -ENOENT;
2117 unlock_flocks();
2118 return status;
2121 EXPORT_SYMBOL(posix_unblock_lock);
2124 * vfs_cancel_lock - file byte range unblock lock
2125 * @filp: The file to apply the unblock to
2126 * @fl: The lock to be unblocked
2128 * Used by lock managers to cancel blocked requests
2130 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2132 if (filp->f_op && filp->f_op->lock)
2133 return filp->f_op->lock(filp, F_CANCELLK, fl);
2134 return 0;
2137 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2139 #ifdef CONFIG_PROC_FS
2140 #include <linux/proc_fs.h>
2141 #include <linux/seq_file.h>
2143 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2144 loff_t id, char *pfx)
2146 struct inode *inode = NULL;
2147 unsigned int fl_pid;
2149 if (fl->fl_nspid)
2150 fl_pid = pid_vnr(fl->fl_nspid);
2151 else
2152 fl_pid = fl->fl_pid;
2154 if (fl->fl_file != NULL)
2155 inode = file_inode(fl->fl_file);
2157 seq_printf(f, "%lld:%s ", id, pfx);
2158 if (IS_POSIX(fl)) {
2159 seq_printf(f, "%6s %s ",
2160 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2161 (inode == NULL) ? "*NOINODE*" :
2162 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2163 } else if (IS_FLOCK(fl)) {
2164 if (fl->fl_type & LOCK_MAND) {
2165 seq_printf(f, "FLOCK MSNFS ");
2166 } else {
2167 seq_printf(f, "FLOCK ADVISORY ");
2169 } else if (IS_LEASE(fl)) {
2170 seq_printf(f, "LEASE ");
2171 if (lease_breaking(fl))
2172 seq_printf(f, "BREAKING ");
2173 else if (fl->fl_file)
2174 seq_printf(f, "ACTIVE ");
2175 else
2176 seq_printf(f, "BREAKER ");
2177 } else {
2178 seq_printf(f, "UNKNOWN UNKNOWN ");
2180 if (fl->fl_type & LOCK_MAND) {
2181 seq_printf(f, "%s ",
2182 (fl->fl_type & LOCK_READ)
2183 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2184 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2185 } else {
2186 seq_printf(f, "%s ",
2187 (lease_breaking(fl))
2188 ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2189 : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2191 if (inode) {
2192 #ifdef WE_CAN_BREAK_LSLK_NOW
2193 seq_printf(f, "%d %s:%ld ", fl_pid,
2194 inode->i_sb->s_id, inode->i_ino);
2195 #else
2196 /* userspace relies on this representation of dev_t ;-( */
2197 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2198 MAJOR(inode->i_sb->s_dev),
2199 MINOR(inode->i_sb->s_dev), inode->i_ino);
2200 #endif
2201 } else {
2202 seq_printf(f, "%d <none>:0 ", fl_pid);
2204 if (IS_POSIX(fl)) {
2205 if (fl->fl_end == OFFSET_MAX)
2206 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2207 else
2208 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2209 } else {
2210 seq_printf(f, "0 EOF\n");
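/*
 * Illustrative sample of a resulting /proc/locks line (fields as emitted
 * above; the values are made up):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 */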
2214 static int locks_show(struct seq_file *f, void *v)
2216 struct file_lock *fl, *bfl;
2218 fl = list_entry(v, struct file_lock, fl_link);
2220 lock_get_status(f, fl, *((loff_t *)f->private), "");
2222 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2223 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2225 return 0;
2228 static void *locks_start(struct seq_file *f, loff_t *pos)
2230 loff_t *p = f->private;
2232 lock_flocks();
2233 *p = (*pos + 1);
2234 return seq_list_start(&file_lock_list, *pos);
2237 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2239 loff_t *p = f->private;
2240 ++*p;
2241 return seq_list_next(v, &file_lock_list, pos);
2244 static void locks_stop(struct seq_file *f, void *v)
2246 unlock_flocks();
2249 static const struct seq_operations locks_seq_operations = {
2250 .start = locks_start,
2251 .next = locks_next,
2252 .stop = locks_stop,
2253 .show = locks_show,
2256 static int locks_open(struct inode *inode, struct file *filp)
2258 return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2261 static const struct file_operations proc_locks_operations = {
2262 .open = locks_open,
2263 .read = seq_read,
2264 .llseek = seq_lseek,
2265 .release = seq_release_private,
2268 static int __init proc_locks_init(void)
2270 proc_create("locks", 0, NULL, &proc_locks_operations);
2271 return 0;
2273 module_init(proc_locks_init);
2274 #endif
2277 * lock_may_read - checks that the region is free of locks
2278 * @inode: the inode that is being read
2279 * @start: the first byte to read
2280 * @len: the number of bytes to read
2282 * Emulates Windows locking requirements. Whole-file
2283 * mandatory locks (share modes) can prohibit a read and
2284 * byte-range POSIX locks can prohibit a read if they overlap.
2286 * N.B. this function is only ever called
2287 * from knfsd and ownership of locks is never checked.
2289 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2291 struct file_lock *fl;
2292 int result = 1;
2293 lock_flocks();
2294 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2295 if (IS_POSIX(fl)) {
2296 if (fl->fl_type == F_RDLCK)
2297 continue;
2298 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2299 continue;
2300 } else if (IS_FLOCK(fl)) {
2301 if (!(fl->fl_type & LOCK_MAND))
2302 continue;
2303 if (fl->fl_type & LOCK_READ)
2304 continue;
2305 } else
2306 continue;
2307 result = 0;
2308 break;
2310 unlock_flocks();
2311 return result;
2314 EXPORT_SYMBOL(lock_may_read);
2317 * lock_may_write - checks that the region is free of locks
2318 * @inode: the inode that is being written
2319 * @start: the first byte to write
2320 * @len: the number of bytes to write
2322 * Emulates Windows locking requirements. Whole-file
2323 * mandatory locks (share modes) can prohibit a write and
2324 * byte-range POSIX locks can prohibit a write if they overlap.
2326 * N.B. this function is only ever called
2327 * from knfsd and ownership of locks is never checked.
2329 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2331 struct file_lock *fl;
2332 int result = 1;
2333 lock_flocks();
2334 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2335 if (IS_POSIX(fl)) {
2336 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2337 continue;
2338 } else if (IS_FLOCK(fl)) {
2339 if (!(fl->fl_type & LOCK_MAND))
2340 continue;
2341 if (fl->fl_type & LOCK_WRITE)
2342 continue;
2343 } else
2344 continue;
2345 result = 0;
2346 break;
2348 unlock_flocks();
2349 return result;
2352 EXPORT_SYMBOL(lock_may_write);
2354 static int __init filelock_init(void)
2356 filelock_cache = kmem_cache_create("file_lock_cache",
2357 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2359 return 0;
2362 core_initcall(filelock_init);