/*
 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#include <sys/types.h>
#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
# define USE_EPOLL
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
# define USE_EPOLL
# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data
{
    void     *ptr;
    int       fd;
    uint32_t  u32;
    uint64_t  u64;
} epoll_data_t;

struct epoll_event
{
    uint32_t      events;
    epoll_data_t  data;
};
#define SYSCALL_RET(ret) do { \
        if (ret < 0) { errno = -ret; ret = -1; } \
        return ret; \
    } while(0)
static inline int epoll_create( int size )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );
    SYSCALL_RET(ret);
}
static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );
    SYSCALL_RET(ret);
}
static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)
             : "memory" );
    SYSCALL_RET(ret);
}

#endif /* linux && __i386__ && HAVE_STDINT_H */
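/* (Note: on this fallback path the epoll syscalls are issued directly via
 * int $0x80 -- syscall numbers 254-256 on i386, as noted in the wrappers --
 * for kernels that support epoll before libc does; %ebx is saved and restored
 * by hand because gcc reserves it as the PIC register.) */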
/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
 */
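/* (Illustration of the problem: with POSIX fcntl() locks, closing *any*
 * descriptor on a file drops all locks the process holds on that file,
 * even locks taken through a different descriptor that is still open:
 *
 *     fd1 = open( "somefile", O_RDWR );
 *     fd2 = open( "somefile", O_RDWR );
 *     fcntl( fd1, F_SETLK, &fl );    // lock taken through fd1
 *     close( fd2 );                  // silently releases the fd1 lock too!
 *
 * Hence the closed-fd bookkeeping below.) */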
/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
struct closed_fd
{
    struct list entry;       /* entry in inode closed list */
    int         fd;          /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */
};
struct fd
{
    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (GENERIC_READ/WRITE) */
    unsigned int         sharing;     /* file sharing mode */
    int                  unix_fd;     /* unix file descriptor */
    int                  fs_locks;    /* can we use filesystem locks for this fd? */
    int                  poll_index;  /* index of fd in poll array */
};
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );
static const struct object_ops fd_ops =
{
    sizeof(struct fd),        /* size */
    fd_dump,                  /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    fd_destroy                /* destroy */
};
struct inode
{
    struct object       obj;        /* object header */
    struct list         entry;      /* inode hash list entry */
    unsigned int        hash;       /* hashing code */
    dev_t               dev;        /* device number */
    ino_t               ino;        /* inode number */
    struct list         open;       /* list of open file descriptors */
    struct list         locks;      /* list of file locks */
    struct list         closed;     /* list of file descriptors to close at destroy time */
};
static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );
static const struct object_ops inode_ops =
{
    sizeof(struct inode),     /* size */
    inode_dump,               /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    inode_destroy             /* destroy */
};
/* file lock object */

struct file_lock
{
    struct object       obj;         /* object header */
    struct fd          *fd;          /* fd owning this lock */
    struct list         fd_entry;    /* entry in list of locks on a given fd */
    struct list         inode_entry; /* entry in inode list of locks */
    int                 shared;      /* shared lock? */
    file_pos_t          start;       /* locked region is interval [start;end) */
    file_pos_t          end;
    struct process     *process;     /* process owning this lock */
    struct list         proc_entry;  /* entry in list of locks owned by the process */
};
static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );
static const struct object_ops file_lock_ops =
{
    sizeof(struct file_lock),   /* size */
    file_lock_dump,             /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    file_lock_signaled,         /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_destroy                  /* destroy */
};
#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;
#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((val) >> 32), (unsigned long)(val) ); \
    else \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
  } while (0)
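/* (e.g. on a platform with 32-bit unsigned long, the 64-bit value 0x123456789
 * prints as the high word "1" followed by the zero-padded low word "23456789";
 * values that fit in an unsigned long take the plain "%lx" branch) */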
/****************************************************************/
/* timeouts support */

struct timeout_user
{
    struct list       entry;      /* entry in sorted timeout list */
    struct timeval    when;       /* timeout expiry (absolute time) */
    timeout_callback  callback;   /* callback function */
    void             *private;    /* callback private data */
};
static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
/* add a timeout user */
struct timeout_user *add_timeout_user( const struct timeval *when, timeout_callback func,
                                       void *private )
{
    struct timeout_user *user;
    struct list *ptr;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->when     = *when;
    user->callback = func;
    user->private  = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
    {
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (!time_before( &timeout->when, when )) break;
    }
    list_add_before( ptr, &user->entry );
    return user;
}
/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
{
    list_remove( &user->entry );
    free( user );
}
/* add a timeout in milliseconds to an absolute time */
void add_timeout( struct timeval *when, int timeout )
{
    if (timeout)
    {
        long sec = timeout / 1000;
        if ((when->tv_usec += (timeout - 1000*sec) * 1000) >= 1000000)
        {
            when->tv_usec -= 1000000;
            when->tv_sec++;
        }
        when->tv_sec += sec;
    }
}
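/* Example use (this is what create_async() does below): to fire a callback
 * N milliseconds from now,
 *     gettimeofday( &when, 0 );
 *     add_timeout( &when, N );
 *     add_timeout_user( &when, callback, private );
 */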
/****************************************************************/
/* poll support */

static struct fd **poll_users;              /* users array */
static struct pollfd *pollfd;               /* poll fd array */
static int nb_users;                        /* count of array entries actually in use */
static int active_users;                    /* current number of active users */
static int allocated_users;                 /* count of allocated entries in the array */
static struct fd **freelist;                /* list of free entries in the array */

#ifdef USE_EPOLL

static int epoll_fd = -1;
static struct epoll_event *epoll_events;
/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct epoll_event ev;
    int ctl;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ctl = EPOLL_CTL_DEL;
        events = 0;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ctl = EPOLL_CTL_ADD;
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ctl = EPOLL_CTL_MOD;
    }

    ev.events = events;
    ev.data.u32 = user;

    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        {
            close( epoll_fd );
            epoll_fd = -1;
        }
        else perror( "epoll_ctl" );  /* should not happen */
    }
}
#else /* USE_EPOLL */

static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
}

#endif /* USE_EPOLL */
/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
{
    int ret;

    if (freelist)
    {
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];
    }
    else
    {
        if (nb_users == allocated_users)
        {
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
            {
                if (allocated_users)
                    poll_users = newusers;
                else
                    free( newusers );
                return -1;
            }
            poll_users = newusers;
            pollfd = newpoll;
#ifdef USE_EPOLL
            if (!allocated_users) epoll_fd = epoll_create( new_count );
            if (epoll_fd != -1)
            {
                struct epoll_event *new_events;
                if (!(new_events = realloc( epoll_events, new_count * sizeof(*epoll_events) )))
                    return -1;
                epoll_events = new_events;
            }
#endif
            allocated_users = new_count;
        }
        ret = nb_users++;
    }
    pollfd[ret].fd = -1;
    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
    active_users++;
    return ret;
}
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
{
    assert( user >= 0 );
    assert( poll_users[user] == fd );

#ifdef USE_EPOLL
    if (epoll_fd != -1 && pollfd[user].fd != -1)
    {
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
    }
#endif
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
    active_users--;
}
/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
{
    if (!list_empty( &timeout_list ))
    {
        struct list expired_list, *ptr;
        struct timeval now;

        gettimeofday( &now, NULL );

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (!time_before( &now, &timeout->when ))
            {
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );
            }
            else break;
        }

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );
            free( timeout );
        }

        if ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            int diff = (timeout->when.tv_sec - now.tv_sec) * 1000
                     + (timeout->when.tv_usec - now.tv_usec) / 1000;
            if (diff < 0) diff = 0;
            return diff;
        }
    }
    return -1;  /* no pending timeouts */
}
/* server main poll() loop */
void main_loop(void)
{
    int i, ret, timeout;

#ifdef USE_EPOLL
    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );
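    /* (These asserts guarantee that the epoll event bits can be stored
     * straight into the pollfd revents fields below without translation.) */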
    if (epoll_fd != -1)
    {
        while (active_users)
        {
            timeout = get_next_timeout();

            if (!active_users) break;  /* last user removed by a timeout */
            if (epoll_fd == -1) break;  /* an error occurred with epoll */

            ret = epoll_wait( epoll_fd, epoll_events, allocated_users, timeout );

            /* put the events into the pollfd array first, like poll does */
            for (i = 0; i < ret; i++)
            {
                int user = epoll_events[i].data.u32;
                pollfd[user].revents = epoll_events[i].events;
            }

            /* read events from the pollfd array, as set_fd_events may modify them */
            for (i = 0; i < ret; i++)
            {
                int user = epoll_events[i].data.u32;
                if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
            }
        }
    }
    /* fall through to normal poll loop */
#endif /* USE_EPOLL */
    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );
        if (ret > 0)
        {
            for (i = 0; i < nb_users; i++)
            {
                if (pollfd[i].revents)
                {
                    fd_poll_event( poll_users[i], pollfd[i].revents );
                    if (!--ret) break;
                }
            }
        }
    }
}
/****************************************************************/
/* inode functions */

#define HASH_SIZE 37

static struct list inode_hash[HASH_SIZE];
/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode )
{
    struct list *ptr = list_head( &inode->closed );

    while (ptr)
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (fd->fd != -1)
        {
            close( fd->fd );
            fd->fd = -1;
        }
        if (!fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
        {
            list_remove( ptr );
            free( fd );
        }
        ptr = next;
    }
}
static void inode_dump( struct object *obj, int verbose )
{
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode dev=" );
    DUMP_LONG_LONG( inode->dev );
    fprintf( stderr, " ino=" );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );
}
static void inode_destroy( struct object *obj )
{
    struct inode *inode = (struct inode *)obj;
    struct list *ptr;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        list_remove( ptr );
        if (fd->fd != -1) close( fd->fd );
        if (fd->unlink[0])
        {
            /* make sure it is still the same file */
            struct stat st;
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->dev && st.st_ino == inode->ino)
            {
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );
            }
        }
        free( fd );
    }
}
/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct list *ptr;
    struct inode *inode;
    unsigned int hash = (dev ^ ino) % HASH_SIZE;

    if (inode_hash[hash].next)
    {
        LIST_FOR_EACH( ptr, &inode_hash[hash] )
        {
            inode = LIST_ENTRY( ptr, struct inode, entry );
            if (inode->dev == dev && inode->ino == ino)
                return (struct inode *)grab_object( inode );
        }
    }
    else list_init( &inode_hash[hash] );

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
    {
        inode->hash = hash;
        inode->dev  = dev;
        inode->ino  = ino;
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &inode_hash[hash], &inode->entry );
    }
    return inode;
}
/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
{
    if (!list_empty( &inode->locks ))
    {
        list_add_head( &inode->closed, &fd->entry );
    }
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
    {
        if (fd->fd != -1) close( fd->fd );
        fd->fd = -1;
        list_add_head( &inode->closed, &fd->entry );
    }
    else  /* no locks on this inode and no unlink, get rid of the fd */
    {
        if (fd->fd != -1) close( fd->fd );
        free( fd );
    }
}
/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
{
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );
}
static int file_lock_signaled( struct object *obj, struct thread *thread )
{
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
}
/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
{
    struct flock fl;

    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */
    for (;;)
    {
        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_type   = type;
        fl.l_whence = SEEK_SET;
        fl.l_start  = start;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

        switch(errno)
        {
        case EACCES:
            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
            {
                set_error( STATUS_FILE_LOCK_CONFLICT );
                return 0;
            }
            /* fall through */
        case EIO:
        case ENOLCK:
            /* no locking on this fs, just ignore it */
            fd->fs_locks = 0;
            return 1;
        case EAGAIN:
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        case EBADF:
            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );
            return 0;
#ifdef EOVERFLOW
        case EOVERFLOW:
#endif
        case EINVAL:
            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
            {
                max_unix_offset = INT_MAX;
                break;  /* retry */
            }
            /* fall through */
        default:
            file_set_error();
            return 0;
        }
    }
}
/* check if interval [start;end) overlaps the lock */
inline static int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
{
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;
    return 1;
}
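/* (Intervals are half-open, and an end of 0 means "no upper bound": a lock
 * over [5;10) does not overlap [10;15), but a lock with end == 0 overlaps
 * every interval that extends past its start.) */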
/* remove Unix locks for all bytes in the specified area that are no longer locked */
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
{
    struct hole
    {
        struct hole *next;
        struct hole *prev;
        file_pos_t   start;
        file_pos_t   end;
    } *first, *cur, *next, *buffer;

    struct list *ptr;
    int count = 0;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;
    }

    if (!count)  /* no locks at all, we can unlock everything */
    {
        set_unix_lock( fd, start, end, F_UNLCK );
        return;
    }

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first = buffer;
    first->next  = NULL;
    first->prev  = NULL;
    first->start = start;
    first->end   = end;
    next = first + 1;
    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
        {
            if (cur->end <= lock->start) continue;  /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
            {
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            }
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
            {
                cur->end = lock->start;
                assert( cur->start < cur->end );
            }
            else  /* lock is in the middle of hole, split hole in two */
            {
                next->prev = cur;
                next->next = cur->next;
                cur->next = next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                next++;
                break;  /* done with this lock */
            }
        }
    }

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );

 done:
    free( buffer );
}
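/* (Worked example: unlocking the area [0;10) while locks [2;4) and [6;8)
 * remain on the inode leaves the holes [0;2), [4;6) and [8;10), and only
 * those three ranges are passed to F_UNLCK.) */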
/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
{
    struct file_lock *lock;

    if (!fd->inode)  /* not a regular file */
    {
        set_error( STATUS_INVALID_HANDLE );
        return NULL;
    }

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared  = shared;
    lock->start   = start;
    lock->end     = end;
    lock->fd      = fd;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
    {
        release_object( lock );
        return NULL;
    }
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
    return lock;
}
/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
{
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
}
/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */
    }
}
/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
{
    file_pos_t start = FILE_POS_T_MAX, end = 0;
    struct list *ptr;

    while ((ptr = list_head( &fd->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    }
    if (start < end) remove_unix_locks( fd, start, end + 1 );
}
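/* (Rather than undoing each Unix lock one by one, the loop above collects the
 * smallest range covering all of the fd's locks and clears it in one pass.) */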
/* add a lock on an fd */
/* returns handle to wait on */
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* don't allow wrapping locks */
    if (end && end < start)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
        /* found one */
        if (!wait)
        {
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        }
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );
    }

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
    {
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
    }
    return 0;
}
/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
        {
            remove_lock( lock, 1 );
            return;
        }
    }
    set_error( STATUS_FILE_LOCK_CONFLICT );
}
/****************************************************************/
/* asynchronous operations support */

struct async
{
    struct thread       *thread;      /* owning thread */
    void                *apc;         /* client-side APC to queue on completion */
    void                *user;        /* client-side user data */
    void                *sb;          /* client-side status block */
    struct timeout_user *timeout;     /* timeout for the request, if any */
    struct list          entry;       /* entry in the fd's async queue */
};
/* notifies client thread of new status of its async request */
/* destroys the server side of it */
static void async_terminate( struct async *async, int status )
{
    thread_queue_apc( async->thread, NULL, async->apc, APC_ASYNC_IO,
                      1, async->user, async->sb, (void *)status );

    if (async->timeout) remove_timeout_user( async->timeout );
    async->timeout = NULL;
    list_remove( &async->entry );
    release_object( async->thread );
    free( async );
}
/* cb for timeout on an async request */
static void async_callback(void *private)
{
    struct async *async = (struct async *)private;

    /* fprintf( stderr, "async timed out %p\n", async ); */
    async->timeout = NULL;
    async_terminate( async, STATUS_TIMEOUT );
}
/* create an async on a given queue of a fd */
struct async *create_async( struct thread *thread, int *timeout, struct list *queue,
                            void *io_apc, void *io_user, void *io_sb )
{
    struct async *async = mem_alloc( sizeof(struct async) );

    if (!async) return NULL;

    async->thread = (struct thread *)grab_object(thread);
    async->apc    = io_apc;
    async->user   = io_user;
    async->sb     = io_sb;

    list_add_tail( queue, &async->entry );

    if (timeout)
    {
        struct timeval when;

        gettimeofday( &when, 0 );
        add_timeout( &when, *timeout );
        async->timeout = add_timeout_user( &when, async_callback, async );
    }
    else async->timeout = NULL;

    return async;
}
/* terminate the async operation at the head of the queue */
void async_terminate_head( struct list *queue, int status )
{
    struct list *ptr = list_head( queue );
    if (ptr) async_terminate( LIST_ENTRY( ptr, struct async, entry ), status );
}
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
{
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p", fd->unix_fd, fd->user );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );
}
static void fd_destroy( struct object *obj )
{
    struct fd *fd = (struct fd *)obj;

    remove_fd_locks( fd );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
    if (fd->inode)
    {
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    }
    else  /* no inode, close it right away */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
    }
}
/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
{
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    }
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
    {
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
    }
}
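/* (A pollfd entry with fd == -1 is ignored by poll(); the POLLERR left in its
 * events field doubles as the "stopped waiting, don't restart" marker that
 * set_fd_epoll_events() checks.) */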
/* allocate an fd object, without setting the unix fd yet */
struct fd *alloc_fd( const struct fd_ops *fd_user_ops, struct object *user )
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops     = fd_user_ops;
    fd->user       = user;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->sharing    = 0;
    fd->unix_fd    = -1;
    fd->fs_locks   = 1;
    fd->poll_index = -1;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
    {
        release_object( fd );
        return NULL;
    }
    return fd;
}
/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
{
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;
    int unlink = 0;
    struct list *ptr;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
    {
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
        if (fd_ptr != fd)
        {
            existing_sharing &= fd_ptr->sharing;
            existing_access  |= fd_ptr->access;
            if (fd_ptr->closed->unlink[0]) unlink = 1;
        }
    }

    if ((access & GENERIC_READ) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & GENERIC_WRITE) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & GENERIC_READ) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & GENERIC_WRITE) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if (fd->closed->unlink[0] && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if (unlink && !(sharing & FILE_SHARE_DELETE)) return 0;
    return 1;
}
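/* (Example: if an existing fd has the file open with GENERIC_WRITE access but
 * only FILE_SHARE_READ sharing, a second open requesting GENERIC_WRITE fails
 * the FILE_SHARE_WRITE test above, and open_fd() below reports
 * STATUS_SHARING_VIOLATION.) */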
/* open() wrapper using a struct fd */
/* the fd must have been created with alloc_fd */
/* on error the fd object is released */
struct fd *open_fd( struct fd *fd, const char *name, int flags, mode_t *mode,
                    unsigned int access, unsigned int sharing, unsigned int options )
{
    struct stat st;
    struct closed_fd *closed_fd;
    const char *unlink_name = "";

    assert( fd->unix_fd == -1 );

    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
    {
        release_object( fd );
        return NULL;
    }
    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
    {
        if (mkdir( name, 0777 ) == -1)
        {
            if (errno != EEXIST || (flags & O_EXCL))
            {
                file_set_error();
                release_object( fd );
                free( closed_fd );
                return NULL;
            }
        }
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
    }
    if ((fd->unix_fd = open( name, flags & ~O_TRUNC, *mode )) == -1)
    {
        file_set_error();
        release_object( fd );
        free( closed_fd );
        return NULL;
    }
    closed_fd->fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );
    *mode = st.st_mode;
    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
    {
        struct inode *inode = get_inode( st.st_dev, st.st_ino );

        if (!inode)
        {
            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode
             */
            goto error;
        }
        fd->inode = inode;
        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
            return NULL;
        }
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
            return NULL;
        }
        if (!check_sharing( fd, access, sharing ))
        {
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
            return NULL;
        }
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    }
    else  /* special file */
    {
        if (options & FILE_DIRECTORY_FILE)
        {
            set_error( STATUS_NOT_A_DIRECTORY );
            goto error;
        }
        if (unlink_name[0])  /* we can't unlink special files */
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        free( closed_fd );
    }
    return fd;

 error:
    release_object( fd );
    free( closed_fd );
    return NULL;
}
/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user )
{
    struct fd *fd = alloc_fd( fd_user_ops, user );

    if (fd)
    {
        fd->unix_fd = unix_fd;
        return fd;
    }
    close( unix_fd );
    return NULL;
}
/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )
{
    return fd->user;
}

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )
{
    return fd->unix_fd;
}

/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
{
    return fd1->inode == fd2->inode;
}
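/* (Two fds open on the same file share the same inode object -- get_inode()
 * returns a grabbed reference to an existing inode on a dev/ino match -- so a
 * simple pointer comparison is enough here.) */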
/* callback for event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
{
    return fd->fd_ops->poll_event( fd, event );
}
/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
{
    struct pollfd pfd;

    pfd.fd     = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
    return pfd.revents;
}
/* default add_queue() routine for objects that poll() on an fd */
int default_fd_add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    struct fd *fd = get_obj_fd( obj );

    if (!fd) return 0;
    if (list_empty( &obj->wait_queue ))  /* first on the queue */
        set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    add_queue( obj, entry );
    release_object( fd );
    return 1;
}
/* default remove_queue() routine for objects that poll() on an fd */
void default_fd_remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    struct fd *fd = get_obj_fd( obj );

    grab_object( obj );
    remove_queue( obj, entry );
    if (list_empty( &obj->wait_queue ))  /* last on the queue is gone */
        set_fd_events( fd, 0 );
    release_object( obj );
    release_object( fd );
}
/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
{
    int events, ret;
    struct fd *fd = get_obj_fd( obj );

    if (fd->inode) return 1;  /* regular files are always signaled */

    events = fd->fd_ops->get_poll_events( fd );
    ret = check_fd_events( fd, events ) != 0;

    if (ret)
        set_fd_events( fd, 0 );  /* stop waiting on select() if we are signaled */
    else if (!list_empty( &obj->wait_queue ))
        set_fd_events( fd, events );  /* restart waiting on poll() if we are no longer signaled */

    release_object( fd );
    return ret;
}
/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
{
    /* an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    wake_up( fd->user, 0 );
}
/* default flush() routine */
int no_flush( struct fd *fd, struct event **event )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default get_file_info() routine */
int no_get_file_info( struct fd *fd )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default queue_async() routine */
void no_queue_async( struct fd *fd, void* apc, void* user, void* io_sb,
                     int type, int count )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}

/* default cancel_async() routine */
void no_cancel_async( struct fd *fd )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}
/* same as get_handle_obj but retrieve the struct fd associated to the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
{
    struct fd *fd = NULL;
    struct object *obj;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
    {
        fd = get_obj_fd( obj );
        release_object( obj );
    }
    return fd;
}
/* flush a file's buffers */
DECL_HANDLER(flush_file)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event *event = NULL;

    if (fd)
    {
        fd->fd_ops->flush( fd, &event );
        if (event)
        {
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        }
        release_object( fd );
    }
}
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
{
    struct fd *fd;

    reply->fd = -1;

    if ((fd = get_handle_fd_obj( current->process, req->handle, req->access )))
    {
        int unix_fd = get_handle_unix_fd( current->process, req->handle, req->access );
        if (unix_fd != -1) reply->fd = unix_fd;
        else if (!get_error())
        {
            assert( fd->unix_fd != -1 );
            send_client_fd( current->process, fd->unix_fd, req->handle );
        }
        reply->flags = fd->fd_ops->get_file_info( fd );
        release_object( fd );
    }
}
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    /*
     * The queue_async method must do the following:
     *
     * 1. Get the async_queue for the request of the given type.
     * 2. Create a new asynchronous request for the selected queue.
     * 3. Carry out any operations necessary to adjust the object's poll events.
     *    Usually: set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) ).
     * 4. When the async request is triggered, send back (with a proper APC)
     *    the trigger (STATUS_ALERTED) to the thread that posted the request.
     *    async_destroy() is to be called: it will both notify the sender about
     *    the trigger and destroy the request by itself.
     * See also the implementations in file.c, serial.c, and sock.c.
     */
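    /* A rough sketch of that pattern (hypothetical "myobj" object; the real
     * versions live in the files mentioned above):
     *
     *     void myobj_queue_async( struct fd *fd, void *apc, void *user, void *io_sb,
     *                             int type, int count )
     *     {
     *         struct myobj *obj = get_fd_user( fd );
     *         struct list *queue = (type == ASYNC_TYPE_READ) ? &obj->read_q
     *                                                        : &obj->write_q;
     *         if (create_async( current, NULL, queue, apc, user, io_sb ))
     *             set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
     *     }
     */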
    if (fd)
    {
        fd->fd_ops->queue_async( fd, req->io_apc, req->io_user, req->io_sb,
                                 req->type, req->count );
        release_object( fd );
    }
}
/* cancels all async I/O */
DECL_HANDLER(cancel_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    if (fd)
    {
        /* Note: we don't kill the queued APC_ASYNC_IO on this thread because
         * NtCancelIoFile() will force the pending APC to be run. Since
         * Windows only guarantees that the current thread will have no async
         * operation on the current fd when NtCancelIoFile returns, this
         * should be sufficient.
         */
        fd->fd_ops->cancel_async( fd );
        release_object( fd );
    }
}