/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2015, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


//!	Operations on file descriptors


#include <fd.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <AutoDeleter.h>

#include <syscalls.h>
#include <syscall_restart.h>
#include <team.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <wait_for_objects.h>

#include "vfs_tracing.h"


//#define TRACE_FD
#ifdef TRACE_FD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


static const size_t kMaxReadDirBufferSize = 64 * 1024;


static struct file_descriptor* get_fd_locked(struct io_context* context,
	int fd);
static struct file_descriptor* remove_fd(struct io_context* context, int fd);
static void deselect_select_infos(file_descriptor* descriptor,
	select_info* infos, bool putSyncObjects);


struct FDGetterLocking {
	inline bool Lock(file_descriptor* /*lockable*/)
	{
		return false;
	}

	inline void Unlock(file_descriptor* lockable)
	{
		put_fd(lockable);
	}
};


class FDGetter : public AutoLocker<file_descriptor, FDGetterLocking> {
public:
	inline FDGetter()
		: AutoLocker<file_descriptor, FDGetterLocking>()
	{
	}

	inline FDGetter(io_context* context, int fd, bool contextLocked = false)
		: AutoLocker<file_descriptor, FDGetterLocking>(
			contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd))
	{
	}

	inline file_descriptor* SetTo(io_context* context, int fd,
		bool contextLocked = false)
	{
		file_descriptor* descriptor
			= contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd);
		AutoLocker<file_descriptor, FDGetterLocking>::SetTo(descriptor, true);
		return descriptor;
	}

	inline file_descriptor* SetTo(int fd, bool kernel,
		bool contextLocked = false)
	{
		return SetTo(get_current_io_context(kernel), fd, contextLocked);
	}

	inline file_descriptor* FD() const
	{
		return fLockable;
	}
};


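// A minimal usage sketch (illustrative only, not code from this file's
// callers): FDGetter pairs get_fd() with an automatic put_fd() when it goes
// out of scope, so a function that needs a descriptor only for the duration
// of a call can write:
//
//	FDGetter fdGetter;
//	file_descriptor* descriptor = fdGetter.SetTo(fd, kernel);
//	if (descriptor == NULL)
//		return B_FILE_ERROR;
//	// ... use descriptor; the reference is put when fdGetter is destroyed

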
//	#pragma mark - General fd routines


#ifdef DEBUG
void dump_fd(int fd, struct file_descriptor* descriptor);

void
dump_fd(int fd, struct file_descriptor* descriptor)
{
	dprintf("fd[%d] = %p: type = %" B_PRId32 ", ref_count = %" B_PRId32 ", ops "
		"= %p, u.vnode = %p, u.mount = %p, cookie = %p, open_mode = %" B_PRIx32
		", pos = %" B_PRId64 "\n",
		fd, descriptor, descriptor->type, descriptor->ref_count,
		descriptor->ops, descriptor->u.vnode, descriptor->u.mount,
		descriptor->cookie, descriptor->open_mode, descriptor->pos);
}
#endif


/*!	Allocates and initializes a new file_descriptor.
*/
struct file_descriptor*
alloc_fd(void)
{
	file_descriptor* descriptor
		= (file_descriptor*)malloc(sizeof(struct file_descriptor));
	if (descriptor == NULL)
		return NULL;

	descriptor->u.vnode = NULL;
	descriptor->cookie = NULL;
	descriptor->ref_count = 1;
	descriptor->open_count = 0;
	descriptor->open_mode = 0;
	descriptor->pos = 0;

	return descriptor;
}


bool
fd_close_on_exec(struct io_context* context, int fd)
{
	return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
}


void
fd_set_close_on_exec(struct io_context* context, int fd, bool closeFD)
{
	if (closeFD)
		context->fds_close_on_exec[fd / 8] |= (1 << (fd & 7));
	else
		context->fds_close_on_exec[fd / 8] &= ~(1 << (fd & 7));
}


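// The close-on-exec flags are stored as a bitmap, one bit per FD: fd / 8
// selects the byte in fds_close_on_exec and fd & 7 the bit within that byte.
// For example, fd 11 is tracked in bit 3 of byte 1.

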
/*!	Searches a free slot in the FD table of the provided I/O context, and
	inserts the specified descriptor into it.
*/
int
new_fd_etc(struct io_context* context, struct file_descriptor* descriptor,
	int firstIndex)
{
	int fd = -1;
	uint32 i;

	mutex_lock(&context->io_mutex);

	for (i = firstIndex; i < context->table_size; i++) {
		if (!context->fds[i]) {
			fd = i;
			break;
		}
	}
	if (fd < 0) {
		fd = B_NO_MORE_FDS;
		goto err;
	}

	TFD(NewFD(context, fd, descriptor));

	context->fds[fd] = descriptor;
	context->num_used_fds++;
	atomic_add(&descriptor->open_count, 1);

err:
	mutex_unlock(&context->io_mutex);

	return fd;
}


int
new_fd(struct io_context* context, struct file_descriptor* descriptor)
{
	return new_fd_etc(context, descriptor, 0);
}


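// For illustration: new_fd() always starts the search at slot 0 and thus
// returns the lowest free FD. Passing a non-zero firstIndex to new_fd_etc()
// skips the lower slots instead; e.g. new_fd_etc(context, descriptor, 3)
// would place the descriptor in the lowest free slot >= 3.

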
/*!	Reduces the descriptor's reference counter, and frees all resources
	when it's no longer used.
*/
void
put_fd(struct file_descriptor* descriptor)
{
	int32 previous = atomic_add(&descriptor->ref_count, -1);

	TFD(PutFD(descriptor));

	TRACE(("put_fd(descriptor = %p [ref = %ld, cookie = %p])\n",
		descriptor, descriptor->ref_count, descriptor->cookie));

	// free the descriptor if we don't need it anymore
	if (previous == 1) {
		// free the underlying object
		if (descriptor->ops != NULL && descriptor->ops->fd_free != NULL)
			descriptor->ops->fd_free(descriptor);

		free(descriptor);
	} else if ((descriptor->open_mode & O_DISCONNECTED) != 0
		&& previous - 1 == descriptor->open_count
		&& descriptor->ops != NULL) {
		// the descriptor has been disconnected - it cannot
		// be accessed anymore, let's close it (no one is
		// currently accessing this descriptor)

		if (descriptor->ops->fd_close)
			descriptor->ops->fd_close(descriptor);
		if (descriptor->ops->fd_free)
			descriptor->ops->fd_free(descriptor);

		// prevent this descriptor from being closed/freed again
		descriptor->ops = NULL;
		descriptor->u.vnode = NULL;

		// the file descriptor is kept intact, so that it's not
		// reused until someone explicitly closes it
	}
}


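// A note on the two counters (summarizing the code above): ref_count counts
// get_fd() references plus the FD-table slot itself and controls when the
// file_descriptor memory is freed, while open_count tracks how many slots
// (and temporary users such as select_fd()) hold the descriptor open and
// controls when the close hook runs.

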
/*!	Decrements the open counter of the file descriptor and invokes
	its close hook when appropriate.
*/
void
close_fd(struct file_descriptor* descriptor)
{
	if (atomic_add(&descriptor->open_count, -1) == 1) {
		vfs_unlock_vnode_if_locked(descriptor);

		if (descriptor->ops != NULL && descriptor->ops->fd_close != NULL)
			descriptor->ops->fd_close(descriptor);
	}
}


status_t
close_fd_index(struct io_context* context, int fd)
{
	struct file_descriptor* descriptor = remove_fd(context, fd);

	if (descriptor == NULL)
		return B_FILE_ERROR;

	close_fd(descriptor);
	put_fd(descriptor);
		// the reference associated with the slot

	return B_OK;
}


/*!	This descriptor's underlying object will be closed and freed as soon as
	possible (in one of the next calls to put_fd() - get_fd() will no longer
	succeed on this descriptor).
	This is useful if the underlying object is gone, for instance when a
	(mounted) volume got removed unexpectedly.
*/
void
disconnect_fd(struct file_descriptor* descriptor)
{
	descriptor->open_mode |= O_DISCONNECTED;
}


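// After disconnect_fd(), every accessor in this file that checks
// O_DISCONNECTED (get_open_fd(), fd_ioctl(), the read/write paths, ...)
// fails with B_FILE_ERROR, and the final put_fd() performs the deferred
// close and free, as described in put_fd() above.

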
void
inc_fd_ref_count(struct file_descriptor* descriptor)
{
	atomic_add(&descriptor->ref_count, 1);
}


static struct file_descriptor*
get_fd_locked(struct io_context* context, int fd)
{
	if (fd < 0 || (uint32)fd >= context->table_size)
		return NULL;

	struct file_descriptor* descriptor = context->fds[fd];

	if (descriptor != NULL) {
		TFD(GetFD(context, fd, descriptor));
		inc_fd_ref_count(descriptor);
	}

	return descriptor;
}


struct file_descriptor*
get_fd(struct io_context* context, int fd)
{
	MutexLocker _(context->io_mutex);

	return get_fd_locked(context, fd);
}


struct file_descriptor*
get_open_fd(struct io_context* context, int fd)
{
	MutexLocker _(context->io_mutex);

	file_descriptor* descriptor = get_fd_locked(context, fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return NULL;

	atomic_add(&descriptor->open_count, 1);

	return descriptor;
}


/*!	Removes the file descriptor from the specified slot.
*/
static struct file_descriptor*
remove_fd(struct io_context* context, int fd)
{
	struct file_descriptor* descriptor = NULL;

	if (fd < 0)
		return NULL;

	mutex_lock(&context->io_mutex);

	if ((uint32)fd < context->table_size)
		descriptor = context->fds[fd];

	select_info* selectInfos = NULL;
	bool disconnected = false;

	if (descriptor != NULL) {
		// fd is valid
		TFD(RemoveFD(context, fd, descriptor));

		context->fds[fd] = NULL;
		fd_set_close_on_exec(context, fd, false);
		context->num_used_fds--;

		selectInfos = context->select_infos[fd];
		context->select_infos[fd] = NULL;

		disconnected = (descriptor->open_mode & O_DISCONNECTED);
	}

	mutex_unlock(&context->io_mutex);

	if (selectInfos != NULL)
		deselect_select_infos(descriptor, selectInfos, true);

	return disconnected ? NULL : descriptor;
}


static int
dup_fd(int fd, bool kernel)
{
	struct io_context* context = get_current_io_context(kernel);
	struct file_descriptor* descriptor;
	int status;

	TRACE(("dup_fd: fd = %d\n", fd));

	// Try to get the fd structure
	descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// now put the fd in place
	status = new_fd(context, descriptor);
	if (status < 0)
		put_fd(descriptor);
	else {
		mutex_lock(&context->io_mutex);
		fd_set_close_on_exec(context, status, false);
		mutex_unlock(&context->io_mutex);
	}

	return status;
}


/*!	POSIX says this should be the same as:
		close(newfd);
		fcntl(oldfd, F_DUPFD, newfd);

	We do dup2() directly to be thread-safe.
*/
static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
	struct file_descriptor* evicted = NULL;
	struct io_context* context;

	TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));

	// quick check
	if (oldfd < 0 || newfd < 0)
		return B_FILE_ERROR;

	// Get current I/O context and lock it
	context = get_current_io_context(kernel);
	mutex_lock(&context->io_mutex);

	// Check if the fds are valid (mutex must be locked because
	// the table size could be changed)
	if ((uint32)oldfd >= context->table_size
		|| (uint32)newfd >= context->table_size
		|| context->fds[oldfd] == NULL
		|| (context->fds[oldfd]->open_mode & O_DISCONNECTED) != 0) {
		mutex_unlock(&context->io_mutex);
		return B_FILE_ERROR;
	}

	// Check for identity, note that it cannot be made above
	// because we always want to return an error on invalid
	// FDs
	select_info* selectInfos = NULL;
	if (oldfd != newfd) {
		// Now do the work
		TFD(Dup2FD(context, oldfd, newfd));

		evicted = context->fds[newfd];
		selectInfos = context->select_infos[newfd];
		context->select_infos[newfd] = NULL;
		atomic_add(&context->fds[oldfd]->ref_count, 1);
		atomic_add(&context->fds[oldfd]->open_count, 1);
		context->fds[newfd] = context->fds[oldfd];

		if (evicted == NULL)
			context->num_used_fds++;
	}

	fd_set_close_on_exec(context, newfd, false);

	mutex_unlock(&context->io_mutex);

	// Say bye bye to the evicted fd
	if (evicted != NULL) {
		deselect_select_infos(evicted, selectInfos, true);
		close_fd(evicted);
		put_fd(evicted);
	}

	return newfd;
}


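// Note the identity case above: when oldfd == newfd, the FD table itself is
// left untouched; the call merely verifies the descriptor, clears the
// close-on-exec flag, and returns newfd.

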
/*!	Duplicates an FD from another team to this/the kernel team.
	\param fromTeam The team which owns the FD.
	\param fd The FD to duplicate.
	\param kernel If \c true, the new FD will be created in the kernel team,
		the current userland team otherwise.
	\return The newly created FD or an error code, if something went wrong.
*/
int
dup_foreign_fd(team_id fromTeam, int fd, bool kernel)
{
	// get the I/O context for the team in question
	Team* team = Team::Get(fromTeam);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	io_context* fromContext = team->io_context;

	// get the file descriptor
	file_descriptor* descriptor = get_fd(fromContext, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	// create a new FD in the target I/O context
	int result = new_fd(get_current_io_context(kernel), descriptor);
	if (result >= 0) {
		// the descriptor reference belongs to the slot, now
		descriptorPutter.Detach();
	}

	return result;
}


static status_t
fd_ioctl(bool kernelFD, int fd, uint32 op, void* buffer, size_t length)
{
	struct file_descriptor* descriptor;
	int status;

	descriptor = get_fd(get_current_io_context(kernelFD), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_ioctl != NULL)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = B_DEV_INVALID_IOCTL;

	if (status == B_DEV_INVALID_IOCTL)
		status = ENOTTY;

	put_fd(descriptor);
	return status;
}


static void
deselect_select_infos(file_descriptor* descriptor, select_info* infos,
	bool putSyncObjects)
{
	TRACE(("deselect_select_infos(%p, %p)\n", descriptor, infos));

	select_info* info = infos;
	while (info != NULL) {
		select_sync* sync = info->sync;

		// deselect the selected events
		uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
		if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
			for (uint16 event = 1; event < 16; event++) {
				if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
					descriptor->ops->fd_deselect(descriptor, event,
						(selectsync*)info);
				}
			}
		}

		notify_select_events(info, B_EVENT_INVALID);
		info = info->next;

		if (putSyncObjects)
			put_select_sync(sync);
	}
}


status_t
select_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("select_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	uint16 eventsToSelect = info->selected_events & ~B_EVENT_INVALID;

	if (descriptor->ops->fd_select == NULL && eventsToSelect != 0) {
		// if the I/O subsystem doesn't support select(), we will
		// immediately notify the select call
		return notify_select_events(info, eventsToSelect);
	}

	// We need the FD to stay open while we're doing this, so no select()/
	// deselect() will be called on it after it is closed.
	atomic_add(&descriptor->open_count, 1);

	locker.Unlock();

	// select any events asked for
	uint32 selectedEvents = 0;

	for (uint16 event = 1; event < 16; event++) {
		if ((eventsToSelect & SELECT_FLAG(event)) != 0
			&& descriptor->ops->fd_select(descriptor, event,
				(selectsync*)info) == B_OK) {
			selectedEvents |= SELECT_FLAG(event);
		}
	}
	info->selected_events = selectedEvents
		| (info->selected_events & B_EVENT_INVALID);

	// Add the info to the IO context. Even if nothing has been selected -- we
	// always support B_EVENT_INVALID.
	locker.Lock();
	if (context->fds[fd] != descriptor) {
		// Someone close()d the index in the meantime. deselect() all
		// events.
		info->next = NULL;
		deselect_select_infos(descriptor, info, false);

		// Release our open reference of the descriptor.
		close_fd(descriptor);
		return B_FILE_ERROR;
	}

	// The FD index hasn't changed, so we add the select info to the table.

	info->next = context->select_infos[fd];
	context->select_infos[fd] = info;

	// As long as the info is in the list, we keep a reference to the sync
	// object.
	atomic_add(&info->sync->ref_count, 1);

	// Finally release our open reference. It is safe just to decrement,
	// since as long as the descriptor is associated with the slot,
	// someone else still has it open.
	atomic_add(&descriptor->open_count, -1);

	return B_OK;
}


status_t
deselect_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("deselect_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// remove the info from the IO context

	select_info** infoLocation = &context->select_infos[fd];
	while (*infoLocation != NULL && *infoLocation != info)
		infoLocation = &(*infoLocation)->next;

	// If not found, someone else beat us to it.
	if (*infoLocation != info)
		return B_OK;

	*infoLocation = info->next;

	locker.Unlock();

	// deselect the selected events
	uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
	if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
		for (uint16 event = 1; event < 16; event++) {
			if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
				descriptor->ops->fd_deselect(descriptor, event,
					(selectsync*)info);
			}
		}
	}

	put_select_sync(info->sync);

	return B_OK;
}


/*!	This function checks if the specified fd is valid in the current
	context. It can be used for a quick check; the fd is not locked
	so it could become invalid immediately after this check.
*/
bool
fd_is_valid(int fd, bool kernel)
{
	struct file_descriptor* descriptor
		= get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return false;

	put_fd(descriptor);
	return true;
}


struct vnode*
fd_vnode(struct file_descriptor* descriptor)
{
	switch (descriptor->type) {
		case FDTYPE_FILE:
		case FDTYPE_DIR:
		case FDTYPE_ATTR:
		case FDTYPE_ATTR_DIR:
			return descriptor->u.vnode;
	}

	return NULL;
}


static status_t
common_close(int fd, bool kernel)
{
	return close_fd_index(get_current_io_context(kernel), fd);
}


static ssize_t
common_user_io(int fd, off_t pos, void* buffer, size_t length, bool write)
{
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
			: (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		return B_FILE_ERROR;
	}

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (write ? descriptor->ops->fd_write == NULL
			: descriptor->ops->fd_read == NULL) {
		return B_BAD_VALUE;
	}

	SyscallRestartWrapper<status_t> status;

	if (write)
		status = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
	else
		status = descriptor->ops->fd_read(descriptor, pos, buffer, &length);

	if (status != B_OK)
		return status;

	if (movePosition)
		descriptor->pos = pos + length;

	return length <= SSIZE_MAX ? (ssize_t)length : SSIZE_MAX;
}


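// On the position convention used above (an interpretation of this function
// alone, not a statement about the syscall glue elsewhere): callers pass
// pos == -1 to mean "use and advance the descriptor's current position", in
// the style of read()/write(), while a non-negative pos performs positioned
// I/O in the style of pread()/pwrite() without moving descriptor->pos.

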
static ssize_t
common_user_vector_io(int fd, off_t pos, const iovec* userVecs, size_t count,
	bool write)
{
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	// prevent integer overflow exploit in malloc()
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
			: (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		return B_FILE_ERROR;
	}

	iovec* vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL)
		return B_NO_MEMORY;
	MemoryDeleter _(vecs);

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) != B_OK)
		return B_BAD_ADDRESS;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (write ? descriptor->ops->fd_write == NULL
			: descriptor->ops->fd_read == NULL) {
		return B_BAD_VALUE;
	}

	SyscallRestartWrapper<status_t> status;

	ssize_t bytesTransferred = 0;
	for (uint32 i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		if (write) {
			status = descriptor->ops->fd_write(descriptor, pos,
				vecs[i].iov_base, &length);
		} else {
			status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base,
				&length);
		}

		if (status != B_OK) {
			if (bytesTransferred == 0)
				return status;
			status = B_OK;
			break;
		}

		if ((uint64)bytesTransferred + length > SSIZE_MAX)
			bytesTransferred = SSIZE_MAX;
		else
			bytesTransferred += (ssize_t)length;

		pos += length;

		if (length < vecs[i].iov_len)
			break;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesTransferred;
}


status_t
user_fd_kernel_ioctl(int fd, uint32 op, void* buffer, size_t length)
{
	TRACE(("user_fd_kernel_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
}


//	#pragma mark - User syscalls


ssize_t
_user_read(int fd, off_t pos, void* buffer, size_t length)
{
	return common_user_io(fd, pos, buffer, length, false);
}


ssize_t
_user_readv(int fd, off_t pos, const iovec* userVecs, size_t count)
{
	return common_user_vector_io(fd, pos, userVecs, count, false);
}


ssize_t
_user_write(int fd, off_t pos, const void* buffer, size_t length)
{
	return common_user_io(fd, pos, (void*)buffer, length, true);
}


ssize_t
_user_writev(int fd, off_t pos, const iovec* userVecs, size_t count)
{
	return common_user_vector_io(fd, pos, userVecs, count, true);
}


off_t
_user_seek(int fd, off_t pos, int seekType)
{
	syscall_64_bit_return_value();

	struct file_descriptor* descriptor;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	TRACE(("user_seek(descriptor = %p)\n", descriptor));

	if (descriptor->ops->fd_seek != NULL)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}


status_t
_user_ioctl(int fd, uint32 op, void* buffer, size_t length)
{
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_ioctl: fd %d\n", fd));

	SyscallRestartWrapper<status_t> status;

	return status = fd_ioctl(false, fd, op, buffer, length);
}


ssize_t
_user_read_dir(int fd, struct dirent* userBuffer, size_t bufferSize,
	uint32 maxCount)
{
	TRACE(("user_read_dir(fd = %d, userBuffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, userBuffer, bufferSize, maxCount));

	if (maxCount == 0)
		return 0;

	if (userBuffer == NULL || !IS_USER_ADDRESS(userBuffer))
		return B_BAD_ADDRESS;

	// get I/O context and FD
	io_context* ioContext = get_current_io_context(false);
	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(ioContext, fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir == NULL)
		return B_UNSUPPORTED;

	// restrict buffer size and allocate a heap buffer
	if (bufferSize > kMaxReadDirBufferSize)
		bufferSize = kMaxReadDirBufferSize;
	struct dirent* buffer = (struct dirent*)malloc(bufferSize);
	if (buffer == NULL)
		return B_NO_MEMORY;
	MemoryDeleter bufferDeleter(buffer);

	// read the directory
	uint32 count = maxCount;
	status_t status = descriptor->ops->fd_read_dir(ioContext, descriptor,
		buffer, bufferSize, &count);
	if (status != B_OK)
		return status;

	// copy the buffer back -- determine the total buffer size first
	size_t sizeToCopy = 0;
	struct dirent* entry = buffer;
	for (uint32 i = 0; i < count; i++) {
		size_t length = entry->d_reclen;
		sizeToCopy += length;
		entry = (struct dirent*)((uint8*)entry + length);
	}

	if (user_memcpy(userBuffer, buffer, sizeToCopy) != B_OK)
		return B_BAD_ADDRESS;

	return count;
}


status_t
_user_rewind_dir(int fd)
{
	struct file_descriptor* descriptor;
	status_t status;

	TRACE(("user_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir != NULL)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = B_UNSUPPORTED;

	put_fd(descriptor);
	return status;
}


status_t
_user_close(int fd)
{
	return common_close(fd, false);
}


int
_user_dup(int fd)
{
	return dup_fd(fd, false);
}


int
_user_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, false);
}


//	#pragma mark - Kernel calls


ssize_t
_kern_read(int fd, off_t pos, void* buffer, size_t length)
{
	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY)
		return B_FILE_ERROR;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	SyscallFlagUnsetter _;

	if (descriptor->ops->fd_read == NULL)
		return B_BAD_VALUE;

	ssize_t bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer,
		&length);
	if (bytesRead >= B_OK) {
		if (length > SSIZE_MAX)
			bytesRead = SSIZE_MAX;
		else
			bytesRead = (ssize_t)length;

		if (movePosition)
			descriptor->pos = pos + length;
	}

	return bytesRead;
}


ssize_t
_kern_readv(int fd, off_t pos, const iovec* vecs, size_t count)
{
	bool movePosition = false;
	status_t status;
	uint32 i;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY)
		return B_FILE_ERROR;

	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_read == NULL)
		return B_BAD_VALUE;

	SyscallFlagUnsetter _;

	ssize_t bytesRead = 0;

	for (i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base,
			&length);
		if (status != B_OK) {
			bytesRead = status;
			break;
		}

		if ((uint64)bytesRead + length > SSIZE_MAX)
			bytesRead = SSIZE_MAX;
		else
			bytesRead += (ssize_t)length;

		pos += vecs[i].iov_len;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesRead;
}


ssize_t
_kern_write(int fd, off_t pos, const void* buffer, size_t length)
{
	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY)
		return B_FILE_ERROR;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_write == NULL)
		return B_BAD_VALUE;

	SyscallFlagUnsetter _;

	ssize_t bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer,
		&length);
	if (bytesWritten >= B_OK) {
		if (length > SSIZE_MAX)
			bytesWritten = SSIZE_MAX;
		else
			bytesWritten = (ssize_t)length;

		if (movePosition)
			descriptor->pos = pos + length;
	}

	return bytesWritten;
}


ssize_t
_kern_writev(int fd, off_t pos, const iovec* vecs, size_t count)
{
	bool movePosition = false;
	status_t status;
	uint32 i;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY)
		return B_FILE_ERROR;

	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_write == NULL)
		return B_BAD_VALUE;

	SyscallFlagUnsetter _;

	ssize_t bytesWritten = 0;

	for (i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		status = descriptor->ops->fd_write(descriptor, pos,
			vecs[i].iov_base, &length);
		if (status != B_OK) {
			bytesWritten = status;
			break;
		}

		if ((uint64)bytesWritten + length > SSIZE_MAX)
			bytesWritten = SSIZE_MAX;
		else
			bytesWritten += (ssize_t)length;

		pos += vecs[i].iov_len;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesWritten;
}


off_t
_kern_seek(int fd, off_t pos, int seekType)
{
	struct file_descriptor* descriptor;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}


status_t
_kern_ioctl(int fd, uint32 op, void* buffer, size_t length)
{
	TRACE(("kern_ioctl: fd %d\n", fd));

	SyscallFlagUnsetter _;

	return fd_ioctl(true, fd, op, buffer, length);
}


ssize_t
_kern_read_dir(int fd, struct dirent* buffer, size_t bufferSize,
	uint32 maxCount)
{
	struct file_descriptor* descriptor;
	ssize_t retval;

	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, buffer, bufferSize, maxCount));

	struct io_context* ioContext = get_current_io_context(true);
	descriptor = get_fd(ioContext, fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir) {
		uint32 count = maxCount;
		retval = descriptor->ops->fd_read_dir(ioContext, descriptor, buffer,
			bufferSize, &count);
		if (retval >= 0)
			retval = count;
	} else
		retval = B_UNSUPPORTED;

	put_fd(descriptor);
	return retval;
}


status_t
_kern_rewind_dir(int fd)
{
	struct file_descriptor* descriptor;
	status_t status;

	TRACE(("sys_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = B_UNSUPPORTED;

	put_fd(descriptor);
	return status;
}


status_t
_kern_close(int fd)
{
	return common_close(fd, true);
}


int
_kern_dup(int fd)
{
	return dup_fd(fd, true);
}


int
_kern_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, true);
}