vfs: check userland buffers before reading them.
src/system/kernel/fs/fd.cpp

/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2015, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


//! Operations on file descriptors


#include <fd.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <AutoDeleter.h>

#include <syscalls.h>
#include <syscall_restart.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <wait_for_objects.h>

#include "vfs_tracing.h"


//#define TRACE_FD
#ifdef TRACE_FD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


static const size_t kMaxReadDirBufferSize = 64 * 1024;


static struct file_descriptor* get_fd_locked(struct io_context* context,
	int fd);
static struct file_descriptor* remove_fd(struct io_context* context, int fd);
static void deselect_select_infos(file_descriptor* descriptor,
	select_info* infos, bool putSyncObjects);


struct FDGetterLocking {
	inline bool Lock(file_descriptor* /*lockable*/)
	{
		return false;
	}

	inline void Unlock(file_descriptor* lockable)
	{
		put_fd(lockable);
	}
};

class FDGetter : public AutoLocker<file_descriptor, FDGetterLocking> {
public:
	inline FDGetter()
		: AutoLocker<file_descriptor, FDGetterLocking>()
	{
	}

	inline FDGetter(io_context* context, int fd, bool contextLocked = false)
		: AutoLocker<file_descriptor, FDGetterLocking>(
			contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd))
	{
	}

	inline file_descriptor* SetTo(io_context* context, int fd,
		bool contextLocked = false)
	{
		file_descriptor* descriptor
			= contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd);
		AutoLocker<file_descriptor, FDGetterLocking>::SetTo(descriptor, true);
		return descriptor;
	}

	inline file_descriptor* SetTo(int fd, bool kernel,
		bool contextLocked = false)
	{
		return SetTo(get_current_io_context(kernel), fd, contextLocked);
	}

	inline file_descriptor* FD() const
	{
		return fLockable;
	}
};


// #pragma mark - General fd routines


#ifdef DEBUG
void dump_fd(int fd, struct file_descriptor* descriptor);

void
dump_fd(int fd, struct file_descriptor* descriptor)
{
	dprintf("fd[%d] = %p: type = %" B_PRId32 ", ref_count = %" B_PRId32 ", ops "
		"= %p, u.vnode = %p, u.mount = %p, cookie = %p, open_mode = %" B_PRIx32
		", pos = %" B_PRId64 "\n",
		fd, descriptor, descriptor->type, descriptor->ref_count,
		descriptor->ops, descriptor->u.vnode, descriptor->u.mount,
		descriptor->cookie, descriptor->open_mode, descriptor->pos);
}
#endif


/*!	Allocates and initializes a new file_descriptor.
*/
struct file_descriptor*
alloc_fd(void)
{
	file_descriptor* descriptor
		= (file_descriptor*)malloc(sizeof(struct file_descriptor));
	if (descriptor == NULL)
		return NULL;

	descriptor->u.vnode = NULL;
	descriptor->cookie = NULL;
	descriptor->ref_count = 1;
	descriptor->open_count = 0;
	descriptor->open_mode = 0;
	descriptor->pos = 0;

	return descriptor;
}


bool
fd_close_on_exec(struct io_context* context, int fd)
{
	return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
}


void
fd_set_close_on_exec(struct io_context* context, int fd, bool closeFD)
{
	if (closeFD)
		context->fds_close_on_exec[fd / 8] |= (1 << (fd & 7));
	else
		context->fds_close_on_exec[fd / 8] &= ~(1 << (fd & 7));
}
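
/*	Illustrative note (not part of the original file): the bitmap math in
	fd_set_close_on_exec() above packs eight FDs per byte -- fd / 8 picks
	the byte, fd & 7 the bit within it. For example, for fd == 10:

		context->fds_close_on_exec[10 / 8]	// byte 1
			|= 1 << (10 & 7);				// bit 2, i.e. mask 0x04
*/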
/*!	Searches a free slot in the FD table of the provided I/O context, and
	inserts the specified descriptor into it.
*/
int
new_fd_etc(struct io_context* context, struct file_descriptor* descriptor,
	int firstIndex)
{
	int fd = -1;
	uint32 i;

	mutex_lock(&context->io_mutex);

	for (i = firstIndex; i < context->table_size; i++) {
		if (!context->fds[i]) {
			fd = i;
			break;
		}
	}
	if (fd < 0) {
		fd = B_NO_MORE_FDS;
		goto err;
	}

	TFD(NewFD(context, fd, descriptor));

	context->fds[fd] = descriptor;
	context->num_used_fds++;
	atomic_add(&descriptor->open_count, 1);

err:
	mutex_unlock(&context->io_mutex);

	return fd;
}
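
/*	Usage sketch for new_fd_etc() (illustrative; "minFD" is a hypothetical
	variable): F_DUPFD-style allocation asks for the smallest free slot at
	or above a lower bound via firstIndex:

		int fd = new_fd_etc(context, descriptor, minFD);
		if (fd < 0)
			;	// B_NO_MORE_FDS: no free slot found

	new_fd() below is simply the firstIndex == 0 case.
*/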
int
new_fd(struct io_context* context, struct file_descriptor* descriptor)
{
	return new_fd_etc(context, descriptor, 0);
}


/*!	Reduces the descriptor's reference counter, and frees all resources
	when it's no longer used.
*/
void
put_fd(struct file_descriptor* descriptor)
{
	int32 previous = atomic_add(&descriptor->ref_count, -1);

	TFD(PutFD(descriptor));

	TRACE(("put_fd(descriptor = %p [ref = %ld, cookie = %p])\n",
		descriptor, descriptor->ref_count, descriptor->cookie));

	// free the descriptor if we don't need it anymore
	if (previous == 1) {
		// free the underlying object
		if (descriptor->ops != NULL && descriptor->ops->fd_free != NULL)
			descriptor->ops->fd_free(descriptor);

		free(descriptor);
	} else if ((descriptor->open_mode & O_DISCONNECTED) != 0
		&& previous - 1 == descriptor->open_count
		&& descriptor->ops != NULL) {
		// the descriptor has been disconnected - it cannot
		// be accessed anymore, let's close it (no one is
		// currently accessing this descriptor)

		if (descriptor->ops->fd_close)
			descriptor->ops->fd_close(descriptor);
		if (descriptor->ops->fd_free)
			descriptor->ops->fd_free(descriptor);

		// prevent this descriptor from being closed/freed again
		descriptor->ops = NULL;
		descriptor->u.vnode = NULL;

		// the file descriptor is kept intact, so that it's not
		// reused until someone explicitly closes it
	}
}
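
/*	A minimal sketch of the reference pairing (illustrative): every
	successful get_fd() must be balanced by a put_fd(); the descriptor is
	freed only when the last reference is gone.

		file_descriptor* descriptor = get_fd(context, fd);
		if (descriptor != NULL) {
			// ... use the descriptor ...
			put_fd(descriptor);
		}
*/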
/*!	Decrements the open counter of the file descriptor and invokes
	its close hook when appropriate.
*/
void
close_fd(struct file_descriptor* descriptor)
{
	if (atomic_add(&descriptor->open_count, -1) == 1) {
		vfs_unlock_vnode_if_locked(descriptor);

		if (descriptor->ops != NULL && descriptor->ops->fd_close != NULL)
			descriptor->ops->fd_close(descriptor);
	}
}


status_t
close_fd_index(struct io_context* context, int fd)
{
	struct file_descriptor* descriptor = remove_fd(context, fd);

	if (descriptor == NULL)
		return B_FILE_ERROR;

	close_fd(descriptor);
	put_fd(descriptor);
		// the reference associated with the slot

	return B_OK;
}


/*!	This descriptor's underlying object will be closed and freed as soon as
	possible (in one of the next calls to put_fd() - get_fd() will no longer
	succeed on this descriptor).
	This is useful if the underlying object is gone, for instance when a
	(mounted) volume got removed unexpectedly.
*/
void
disconnect_fd(struct file_descriptor* descriptor)
{
	descriptor->open_mode |= O_DISCONNECTED;
}


void
inc_fd_ref_count(struct file_descriptor* descriptor)
{
	atomic_add(&descriptor->ref_count, 1);
}


static struct file_descriptor*
get_fd_locked(struct io_context* context, int fd)
{
	if (fd < 0 || (uint32)fd >= context->table_size)
		return NULL;

	struct file_descriptor* descriptor = context->fds[fd];

	if (descriptor != NULL) {
		TFD(GetFD(context, fd, descriptor));
		inc_fd_ref_count(descriptor);
	}

	return descriptor;
}


struct file_descriptor*
get_fd(struct io_context* context, int fd)
{
	MutexLocker _(context->io_mutex);

	return get_fd_locked(context, fd);
}


struct file_descriptor*
get_open_fd(struct io_context* context, int fd)
{
	MutexLocker _(context->io_mutex);

	file_descriptor* descriptor = get_fd_locked(context, fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return NULL;

	atomic_add(&descriptor->open_count, 1);

	return descriptor;
}


/*!	Removes the file descriptor from the specified slot.
*/
static struct file_descriptor*
remove_fd(struct io_context* context, int fd)
{
	struct file_descriptor* descriptor = NULL;

	if (fd < 0)
		return NULL;

	mutex_lock(&context->io_mutex);

	if ((uint32)fd < context->table_size)
		descriptor = context->fds[fd];

	select_info* selectInfos = NULL;
	bool disconnected = false;

	if (descriptor != NULL) {
		// fd is valid
		TFD(RemoveFD(context, fd, descriptor));

		context->fds[fd] = NULL;
		fd_set_close_on_exec(context, fd, false);
		context->num_used_fds--;

		selectInfos = context->select_infos[fd];
		context->select_infos[fd] = NULL;

		disconnected = (descriptor->open_mode & O_DISCONNECTED);
	}

	mutex_unlock(&context->io_mutex);

	if (selectInfos != NULL)
		deselect_select_infos(descriptor, selectInfos, true);

	return disconnected ? NULL : descriptor;
}


static int
dup_fd(int fd, bool kernel)
{
	struct io_context* context = get_current_io_context(kernel);
	struct file_descriptor* descriptor;
	int status;

	TRACE(("dup_fd: fd = %d\n", fd));

	// Try to get the fd structure
	descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// now put the fd in place
	status = new_fd(context, descriptor);
	if (status < 0)
		put_fd(descriptor);
	else {
		mutex_lock(&context->io_mutex);
		fd_set_close_on_exec(context, status, false);
		mutex_unlock(&context->io_mutex);
	}

	return status;
}


/*!	POSIX says this should be the same as:
		close(newfd);
		fcntl(oldfd, F_DUPFD, newfd);

	We do dup2() directly to be thread-safe.
*/
static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
	struct file_descriptor* evicted = NULL;
	struct io_context* context;

	TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));

	// quick check
	if (oldfd < 0 || newfd < 0)
		return B_FILE_ERROR;

	// Get current I/O context and lock it
	context = get_current_io_context(kernel);
	mutex_lock(&context->io_mutex);

	// Check if the fds are valid (mutex must be locked because
	// the table size could be changed)
	if ((uint32)oldfd >= context->table_size
		|| (uint32)newfd >= context->table_size
		|| context->fds[oldfd] == NULL
		|| (context->fds[oldfd]->open_mode & O_DISCONNECTED) != 0) {
		mutex_unlock(&context->io_mutex);
		return B_FILE_ERROR;
	}

	// Check for identity, note that it cannot be made above
	// because we always want to return an error on invalid
	// handles
	select_info* selectInfos = NULL;
	if (oldfd != newfd) {
		// Now do the work
		TFD(Dup2FD(context, oldfd, newfd));

		evicted = context->fds[newfd];
		selectInfos = context->select_infos[newfd];
		context->select_infos[newfd] = NULL;
		atomic_add(&context->fds[oldfd]->ref_count, 1);
		atomic_add(&context->fds[oldfd]->open_count, 1);
		context->fds[newfd] = context->fds[oldfd];

		if (evicted == NULL)
			context->num_used_fds++;
	}

	fd_set_close_on_exec(context, newfd, false);

	mutex_unlock(&context->io_mutex);

	// Say bye bye to the evicted fd
	if (evicted) {
		deselect_select_infos(evicted, selectInfos, true);
		close_fd(evicted);
		put_fd(evicted);
	}

	return newfd;
}
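
/*	Usage sketch (hypothetical FDs): redirecting stderr onto stdout the
	way a shell implements "2>&1". Any descriptor previously installed at
	newfd is evicted and closed as part of the same operation:

		dup2_fd(1, 2, false);
			// fd 2 now refers to fd 1's descriptor
*/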
/*!	Duplicates an FD from another team to this/the kernel team.
	\param fromTeam The team which owns the FD.
	\param fd The FD to duplicate.
	\param kernel If \c true, the new FD will be created in the kernel team,
		the current userland team otherwise.
	\return The newly created FD or an error code, if something went wrong.
*/
int
dup_foreign_fd(team_id fromTeam, int fd, bool kernel)
{
	// get the I/O context for the team in question
	Team* team = Team::Get(fromTeam);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	io_context* fromContext = team->io_context;

	// get the file descriptor
	file_descriptor* descriptor = get_fd(fromContext, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	// create a new FD in the target I/O context
	int result = new_fd(get_current_io_context(kernel), descriptor);
	if (result >= 0) {
		// the descriptor reference belongs to the slot, now
		descriptorPutter.Detach();
	}

	return result;
}


static status_t
fd_ioctl(bool kernelFD, int fd, uint32 op, void* buffer, size_t length)
{
	struct file_descriptor* descriptor;
	int status;

	descriptor = get_fd(get_current_io_context(kernelFD), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_ioctl != NULL)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = B_DEV_INVALID_IOCTL;

	if (status == B_DEV_INVALID_IOCTL)
		status = ENOTTY;

	put_fd(descriptor);
	return status;
}


static void
deselect_select_infos(file_descriptor* descriptor, select_info* infos,
	bool putSyncObjects)
{
	TRACE(("deselect_select_infos(%p, %p)\n", descriptor, infos));

	select_info* info = infos;
	while (info != NULL) {
		select_sync* sync = info->sync;

		// deselect the selected events
		uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
		if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
			for (uint16 event = 1; event < 16; event++) {
				if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
					descriptor->ops->fd_deselect(descriptor, event,
						(selectsync*)info);
				}
			}
		}

		notify_select_events(info, B_EVENT_INVALID);
		info = info->next;

		if (putSyncObjects)
			put_select_sync(sync);
	}
}


status_t
select_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("select_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	uint16 eventsToSelect = info->selected_events & ~B_EVENT_INVALID;

	if (descriptor->ops->fd_select == NULL && eventsToSelect != 0) {
		// if the I/O subsystem doesn't support select(), we will
		// immediately notify the select call
		return notify_select_events(info, eventsToSelect);
	}

	// We need the FD to stay open while we're doing this, so no select()/
	// deselect() will be called on it after it is closed.
	atomic_add(&descriptor->open_count, 1);

	locker.Unlock();

	// select any events asked for
	uint32 selectedEvents = 0;

	for (uint16 event = 1; event < 16; event++) {
		if ((eventsToSelect & SELECT_FLAG(event)) != 0
			&& descriptor->ops->fd_select(descriptor, event,
				(selectsync*)info) == B_OK) {
			selectedEvents |= SELECT_FLAG(event);
		}
	}
	info->selected_events = selectedEvents
		| (info->selected_events & B_EVENT_INVALID);

	// Add the info to the IO context. Even if nothing has been selected -- we
	// always support B_EVENT_INVALID.
	locker.Lock();
	if (context->fds[fd] != descriptor) {
		// Someone close()d the index in the meantime. deselect() all
		// events.
		info->next = NULL;
		deselect_select_infos(descriptor, info, false);

		// Release our open reference of the descriptor.
		close_fd(descriptor);
		return B_FILE_ERROR;
	}

	// The FD index hasn't changed, so we add the select info to the table.

	info->next = context->select_infos[fd];
	context->select_infos[fd] = info;

	// As long as the info is in the list, we keep a reference to the sync
	// object.
	atomic_add(&info->sync->ref_count, 1);

	// Finally release our open reference. It is safe just to decrement,
	// since as long as the descriptor is associated with the slot,
	// someone else still has it open.
	atomic_add(&descriptor->open_count, -1);

	return B_OK;
}


status_t
deselect_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("deselect_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// remove the info from the IO context

	select_info** infoLocation = &context->select_infos[fd];
	while (*infoLocation != NULL && *infoLocation != info)
		infoLocation = &(*infoLocation)->next;

	// If not found, someone else beat us to it.
	if (*infoLocation != info)
		return B_OK;

	*infoLocation = info->next;

	locker.Unlock();

	// deselect the selected events
	uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
	if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
		for (uint16 event = 1; event < 16; event++) {
			if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
				descriptor->ops->fd_deselect(descriptor, event,
					(selectsync*)info);
			}
		}
	}

	put_select_sync(info->sync);

	return B_OK;
}


/*!	This function checks if the specified fd is valid in the current
	context. It can be used for a quick check; the fd is not locked
	so it could become invalid immediately after this check.
*/
bool
fd_is_valid(int fd, bool kernel)
{
	struct file_descriptor* descriptor
		= get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return false;

	put_fd(descriptor);
	return true;
}


struct vnode*
fd_vnode(struct file_descriptor* descriptor)
{
	switch (descriptor->type) {
		case FDTYPE_FILE:
		case FDTYPE_DIR:
		case FDTYPE_ATTR_DIR:
		case FDTYPE_ATTR:
			return descriptor->u.vnode;
	}

	return NULL;
}


static status_t
common_close(int fd, bool kernel)
{
	return close_fd_index(get_current_io_context(kernel), fd);
}


static ssize_t
common_user_io(int fd, off_t pos, void* buffer, size_t length, bool write)
{
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
			: (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		return B_FILE_ERROR;
	}

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (write ? descriptor->ops->fd_write == NULL
			: descriptor->ops->fd_read == NULL) {
		return B_BAD_VALUE;
	}

	SyscallRestartWrapper<status_t> status;

	if (write)
		status = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
	else
		status = descriptor->ops->fd_read(descriptor, pos, buffer, &length);

	if (status != B_OK)
		return status;

	if (movePosition)
		descriptor->pos = pos + length;

	return length <= SSIZE_MAX ? (ssize_t)length : SSIZE_MAX;
}
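
/*	A note on the pos convention handled above, with illustrative calls:
	pos == -1 means "use and advance the descriptor's current position";
	any other non-negative offset leaves the position untouched.

		_user_read(fd, -1, buffer, size);	// sequential, advances pos
		_user_read(fd, 0, buffer, size);	// pread()-style, pos unchanged
*/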
static ssize_t
common_user_vector_io(int fd, off_t pos, const iovec* userVecs, size_t count,
	bool write)
{
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	// prevent integer overflow exploit in malloc()
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
			: (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		return B_FILE_ERROR;
	}

	iovec* vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL)
		return B_NO_MEMORY;
	MemoryDeleter _(vecs);

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) != B_OK)
		return B_BAD_ADDRESS;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (write ? descriptor->ops->fd_write == NULL
			: descriptor->ops->fd_read == NULL) {
		return B_BAD_VALUE;
	}

	SyscallRestartWrapper<status_t> status;

	ssize_t bytesTransferred = 0;
	for (uint32 i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		if (write) {
			status = descriptor->ops->fd_write(descriptor, pos,
				vecs[i].iov_base, &length);
		} else {
			status = descriptor->ops->fd_read(descriptor, pos,
				vecs[i].iov_base, &length);
		}

		if (status != B_OK) {
			if (bytesTransferred == 0)
				return status;
			status = B_OK;
			break;
		}

		if ((uint64)bytesTransferred + length > SSIZE_MAX)
			bytesTransferred = SSIZE_MAX;
		else
			bytesTransferred += (ssize_t)length;

		pos += length;

		if (length < vecs[i].iov_len)
			break;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesTransferred;
}
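
/*	Partial-transfer semantics of the loop above, seen from userland
	(hypothetical buffers bufA/bufB): once some bytes have moved, a short
	vector or a later error ends the loop and the short count is returned
	instead of an error -- the usual readv()/writev() contract.

		iovec vecs[2] = { { bufA, 100 }, { bufB, 100 } };
		ssize_t transferred = readv(fd, vecs, 2);
			// may be anywhere in [1, 200] on success
*/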
status_t
user_fd_kernel_ioctl(int fd, uint32 op, void* buffer, size_t length)
{
	TRACE(("user_fd_kernel_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
}


// #pragma mark - User syscalls


ssize_t
_user_read(int fd, off_t pos, void* buffer, size_t length)
{
	return common_user_io(fd, pos, buffer, length, false);
}


ssize_t
_user_readv(int fd, off_t pos, const iovec* userVecs, size_t count)
{
	return common_user_vector_io(fd, pos, userVecs, count, false);
}


ssize_t
_user_write(int fd, off_t pos, const void* buffer, size_t length)
{
	return common_user_io(fd, pos, (void*)buffer, length, true);
}


ssize_t
_user_writev(int fd, off_t pos, const iovec* userVecs, size_t count)
{
	return common_user_vector_io(fd, pos, userVecs, count, true);
}


off_t
_user_seek(int fd, off_t pos, int seekType)
{
	syscall_64_bit_return_value();

	struct file_descriptor* descriptor;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	TRACE(("user_seek(descriptor = %p)\n", descriptor));

	if (descriptor->ops->fd_seek != NULL)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}


status_t
_user_ioctl(int fd, uint32 op, void* buffer, size_t length)
{
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_ioctl: fd %d\n", fd));

	SyscallRestartWrapper<status_t> status;

	return status = fd_ioctl(false, fd, op, buffer, length);
}


ssize_t
_user_read_dir(int fd, struct dirent* userBuffer, size_t bufferSize,
	uint32 maxCount)
{
	TRACE(("user_read_dir(fd = %d, userBuffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, userBuffer, bufferSize, maxCount));

	if (maxCount == 0)
		return 0;

	if (userBuffer == NULL || !IS_USER_ADDRESS(userBuffer))
		return B_BAD_ADDRESS;

	// get I/O context and FD
	io_context* ioContext = get_current_io_context(false);
	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(ioContext, fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir == NULL)
		return B_UNSUPPORTED;

	// restrict buffer size and allocate a heap buffer
	if (bufferSize > kMaxReadDirBufferSize)
		bufferSize = kMaxReadDirBufferSize;
	struct dirent* buffer = (struct dirent*)malloc(bufferSize);
	if (buffer == NULL)
		return B_NO_MEMORY;
	MemoryDeleter bufferDeleter(buffer);

	// read the directory
	uint32 count = maxCount;
	status_t status = descriptor->ops->fd_read_dir(ioContext, descriptor,
		buffer, bufferSize, &count);
	if (status != B_OK)
		return status;

	// copy the buffer back -- determine the total buffer size first
	size_t sizeToCopy = 0;
	struct dirent* entry = buffer;
	for (uint32 i = 0; i < count; i++) {
		size_t length = entry->d_reclen;
		sizeToCopy += length;
		entry = (struct dirent*)((uint8*)entry + length);
	}

	if (user_memcpy(userBuffer, buffer, sizeToCopy) != B_OK)
		return B_BAD_ADDRESS;

	return count;
}
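
/*	Sketch of how a userland caller walks the entries copied back above
	(illustrative): entries are packed back to back, and d_reclen is the
	stride from one dirent to the next.

		struct dirent* entry = (struct dirent*)buffer;
		for (uint32 i = 0; i < count; i++) {
			// ... use entry->d_name ...
			entry = (struct dirent*)((uint8*)entry + entry->d_reclen);
		}
*/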
status_t
_user_rewind_dir(int fd)
{
	struct file_descriptor* descriptor;
	status_t status;

	TRACE(("user_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir != NULL)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = B_UNSUPPORTED;

	put_fd(descriptor);
	return status;
}


status_t
_user_close(int fd)
{
	return common_close(fd, false);
}


int
_user_dup(int fd)
{
	return dup_fd(fd, false);
}


int
_user_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, false);
}


// #pragma mark - Kernel calls


ssize_t
_kern_read(int fd, off_t pos, void* buffer, size_t length)
{
	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY)
		return B_FILE_ERROR;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	SyscallFlagUnsetter _;

	if (descriptor->ops->fd_read == NULL)
		return B_BAD_VALUE;

	ssize_t bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer,
		&length);
	if (bytesRead >= B_OK) {
		if (length > SSIZE_MAX)
			bytesRead = SSIZE_MAX;
		else
			bytesRead = (ssize_t)length;

		if (movePosition)
			descriptor->pos = pos + length;
	}

	return bytesRead;
}


ssize_t
_kern_readv(int fd, off_t pos, const iovec* vecs, size_t count)
{
	bool movePosition = false;
	status_t status;
	uint32 i;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY)
		return B_FILE_ERROR;

	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_read == NULL)
		return B_BAD_VALUE;

	SyscallFlagUnsetter _;

	ssize_t bytesRead = 0;

	for (i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base,
			&length);
		if (status != B_OK) {
			bytesRead = status;
			break;
		}

		if ((uint64)bytesRead + length > SSIZE_MAX)
			bytesRead = SSIZE_MAX;
		else
			bytesRead += (ssize_t)length;

		pos += vecs[i].iov_len;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesRead;
}


ssize_t
_kern_write(int fd, off_t pos, const void* buffer, size_t length)
{
	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY)
		return B_FILE_ERROR;

	bool movePosition = false;
	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_write == NULL)
		return B_BAD_VALUE;

	SyscallFlagUnsetter _;

	ssize_t bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer,
		&length);
	if (bytesWritten >= B_OK) {
		if (length > SSIZE_MAX)
			bytesWritten = SSIZE_MAX;
		else
			bytesWritten = (ssize_t)length;

		if (movePosition)
			descriptor->pos = pos + length;
	}

	return bytesWritten;
}


ssize_t
_kern_writev(int fd, off_t pos, const iovec* vecs, size_t count)
{
	bool movePosition = false;
	status_t status;
	uint32 i;

	if (pos < -1)
		return B_BAD_VALUE;

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);

	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY)
		return B_FILE_ERROR;

	if (pos == -1) {
		pos = descriptor->pos;
		movePosition = true;
	}

	if (descriptor->ops->fd_write == NULL)
		return B_BAD_VALUE;

	SyscallFlagUnsetter _;

	ssize_t bytesWritten = 0;

	for (i = 0; i < count; i++) {
		size_t length = vecs[i].iov_len;
		status = descriptor->ops->fd_write(descriptor, pos,
			vecs[i].iov_base, &length);
		if (status != B_OK) {
			bytesWritten = status;
			break;
		}

		if ((uint64)bytesWritten + length > SSIZE_MAX)
			bytesWritten = SSIZE_MAX;
		else
			bytesWritten += (ssize_t)length;

		pos += vecs[i].iov_len;
	}

	if (movePosition)
		descriptor->pos = pos;

	return bytesWritten;
}


off_t
_kern_seek(int fd, off_t pos, int seekType)
{
	struct file_descriptor* descriptor;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}


status_t
_kern_ioctl(int fd, uint32 op, void* buffer, size_t length)
{
	TRACE(("kern_ioctl: fd %d\n", fd));

	SyscallFlagUnsetter _;

	return fd_ioctl(true, fd, op, buffer, length);
}


ssize_t
_kern_read_dir(int fd, struct dirent* buffer, size_t bufferSize,
	uint32 maxCount)
{
	struct file_descriptor* descriptor;
	ssize_t retval;

	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, buffer, bufferSize, maxCount));

	struct io_context* ioContext = get_current_io_context(true);
	descriptor = get_fd(ioContext, fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir) {
		uint32 count = maxCount;
		retval = descriptor->ops->fd_read_dir(ioContext, descriptor, buffer,
			bufferSize, &count);
		if (retval >= 0)
			retval = count;
	} else
		retval = B_UNSUPPORTED;

	put_fd(descriptor);
	return retval;
}


status_t
_kern_rewind_dir(int fd)
{
	struct file_descriptor* descriptor;
	status_t status;

	TRACE(("sys_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = B_UNSUPPORTED;

	put_fd(descriptor);
	return status;
}


status_t
_kern_close(int fd)
{
	return common_close(fd, true);
}


int
_kern_dup(int fd)
{
	return dup_fd(fd, true);
}


int
_kern_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, true);
}