/*
 * Copyright 2007-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <sys/ioctl.h>

#include <KernelExport.h>
#include <NodeMonitor.h>

#include <condition_variable.h>
#include <debug_hex_dump.h>
#include <select_sync_pool.h>
#include <syscall_restart.h>
#include <util/DoublyLinkedList.h>
#include <util/AutoLock.h>
#include <util/ring_buffer.h>

#	define TRACE(x...) dprintf(x)


	status_t	CreateBuffer();

	ssize_t		Write(const void* buffer, size_t length, bool isUser);
	ssize_t		Read(void* buffer, size_t length, bool isUser);
	ssize_t		Peek(size_t offset, void* buffer, size_t length) const;

	size_t		Readable() const;
	size_t		Writable() const;

	struct ring_buffer*	fBuffer;


class ReadRequest : public DoublyLinkedListLinkImpl<ReadRequest> {
public:
	ReadRequest(file_cookie* cookie)
		:
		fThread(thread_get_current_thread()),
		fCookie(cookie),
		fNotified(true)
	{
		B_INITIALIZE_SPINLOCK(&fLock);
	}

	void SetNotified(bool notified)
	{
		InterruptsSpinLocker _(fLock);
		fNotified = notified;
	}

	void Notify(status_t status = B_OK)
	{
		InterruptsSpinLocker _(fLock);
		TRACE("ReadRequest %p::Notify(), fNotified %d\n", this, fNotified);

		if (!fNotified) {
			thread_unblock(fThread, status);
			fNotified = true;
		}
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	file_cookie* Cookie() const
	{
		return fCookie;
	}

private:
	spinlock		fLock;
	Thread*			fThread;
	file_cookie*	fCookie;
	volatile bool	fNotified;
};


class WriteRequest : public DoublyLinkedListLinkImpl<WriteRequest> {
public:
	WriteRequest(Thread* thread, size_t minimalWriteCount)
		:
		fThread(thread),
		fMinimalWriteCount(minimalWriteCount)
	{
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	size_t MinimalWriteCount() const
	{
		return fMinimalWriteCount;
	}

private:
	Thread*	fThread;
	size_t	fMinimalWriteCount;
};


typedef DoublyLinkedList<ReadRequest> ReadRequestList;
typedef DoublyLinkedList<WriteRequest> WriteRequestList;


	status_t	InitCheck();

	bool		IsActive() const { return fActive; }

	timespec	CreationTime() const { return fCreationTime; }
	void		SetCreationTime(timespec creationTime)
					{ fCreationTime = creationTime; }
	timespec	ModificationTime() const
					{ return fModificationTime; }
	void		SetModificationTime(timespec modificationTime)
					{ fModificationTime = modificationTime; }

	mutex*		RequestLock() { return &fRequestLock; }

	status_t	WriteDataToBuffer(const void* data,
					size_t* _length, bool nonBlocking, bool isUser);
	status_t	ReadDataFromBuffer(void* data, size_t* _length,
					bool nonBlocking, bool isUser, ReadRequest& request);
	size_t		BytesAvailable() const { return fBuffer.Readable(); }
	size_t		BytesWritable() const { return fBuffer.Writable(); }

	void		AddReadRequest(ReadRequest& request);
	void		RemoveReadRequest(ReadRequest& request);
	status_t	WaitForReadRequest(ReadRequest& request);

	void		NotifyBytesRead(size_t bytes);
	void		NotifyReadDone();
	void		NotifyBytesWritten(size_t bytes);
	void		NotifyEndClosed(bool writer);

	void		Open(int openMode);
	void		Close(file_cookie* cookie);
	int32		ReaderCount() const { return fReaderCount; }
	int32		WriterCount() const { return fWriterCount; }

	status_t	Select(uint8 event, selectsync* sync, int openMode);
	status_t	Deselect(uint8 event, selectsync* sync, int openMode);

	void		Dump(bool dumpData) const;
	static int	Dump(int argc, char** argv);

private:
	timespec			fCreationTime;
	timespec			fModificationTime;

	ReadRequestList		fReadRequests;
	WriteRequestList	fWriteRequests;

	ConditionVariable	fWriteCondition;

	select_sync_pool*	fReadSelectSyncPool;
	select_sync_pool*	fWriteSelectSyncPool;


class FIFOInode : public Inode {
public:
	FIFOInode(fs_vnode* vnode)
		:
		fSuperVnode(*vnode)
	{
	}

	fs_vnode*	SuperVnode() { return &fSuperVnode; }

private:
	fs_vnode	fSuperVnode;
};


struct file_cookie {
	int	open_mode;
			// guarded by Inode::fRequestLock

	void SetNonBlocking(bool nonBlocking)
	{
		if (nonBlocking)
			open_mode |= O_NONBLOCK;
		else
			open_mode &= ~(int)O_NONBLOCK;
	}
};


RingBuffer::RingBuffer()
	:
	fBuffer(NULL)
{
}


RingBuffer::~RingBuffer()
{
	DeleteBuffer();
}


status_t
RingBuffer::CreateBuffer()
{
	if (fBuffer != NULL)
		return B_OK;

	fBuffer = create_ring_buffer(VFS_FIFO_BUFFER_CAPACITY);
	return fBuffer != NULL ? B_OK : B_NO_MEMORY;
}


void
RingBuffer::DeleteBuffer()
{
	if (fBuffer != NULL) {
		delete_ring_buffer(fBuffer);
		fBuffer = NULL;
	}
}


ssize_t
RingBuffer::Write(const void* buffer, size_t length, bool isUser)
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;
	if (isUser && !IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	return isUser
		? ring_buffer_user_write(fBuffer, (const uint8*)buffer, length)
		: ring_buffer_write(fBuffer, (const uint8*)buffer, length);
}


ssize_t
RingBuffer::Read(void* buffer, size_t length, bool isUser)
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;
	if (isUser && !IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	return isUser
		? ring_buffer_user_read(fBuffer, (uint8*)buffer, length)
		: ring_buffer_read(fBuffer, (uint8*)buffer, length);
}


ssize_t
RingBuffer::Peek(size_t offset, void* buffer, size_t length) const
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	return ring_buffer_peek(fBuffer, offset, (uint8*)buffer, length);
}


size_t
RingBuffer::Readable() const
{
	return fBuffer != NULL ? ring_buffer_readable(fBuffer) : 0;
}


size_t
RingBuffer::Writable() const
{
	return fBuffer != NULL ? ring_buffer_writable(fBuffer) : 0;
}
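

// A minimal usage sketch of the RingBuffer wrapper above (illustrative only,
// kept out of the build; the helper name is hypothetical): the backing
// ring_buffer is created lazily, Write() and Read() return the number of
// bytes actually copied, and the isUser flag selects the user-copy variants
// of the ring_buffer functions.
#if 0
static status_t
ring_buffer_usage_sketch()
{
	RingBuffer buffer;
	if (buffer.CreateBuffer() != B_OK)
		return B_NO_MEMORY;

	const char data[] = "hello";
	ssize_t written = buffer.Write(data, sizeof(data), false);
		// false: kernel source address, no user copy
	char out[sizeof(data)];
	ssize_t read = buffer.Read(out, sizeof(out), false);

	return written == read ? B_OK : B_ERROR;
}
#endif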


Inode::Inode()
	:
	fReadSelectSyncPool(NULL),
	fWriteSelectSyncPool(NULL)
{
	fWriteCondition.Publish(this, "pipe");
	mutex_init(&fRequestLock, "pipe request");

	bigtime_t time = real_time_clock_usecs();
	fModificationTime.tv_sec = time / 1000000;
	fModificationTime.tv_nsec = (time % 1000000) * 1000;
	fCreationTime = fModificationTime;
}


Inode::~Inode()
{
	fWriteCondition.Unpublish();
	mutex_destroy(&fRequestLock);
}


/*!	Writes the specified data bytes to the inode's ring buffer. The
	request lock must be held when calling this method.
	Notifies readers if necessary, so that blocking readers will get started.
	Returns B_OK for success, B_BAD_ADDRESS if copying from the buffer failed,
	and various semaphore errors (like B_WOULD_BLOCK in non-blocking mode). If
	the returned length is > 0, the returned error code can be ignored.
*/
status_t
Inode::WriteDataToBuffer(const void* _data, size_t* _length, bool nonBlocking,
	bool isUser)
{
	const uint8* data = (const uint8*)_data;
	size_t dataSize = *_length;
	size_t& written = *_length;
	written = 0;

	TRACE("Inode %p::WriteDataToBuffer(data = %p, bytes = %zu)\n", this, data,
		dataSize);

	// A request up to VFS_FIFO_ATOMIC_WRITE_SIZE bytes shall not be
	// interleaved with another writer's data.
	size_t minToWrite = 1;
	if (dataSize <= VFS_FIFO_ATOMIC_WRITE_SIZE)
		minToWrite = dataSize;

	while (dataSize > 0) {
		// Wait until enough space in the buffer is available.
		while (!fActive
				|| (fBuffer.Writable() < minToWrite && fReaderCount > 0)) {
			if (nonBlocking)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			entry.Add(this);

			WriteRequest request(thread_get_current_thread(), minToWrite);
			fWriteRequests.Add(&request);

			mutex_unlock(&fRequestLock);
			status_t status = entry.Wait(B_CAN_INTERRUPT);
			mutex_lock(&fRequestLock);

			fWriteRequests.Remove(&request);

			if (status != B_OK)
				return status;
		}

		// write only as long as there are readers left
		if (fActive && fReaderCount == 0) {
			if (written == 0)
				send_signal(find_thread(NULL), SIGPIPE);
			return EPIPE;
		}

		// write as much as we can
		size_t toWrite = (fActive ? fBuffer.Writable() : 0);
		if (toWrite > dataSize)
			toWrite = dataSize;

		ssize_t bytesWritten = fBuffer.Write(data, toWrite, isUser);
		if (bytesWritten < 0)
			return bytesWritten;

		data += toWrite;
		dataSize -= toWrite;
		written += toWrite;

		NotifyBytesWritten(toWrite);
	}

	return B_OK;
}
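

// Illustration of the atomic-write guarantee implemented above: a request that
// fits within VFS_FIFO_ATOMIC_WRITE_SIZE sets minToWrite to the full request
// size, so the loop waits until the whole request fits into the ring buffer
// and its bytes land there in one piece, never interleaved with another
// writer's data. A larger request drops back to minToWrite = 1 and may be
// split into several partial writes, with `written` accumulating the total.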


status_t
Inode::ReadDataFromBuffer(void* data, size_t* _length, bool nonBlocking,
	bool isUser, ReadRequest& request)
{
	size_t dataSize = *_length;
	*_length = 0;

	// wait until our request is first in queue
	status_t error;
	if (fReadRequests.Head() != &request) {
		if (nonBlocking)
			return B_WOULD_BLOCK;

		TRACE("Inode %p::%s(): wait for request %p to become the first "
			"request.\n", this, __FUNCTION__, &request);

		error = WaitForReadRequest(request);
		if (error != B_OK)
			return error;
	}

	// wait until data are available
	while (fBuffer.Readable() == 0) {
		if (nonBlocking)
			return B_WOULD_BLOCK;

		if (fActive && fWriterCount == 0)
			return B_OK;

		TRACE("Inode %p::%s(): wait for data, request %p\n", this, __FUNCTION__,
			&request);

		error = WaitForReadRequest(request);
		if (error != B_OK)
			return error;
	}

	// read as much as we can
	size_t toRead = fBuffer.Readable();
	if (toRead > dataSize)
		toRead = dataSize;

	ssize_t bytesRead = fBuffer.Read(data, toRead, isUser);
	if (bytesRead < 0)
		return bytesRead;

	NotifyBytesRead(toRead);

	*_length = toRead;

	return B_OK;
}


void
Inode::AddReadRequest(ReadRequest& request)
{
	fReadRequests.Add(&request);
}


void
Inode::RemoveReadRequest(ReadRequest& request)
{
	fReadRequests.Remove(&request);
}


status_t
Inode::WaitForReadRequest(ReadRequest& request)
{
	// add the entry to wait on
	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "fifo read request");

	request.SetNotified(false);

	// wait
	mutex_unlock(&fRequestLock);
	status_t status = thread_block();

	// Before going to lock again, we need to make sure no one tries to
	// unblock us. Otherwise that would screw with mutex_lock().
	request.SetNotified(true);

	mutex_lock(&fRequestLock);

	return status;
}


void
Inode::NotifyBytesRead(size_t bytes)
{
	// notify writer, if something can be written now
	size_t writable = fBuffer.Writable();
	if (bytes > 0) {
		// notify select()ors only, if nothing was writable before
		if (writable == bytes) {
			if (fWriteSelectSyncPool)
				notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		}

		// If any of the waiting writers has a minimal write count that has
		// now become satisfied, we notify all of them (condition variables
		// don't support doing that selectively).
		WriteRequest* request;
		WriteRequestList::Iterator iterator = fWriteRequests.GetIterator();
		while ((request = iterator.Next()) != NULL) {
			size_t minWriteCount = request->MinimalWriteCount();
			if (minWriteCount > 0 && minWriteCount <= writable
					&& minWriteCount > writable - bytes) {
				fWriteCondition.NotifyAll();
				break;
			}
		}
	}
}
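

// Worked example for the wake-up condition above (illustrative numbers):
// suppose bytes = 4 were just read, leaving writable = 10, i.e. only 6 bytes
// were writable before. A writer waiting for at least 8 bytes has just become
// satisfied (8 <= 10 && 8 > 6) and triggers the NotifyAll(); one waiting for
// 5 bytes was already satisfiable before (5 <= 6), and one waiting for 12
// still is not (12 > 10), so neither of those triggers a notification.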


void
Inode::NotifyReadDone()
{
	// notify next reader, if there's still something to be read
	if (fBuffer.Readable() > 0) {
		if (ReadRequest* request = fReadRequests.First())
			request->Notify();
	}
}


void
Inode::NotifyBytesWritten(size_t bytes)
{
	// notify reader, if something can be read now
	if (bytes > 0 && fBuffer.Readable() == bytes) {
		if (fReadSelectSyncPool)
			notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);

		if (ReadRequest* request = fReadRequests.First())
			request->Notify();
	}
}


void
Inode::NotifyEndClosed(bool writer)
{
	TRACE("Inode %p::%s(%s)\n", this, __FUNCTION__,
		writer ? "writer" : "reader");

	if (writer) {
		// Our last writer has been closed; if the pipe
		// contains no data, unlock all waiting readers
		TRACE(" buffer readable: %zu\n", fBuffer.Readable());
		if (fBuffer.Readable() == 0) {
			ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
			while (ReadRequest* request = iterator.Next())
				request->Notify();

			if (fReadSelectSyncPool)
				notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);
		}
	} else {
		// Last reader is gone. Wake up all writers.
		fWriteCondition.NotifyAll();

		if (fWriteSelectSyncPool) {
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_ERROR);
		}
	}
}


void
Inode::Open(int openMode)
{
	MutexLocker locker(RequestLock());

	if ((openMode & O_ACCMODE) == O_WRONLY)
		fWriterCount++;

	if ((openMode & O_ACCMODE) == O_RDONLY || (openMode & O_ACCMODE) == O_RDWR)
		fReaderCount++;

	if (fReaderCount > 0 && fWriterCount > 0) {
		TRACE("Inode %p::Open(): fifo becomes active\n", this);
		fBuffer.CreateBuffer();
		fActive = true;

		// notify all waiting writers that they can start
		if (fWriteSelectSyncPool)
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		fWriteCondition.NotifyAll();
	}
}
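

// Example of the accounting above (illustrative): the first open() of the FIFO
// with O_RDONLY only bumps fReaderCount; once a matching O_WRONLY open arrives,
// both counts are positive, the ring buffer is allocated lazily via
// CreateBuffer(), and any writers already blocked in WriteDataToBuffer() are
// woken up.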


void
Inode::Close(file_cookie* cookie)
{
	TRACE("Inode %p::Close(openMode = %d)\n", this, cookie->open_mode);

	MutexLocker locker(RequestLock());

	int openMode = cookie->open_mode;

	// Notify all currently reading file descriptors
	ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
	while (ReadRequest* request = iterator.Next()) {
		if (request->Cookie() == cookie)
			request->Notify(B_FILE_ERROR);
	}

	if ((openMode & O_ACCMODE) == O_WRONLY && --fWriterCount == 0)
		NotifyEndClosed(true);

	if ((openMode & O_ACCMODE) == O_RDONLY
		|| (openMode & O_ACCMODE) == O_RDWR) {
		if (--fReaderCount == 0)
			NotifyEndClosed(false);
	}

	if (fWriterCount == 0) {
		// Notify any writers still blocked in a write to stop.
		// TODO: This only works reliably if there is only one writer - we
		// could do the same thing done for the read requests.
		fWriteCondition.NotifyAll(B_FILE_ERROR);
	}

	if (fReaderCount == 0 && fWriterCount == 0) {
		fActive = false;
		fBuffer.DeleteBuffer();
	}
}
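

// Userland view of the transitions above (illustrative): once the last writer
// closes an empty FIFO, blocked readers are woken and read() returns 0 (end of
// file), and the read end is reported readable to select(). Once the last
// reader closes, blocked writers are woken and subsequently fail with
// EPIPE/SIGPIPE in WriteDataToBuffer(), while the write end is reported both
// writable and in error to select().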


status_t
Inode::Select(uint8 event, selectsync* sync, int openMode)
{
	bool writer = true;
	select_sync_pool** pool;
	if ((openMode & O_RWMASK) == O_RDONLY) {
		pool = &fReadSelectSyncPool;
		writer = false;
	} else if ((openMode & O_RWMASK) == O_WRONLY) {
		pool = &fWriteSelectSyncPool;
	} else
		return B_NOT_ALLOWED;

	if (add_select_sync_pool_entry(pool, sync, event) != B_OK)
		return B_ERROR;

	// signal right away, if the condition holds already
	if (writer) {
		if ((event == B_SELECT_WRITE
				&& (fBuffer.Writable() > 0 || fReaderCount == 0))
			|| (event == B_SELECT_ERROR && fReaderCount == 0)) {
			return notify_select_event(sync, event);
		}
	} else {
		if (event == B_SELECT_READ
			&& (fBuffer.Readable() > 0 || fWriterCount == 0)) {
			return notify_select_event(sync, event);
		}
	}

	return B_OK;
}


status_t
Inode::Deselect(uint8 event, selectsync* sync, int openMode)
{
	select_sync_pool** pool;
	if ((openMode & O_RWMASK) == O_RDONLY) {
		pool = &fReadSelectSyncPool;
	} else if ((openMode & O_RWMASK) == O_WRONLY) {
		pool = &fWriteSelectSyncPool;
	} else
		return B_NOT_ALLOWED;

	remove_select_sync_pool_entry(pool, sync, event);
	return B_OK;
}


void
Inode::Dump(bool dumpData) const
{
	kprintf("FIFO %p\n", this);
	kprintf(" active: %s\n", fActive ? "true" : "false");
	kprintf(" readers: %" B_PRId32 "\n", fReaderCount);
	kprintf(" writers: %" B_PRId32 "\n", fWriterCount);

	if (!fReadRequests.IsEmpty()) {
		kprintf(" pending readers:\n");
		for (ReadRequestList::ConstIterator it = fReadRequests.GetIterator();
			ReadRequest* request = it.Next();) {
			kprintf(" %p: thread %" B_PRId32 ", cookie: %p\n", request,
				request->GetThread()->id, request->Cookie());
		}
	}

	if (!fWriteRequests.IsEmpty()) {
		kprintf(" pending writers:\n");
		for (WriteRequestList::ConstIterator it = fWriteRequests.GetIterator();
			WriteRequest* request = it.Next();) {
			kprintf(" %p: thread %" B_PRId32 ", min count: %zu\n", request,
				request->GetThread()->id, request->MinimalWriteCount());
		}
	}

	kprintf(" %zu bytes buffered\n", fBuffer.Readable());

	if (dumpData && fBuffer.Readable() > 0) {
		struct DataProvider : BKernel::HexDumpDataProvider {
			DataProvider(const RingBuffer& buffer)
				:
				fBuffer(buffer),
				fOffset(0)
			{
			}

			virtual bool HasMoreData() const
			{
				return fOffset < fBuffer.Readable();
			}

			virtual uint8 NextByte()
			{
				uint8 byte = '\0';
				if (fOffset < fBuffer.Readable()) {
					fBuffer.Peek(fOffset, &byte, 1);
					fOffset++;
				}
				return byte;
			}

			virtual bool GetAddressString(char* buffer, size_t bufferSize) const
			{
				snprintf(buffer, bufferSize, " %4zx", fOffset);
				return true;
			}

		private:
			const RingBuffer&	fBuffer;
			size_t				fOffset;
		};

		DataProvider dataProvider(fBuffer);
		BKernel::print_hex_dump(dataProvider, fBuffer.Readable());
	}
}


/*static*/ int
Inode::Dump(int argc, char** argv)
{
	bool dumpData = false;

	int argi = 1;
	if (argi < argc && strcmp(argv[argi], "-d") == 0) {
		dumpData = true;
		argi++;
	}

	if (argi >= argc || argi + 2 < argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	Inode* node = (Inode*)parse_expression(argv[argi]);
	if (IS_USER_ADDRESS(node)) {
		kprintf("invalid FIFO address\n");
		return 0;
	}

	node->Dump(dumpData);
	return 0;
}


// #pragma mark - vnode API


static status_t
fifo_put_vnode(fs_volume* volume, fs_vnode* vnode, bool reenter)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	status_t error = B_OK;
	if (superVnode->ops->put_vnode != NULL)
		error = superVnode->ops->put_vnode(volume, superVnode, reenter);

	delete fifo;

	return error;
}


static status_t
fifo_remove_vnode(fs_volume* volume, fs_vnode* vnode, bool reenter)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	status_t error = B_OK;
	if (superVnode->ops->remove_vnode != NULL)
		error = superVnode->ops->remove_vnode(volume, superVnode, reenter);

	delete fifo;

	return error;
}


static status_t
fifo_open(fs_volume* _volume, fs_vnode* _node, int openMode,
	void** _cookie)
{
	Inode* inode = (Inode*)_node->private_node;

	TRACE("fifo_open(): node = %p, openMode = %d\n", inode, openMode);

	file_cookie* cookie = (file_cookie*)malloc(sizeof(file_cookie));
	if (cookie == NULL)
		return B_NO_MEMORY;

	TRACE(" open cookie = %p\n", cookie);
	cookie->open_mode = openMode;
	inode->Open(openMode);

	*_cookie = (void*)cookie;

	return B_OK;
}


static status_t
fifo_close(fs_volume* volume, fs_vnode* vnode, void* _cookie)
{
	file_cookie* cookie = (file_cookie*)_cookie;
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;

	fifo->Close(cookie);

	return B_OK;
}


static status_t
fifo_free_cookie(fs_volume* _volume, fs_vnode* _node, void* _cookie)
{
	file_cookie* cookie = (file_cookie*)_cookie;

	TRACE("fifo_freecookie: entry vnode %p, cookie %p\n", _node, _cookie);

	free(cookie);

	return B_OK;
}


static status_t
fifo_fsync(fs_volume* _volume, fs_vnode* _node)
{
	return B_OK;
}


static status_t
fifo_read(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	off_t /*pos*/, void* buffer, size_t* _length)
{
	file_cookie* cookie = (file_cookie*)_cookie;
	Inode* inode = (Inode*)_node->private_node;

	TRACE("fifo_read(vnode = %p, cookie = %p, length = %lu, mode = %d)\n",
		inode, cookie, *_length, cookie->open_mode);

	MutexLocker locker(inode->RequestLock());

	if ((cookie->open_mode & O_RWMASK) != O_RDONLY)
		return B_NOT_ALLOWED;

	if (inode->IsActive() && inode->WriterCount() == 0) {
		// as long as there is no writer and the pipe is empty,
		// we always just return 0 to indicate end of file
		if (inode->BytesAvailable() == 0) {
			*_length = 0;
			return B_OK;
		}
	}

	// issue read request

	ReadRequest request(cookie);
	inode->AddReadRequest(request);

	TRACE(" issue read request %p\n", &request);

	size_t length = *_length;
	status_t status = inode->ReadDataFromBuffer(buffer, &length,
		(cookie->open_mode & O_NONBLOCK) != 0, is_called_via_syscall(),
		request);

	inode->RemoveReadRequest(request);
	inode->NotifyReadDone();

	TRACE(" done reading request %p, length %zu\n", &request, length);

	if (length > 0)
		status = B_OK;

	*_length = length;
	return status;
}


static status_t
fifo_write(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	off_t /*pos*/, const void* buffer, size_t* _length)
{
	file_cookie* cookie = (file_cookie*)_cookie;
	Inode* inode = (Inode*)_node->private_node;

	TRACE("fifo_write(vnode = %p, cookie = %p, length = %lu)\n",
		_node, cookie, *_length);

	MutexLocker locker(inode->RequestLock());

	if ((cookie->open_mode & O_RWMASK) != O_WRONLY)
		return B_NOT_ALLOWED;

	size_t length = *_length;
	if (length == 0)
		return B_OK;

	// copy data into ring buffer
	status_t status = inode->WriteDataToBuffer(buffer, &length,
		(cookie->open_mode & O_NONBLOCK) != 0, is_called_via_syscall());

	if (length > 0)
		status = B_OK;

	*_length = length;
	return status;
}


static status_t
fifo_read_stat(fs_volume* volume, fs_vnode* vnode, struct ::stat* st)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	if (superVnode->ops->read_stat == NULL)
		return B_BAD_VALUE;

	status_t error = superVnode->ops->read_stat(volume, superVnode, st);
	if (error != B_OK)
		return error;

	MutexLocker locker(fifo->RequestLock());

	st->st_size = fifo->BytesAvailable();

	st->st_blksize = 4096;

	// TODO: Just pass the changes to our modification time on to the super node.
	st->st_atim.tv_sec = time(NULL);
	st->st_atim.tv_nsec = 0;
	st->st_mtim = st->st_ctim = fifo->ModificationTime();

	return B_OK;
}


static status_t
fifo_write_stat(fs_volume* volume, fs_vnode* vnode, const struct ::stat* st,
	uint32 statMask)
{
	// we cannot change the size of anything
	if ((statMask & B_STAT_SIZE) != 0)
		return B_BAD_VALUE;

	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	if (superVnode->ops->write_stat == NULL)
		return B_BAD_VALUE;

	status_t error = superVnode->ops->write_stat(volume, superVnode, st,
		statMask);

	return error;
}


static status_t
fifo_ioctl(fs_volume* _volume, fs_vnode* _node, void* _cookie, uint32 op,
	void* buffer, size_t length)
{
	file_cookie* cookie = (file_cookie*)_cookie;
	Inode* inode = (Inode*)_node->private_node;

	TRACE("fifo_ioctl: vnode %p, cookie %p, op %ld, buf %p, len %ld\n",
		_node, _cookie, op, buffer, length);

	switch (op) {
		case FIONBIO:
		{
			int value;
			if (is_called_via_syscall()) {
				if (!IS_USER_ADDRESS(buffer)
					|| user_memcpy(&value, buffer, sizeof(int)) != B_OK) {
					return B_BAD_ADDRESS;
				}
			} else
				value = *(int*)buffer;

			MutexLocker locker(inode->RequestLock());
			cookie->SetNonBlocking(value != 0);
			return B_OK;
		}

		case FIONREAD:
		{
			MutexLocker locker(inode->RequestLock());
			int available = (int)inode->BytesAvailable();

			if (is_called_via_syscall()) {
				if (!IS_USER_ADDRESS(buffer)
					|| user_memcpy(buffer, &available, sizeof(available))
						!= B_OK) {
					return B_BAD_ADDRESS;
				}
			} else
				*(int*)buffer = available;

			return B_OK;
		}

		case B_SET_BLOCKING_IO:
		case B_SET_NONBLOCKING_IO:
		{
			MutexLocker locker(inode->RequestLock());
			cookie->SetNonBlocking(op == B_SET_NONBLOCKING_IO);
			return B_OK;
		}
	}

	return EINVAL;
}
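

// Userland view of the two ioctls handled above (illustrative sketch; `fd` is
// a hypothetical descriptor for an open FIFO):
//
//	int value = 1;
//	ioctl(fd, FIONBIO, &value);			// switch the descriptor to non-blocking I/O
//	int available;
//	ioctl(fd, FIONREAD, &available);	// number of bytes currently buffered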


static status_t
fifo_set_flags(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	int flags)
{
	Inode* inode = (Inode*)_node->private_node;
	file_cookie* cookie = (file_cookie*)_cookie;

	TRACE("fifo_set_flags(vnode = %p, flags = %x)\n", _node, flags);

	MutexLocker locker(inode->RequestLock());
	cookie->open_mode = (cookie->open_mode & ~(O_APPEND | O_NONBLOCK)) | flags;
	return B_OK;
}


static status_t
fifo_select(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	uint8 event, selectsync* sync)
{
	file_cookie* cookie = (file_cookie*)_cookie;

	TRACE("fifo_select(vnode = %p)\n", _node);
	Inode* inode = (Inode*)_node->private_node;
	if (inode == NULL)
		return B_ERROR;

	MutexLocker locker(inode->RequestLock());
	return inode->Select(event, sync, cookie->open_mode);
}


static status_t
fifo_deselect(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	uint8 event, selectsync* sync)
{
	file_cookie* cookie = (file_cookie*)_cookie;

	TRACE("fifo_deselect(vnode = %p)\n", _node);
	Inode* inode = (Inode*)_node->private_node;
	if (inode == NULL)
		return B_ERROR;

	MutexLocker locker(inode->RequestLock());
	return inode->Deselect(event, sync, cookie->open_mode);
}


static bool
fifo_can_page(fs_volume* _volume, fs_vnode* _node, void* cookie)
{
	return false;
}


static status_t
fifo_read_pages(fs_volume* _volume, fs_vnode* _node, void* cookie, off_t pos,
	const iovec* vecs, size_t count, size_t* _numBytes)
{
	return B_NOT_ALLOWED;
}


static status_t
fifo_write_pages(fs_volume* _volume, fs_vnode* _node, void* cookie,
	off_t pos, const iovec* vecs, size_t count, size_t* _numBytes)
{
	return B_NOT_ALLOWED;
}


static status_t
fifo_get_super_vnode(fs_volume* volume, fs_vnode* vnode, fs_volume* superVolume,
	fs_vnode* _superVnode)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	if (superVnode->ops->get_super_vnode != NULL) {
		return superVnode->ops->get_super_vnode(volume, superVnode, superVolume,
			_superVnode);
	}

	*_superVnode = *superVnode;

	return B_OK;
}


static fs_vnode_ops sFIFOVnodeOps = {
	NULL,	// get_vnode_name
		// TODO: This is suboptimal! We'd need to forward the
		// super node's hook, if it has got one.

	NULL,	// cancel_io()

	NULL,	// get_file_map

	NULL,	// fs_read_link

	NULL,	// fs_access()

	NULL,	// free_dir_cookie

	/* attribute directory operations */
	NULL,	// open_attr_dir
	NULL,	// close_attr_dir
	NULL,	// free_attr_dir_cookie
	NULL,	// read_attr_dir
	NULL,	// rewind_attr_dir

	/* attribute operations */
	NULL,	// create_attr
	NULL,	// free_attr_cookie
	NULL,	// read_attr_stat
	NULL,	// write_attr_stat
	NULL,	// rename_attr
	NULL,	// remove_attr

	/* support for node and FS layers */
	NULL,	// create_special_node
	&fifo_get_super_vnode,
};


using namespace fifo;


status_t
create_fifo_vnode(fs_volume* superVolume, fs_vnode* vnode)
{
	FIFOInode* fifo = new(std::nothrow) FIFOInode(vnode);
	if (fifo == NULL)
		return B_NO_MEMORY;

	status_t status = fifo->InitCheck();
	if (status != B_OK) {
		delete fifo;
		return status;
	}

	vnode->private_node = fifo;
	vnode->ops = &sFIFOVnodeOps;

	return B_OK;
}


	add_debugger_command_etc("fifo", &Inode::Dump,
		"Print info about the specified FIFO node",
		"[ \"-d\" ] <address>\n"
		"Prints information about the FIFO node specified by address\n"
		"<address>. If \"-d\" is given, the data in the FIFO's ring buffer is\n"
		"hexdumped as well.\n",
		0);