/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "vnode_store.h"

#include <string.h>

#include <file_cache.h>
#include <slab/Slab.h>
#include <vfs.h>
#include <vm/vm.h>

#include "IORequest.h"
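

// Initialize the cache for the given vnode. Besides setting up the base
// VMCache, this records the vnode's device/inode pair so the vnode can be
// looked up again later (see AcquireUnreferencedStoreRef()).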
status_t
VMVnodeCache::Init(struct vnode* vnode, uint32 allocationFlags)
{
	status_t error = VMCache::Init(CACHE_TYPE_VNODE, allocationFlags);
	if (error != B_OK)
		return error;

	fVnode = vnode;
	fVnodeDeleted = false;

	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);

	return B_OK;
}
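

// A page at the given offset is backed by this cache if the offset falls
// within the cache's virtual range; the lower bound is checked with page
// granularity.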
bool
VMVnodeCache::HasPage(off_t offset)
{
	return ROUNDUP(offset, B_PAGE_SIZE) >= virtual_base
		&& offset < virtual_end;
}
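

// Read the given vecs from the vnode via the VFS. Any bytes the underlying
// file system did not touch (e.g. because the request extends beyond the end
// of the file) are cleared, so the pages handed out are always fully
// initialized.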
status_t
VMVnodeCache::Read(off_t offset, const generic_io_vec* vecs, size_t count,
	uint32 flags, generic_size_t* _numBytes)
{
	generic_size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
		flags, _numBytes);

	generic_size_t bytesEnd = *_numBytes;

	if (offset + (off_t)bytesEnd > virtual_end)
		bytesEnd = virtual_end - offset;

	// If the request could be filled completely, or an error occurred,
	// we're done here
	if (status != B_OK || bytesUntouched == bytesEnd)
		return status;

	bytesUntouched -= bytesEnd;

	// Clear out any leftovers that were not touched by the above read - we're
	// doing this here so that not every file system/device has to implement
	// this.
	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
		generic_size_t length = min_c(bytesUntouched, vecs[i].length);

		generic_addr_t address = vecs[i].base + vecs[i].length - length;
		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
			vm_memset_physical(address, 0, length);
		else
			memset((void*)(addr_t)address, 0, length);

		bytesUntouched -= length;
	}

	return B_OK;
}
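

// Write() and WriteAsync() simply forward to the VFS, which dispatches the
// request to the vnode's file system.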
status_t
VMVnodeCache::Write(off_t offset, const generic_io_vec* vecs, size_t count,
	uint32 flags, generic_size_t* _numBytes)
{
	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
}


status_t
VMVnodeCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
		numBytes, flags, callback);
}
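

// A fault on a vnode cache only validates the offset here; the actual
// page-in is done by vm_soft_fault(), hence B_BAD_HANDLER is returned for
// valid offsets.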
status_t
VMVnodeCache::Fault(struct VMAddressSpace* aspace, off_t offset)
{
	if (!HasPage(offset))
		return B_BAD_ADDRESS;

	// vm_soft_fault() reads the page in.
	return B_BAD_HANDLER;
}


bool
VMVnodeCache::CanWritePage(off_t offset)
{
	// all pages can be written
	return true;
}
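

// The "store ref" of a vnode cache is a reference to the underlying vnode.
// This variant re-acquires the vnode by its device/inode pair when the cache
// does not hold a reference anymore; fVnodeDeleted guards against
// resurrecting a vnode that is in the process of being deleted.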
status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	// Quick check whether getting a vnode reference is still allowed. Only
	// after a successful vfs_get_vnode() the check is safe (since then we've
	// either got the reference to our vnode, or have been notified that it is
	// toast), but the check is cheap and saves quite a bit of work in case the
	// condition holds.
	if (fVnodeDeleted)
		return B_BUSY;

	struct vnode* vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// If successful, update the store's vnode pointer, so that release_ref()
	// won't use a stale pointer.
	if (status == B_OK && fVnodeDeleted) {
		vfs_put_vnode(vnode);
		status = B_BUSY;
	}

	return status;
}
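

// Unlike AcquireUnreferencedStoreRef(), these simply take/release another
// reference to fVnode and assume it is still valid.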
void
VMVnodeCache::AcquireStoreRef()
{
	vfs_acquire_vnode(fVnode);
}


void
VMVnodeCache::ReleaseStoreRef()
{
	vfs_put_vnode(fVnode);
}


void
VMVnodeCache::Dump(bool showPages) const
{
	VMCache::Dump(showPages);

	kprintf(" vnode: %p <%" B_PRIdDEV ", %" B_PRIdINO ">\n", fVnode,
		fDevice, fInode);
}
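

// Vnode caches are allocated from a dedicated slab object cache, so the
// object has to be returned there instead of being deleted directly.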
void
VMVnodeCache::DeleteObject()
{
	object_cache_delete(gVnodeCacheObjectCache, this);
}