/* This file implements the disk cache.
 *
 * If they exist anywhere, cached pages are always in a private
 * VM datastructure.
 *
 * They might also, in any combination:
 *   - be mapped in by a filesystem for reading/writing by it
 *   - be mapped in by a process as the result of an mmap call (future)
 *
 * This file manages the datastructure of all cache blocks, and
 * mapping them in and out of filesystems.
 */
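/* In short, the requests handled below are:
 *   do_mapcache    - map pages that are already in the cache, identified by
 *                    device and inode offset, into the caller's address space
 *   do_setcache    - add the caller's pages (mapped at m_vmmcp.block) to the
 *                    cache under the given device and inode offsets
 *   do_forgetcache - forget the cached pages for a device offset range
 *   do_clearcache  - invalidate every cached page belonging to a device
 * All device and inode offsets must be page-aligned, and sizes are expressed
 * in pages (m_vmmcp.pages).
 */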
#include <minix/hash.h>

#include <machine/vmparam.h>
static int cache_reference(struct phys_region *pr, struct phys_region *pr2);
static int cache_unreference(struct phys_region *pr);
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line);
static int cache_writable(struct phys_region *pr);
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
static int cache_lowshrink(struct vir_region *vr, vir_bytes len);
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb, void *state,
	int len, int *io);
static int cache_pt_flags(struct vir_region *vr);
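/* Handler table for cache-memory regions. Regions created with
 * &mem_type_cache (see do_mapcache below) have their reference, resize,
 * page fault and page table flag events dispatched through these callbacks.
 */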
struct mem_type mem_type_cache = {
	.name = "cache memory",
	.ev_reference = cache_reference,
	.ev_unreference = cache_unreference,
	.ev_resize = cache_resize,
	.ev_lowshrink = cache_lowshrink,
	.ev_sanitycheck = cache_sanitycheck,
	.ev_pagefault = cache_pagefault,
	.writable = cache_writable,
	.pt_flags = cache_pt_flags,
};
static int cache_pt_flags(struct vir_region *vr){
	/* Cache pages are mapped cacheable (ARM page table flag). */
	return ARM_VM_PTE_CACHED;
}
static int cache_reference(struct phys_region *pr, struct phys_region *pr2)
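/* Dropping a reference to a cache page is handled exactly like anonymous
 * memory; the work is delegated to mem_type_anon.
 */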
static int cache_unreference(struct phys_region *pr)
{
	return mem_type_anon.ev_unreference(pr);
}
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line)
{
	MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);

	return OK;
}
static int cache_writable(struct phys_region *pr)
{
	/* Cache blocks are at the moment only used by filesystems, so they are
	 * always writable.
	 */
	assert(pr->ph->refcount > 0);
	return pr->ph->phys != MAP_NONE;
}
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
	printf("VM: cannot resize cache blocks.\n");
static int cache_lowshrink(struct vir_region *vr, vir_bytes len)
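/* Map a run of already-cached pages, looked up by device offset and inode
 * offset, into a fresh region in the caller's address space. The reply
 * carries the virtual address of the new mapping in m_vmmcp_reply.addr.
 */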
int
do_mapcache(message *msg)
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	phys_bytes alloc_bytes;
	struct vir_region *vr;
	struct vmproc *caller;
	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	if(bytes < VM_PAGE_SIZE) return EINVAL;
	/* Make sure there is a 1-page hole available before the region,
	 * in case instrumentation needs to allocate in-band metadata later.
	 * This does effectively halve the usable part of the caller's address
	 * space, though, so only do this if we are instrumenting at all.
	 * Also make sure it falls within the mmap range, so that it is
	 * transferred upon live update. This again cuts the usable part of
	 * the address space for caching purposes in half.
	 */
	alloc_bytes += VM_PAGE_SIZE;
	if (!(vr = map_page_region(caller, VM_MMAPBASE, VM_MMAPTOP,
	    alloc_bytes, VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
	/* Unmap the first page again, leaving the 1-page hole described above
	 * in front of the actual cache mapping.
	 */
	map_unmap_region(caller, vr, 0, VM_PAGE_SIZE);
	assert(vr->length == bytes);
	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;
		assert(vr->length == bytes);
		assert(offset < vr->length);
		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
			msg->m_vmmcp.ino, ino_off + offset, 1)) ||
			(hb->flags & VMSF_ONCE)) {
			map_unmap_region(caller, vr, 0, bytes);
		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;
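		/* The page fault forced below is resolved by cache_pagefault(),
		 * which picks this page up from param.pb_cache and links it
		 * into the faulting phys_region.
		 */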
		assert(vr->length == bytes);
		assert(offset < vr->length);
		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
		assert(!vr->param.pb_cache);
	memset(msg, 0, sizeof(*msg));
	msg->m_vmmcp_reply.addr = (void *) vr->vaddr;
	cache_sanitycheck_internal();
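/* Page fault handler for cache regions: link the cached page that do_mapcache
 * stashed in the region's param.pb_cache into the faulting phys_region, and
 * clear the stash again.
 */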
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int len, int *io)
{
	vir_bytes offset = ph->offset;
	assert(ph->ph->phys == MAP_NONE);
	assert(region->param.pb_cache);
	pb_unreferenced(region, ph, 0);
	pb_link(ph, region->param.pb_cache, offset, region);
	region->param.pb_cache = NULL;

	return OK;
}
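/* Enter the caller's pages, currently mapped at m_vmmcp.block, into the cache
 * under the given device and inode offsets, converting them from anonymous to
 * cache memory.
 */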
int
do_setcache(message *msg)
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int flags = msg->m_vmmcp.flags;
	struct vmproc *caller;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	if(bytes < VM_PAGE_SIZE) return EINVAL;
	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct vir_region *region;
		struct phys_region *phys_region = NULL;
		vir_bytes v = (vir_bytes) msg->m_vmmcp.block + offset;
		struct cached_page *hb;
		if(!(region = map_lookup(caller, v, &phys_region))) {
			printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n",
				offset, v);
			printf("VM: error: no available memory region given\n");
		if((hb = find_cached_page_bydev(dev, dev_off + offset,
			msg->m_vmmcp.ino, ino_off + offset, 1))) {
			/* block inode info updated */
			if(hb->page != phys_region->ph ||
				(hb->flags & VMSF_ONCE)) {
				/* The previous cache entry has become
				 * obsolete; make a new one. rmcache
				 * removes it from the cache and frees
				 * the page if it isn't mapped in anywhere.
				 */
			/* The block was already there; its inode info might have changed, which is fine. */
		if(phys_region->memtype != &mem_type_anon &&
			phys_region->memtype != &mem_type_anon_contig) {
			printf("VM: error: no reasonable memory type\n");
		if(phys_region->ph->refcount != 1) {
			printf("VM: error: no reasonable refcount\n");
		phys_region->memtype = &mem_type_cache;
		if((r = addcache(dev, dev_off + offset, msg->m_vmmcp.ino,
			ino_off + offset, flags, phys_region->ph)) != OK) {
			printf("VM: addcache failed\n");
	cache_sanitycheck_internal();
/*
 * Forget all pages associated with a particular block in the cache.
 */
int
do_forgetcache(message *msg)
	struct cached_page *hb;
	phys_bytes bytes, offset;

	dev = msg->m_vmmcp.dev;
	dev_off = msg->m_vmmcp.dev_offset;
	bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	if (bytes < VM_PAGE_SIZE)
	if (dev_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
	for (offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		if ((hb = find_cached_page_bydev(dev, dev_off + offset,
		    VMC_NO_INODE, 0 /*ino_off*/, 0 /*touchlru*/)) != NULL)
/*
 * A file system wants to invalidate all pages belonging to a certain device.
 */
int
do_clearcache(message *msg)
	dev = msg->m_vmmcp.dev;
	clear_cache_bydev(dev);