/* This file implements the disk cache.
 *
 * If they exist anywhere, cached pages are always in a private
 * VM datastructure.
 *
 * They might also be any combination of:
 *	- be mapped in by a filesystem for reading/writing by it
 *	- be mapped in by a process as the result of an mmap call (future)
 *
 * This file manages the datastructure of all cache blocks, and
 * mapping them in and out of filesystems.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <minix/hash.h>

#include <machine/vmparam.h>
28 static int cache_reference(struct phys_region
*pr
, struct phys_region
*pr2
);
29 static int cache_unreference(struct phys_region
*pr
);
30 static int cache_sanitycheck(struct phys_region
*pr
, const char *file
, int line
);
31 static int cache_writable(struct phys_region
*pr
);
32 static int cache_resize(struct vmproc
*vmp
, struct vir_region
*vr
, vir_bytes l
);
33 static int cache_pagefault(struct vmproc
*vmp
, struct vir_region
*region
,
34 struct phys_region
*ph
, int write
, vfs_callback_t cb
, void *state
,
36 static int cache_pt_flags(struct vir_region
*vr
);
38 struct mem_type mem_type_cache
= {
39 .name
= "cache memory",
40 .ev_reference
= cache_reference
,
41 .ev_unreference
= cache_unreference
,
42 .ev_resize
= cache_resize
,
43 .ev_sanitycheck
= cache_sanitycheck
,
44 .ev_pagefault
= cache_pagefault
,
45 .writable
= cache_writable
,
46 .pt_flags
= cache_pt_flags
,
/* Extra page-table entry flags for cache pages.
 * ARM_VM_PTE_CACHED only exists on ARM, so it must be guarded by an
 * architecture check; on other architectures no extra flags are needed.
 */
static int cache_pt_flags(struct vir_region *vr){
#if defined(__arm__)
	return ARM_VM_PTE_CACHED;
#else
	return 0;
#endif
}
58 static int cache_reference(struct phys_region
*pr
, struct phys_region
*pr2
)
63 static int cache_unreference(struct phys_region
*pr
)
65 return mem_type_anon
.ev_unreference(pr
);
68 static int cache_sanitycheck(struct phys_region
*pr
, const char *file
, int line
)
70 MYASSERT(usedpages_add(pr
->ph
->phys
, VM_PAGE_SIZE
) == OK
);
74 static int cache_writable(struct phys_region
*pr
)
76 /* Cache blocks are at the moment only used by filesystems so always writable. */
77 assert(pr
->ph
->refcount
> 0);
78 return pr
->ph
->phys
!= MAP_NONE
;
81 static int cache_resize(struct vmproc
*vmp
, struct vir_region
*vr
, vir_bytes l
)
83 printf("VM: cannot resize cache blocks.\n");
88 do_mapcache(message
*msg
)
90 dev_t dev
= msg
->m_vmmcp
.dev
;
91 off_t dev_off
= msg
->m_vmmcp
.dev_offset
;
92 off_t ino_off
= msg
->m_vmmcp
.ino_offset
;
94 phys_bytes bytes
= msg
->m_vmmcp
.pages
* VM_PAGE_SIZE
;
95 struct vir_region
*vr
;
96 struct vmproc
*caller
;
100 if(dev_off
% PAGE_SIZE
|| ino_off
% PAGE_SIZE
) {
101 printf("VM: unaligned cache operation\n");
105 if(vm_isokendpt(msg
->m_source
, &n
) != OK
) panic("bogus source");
108 if(bytes
< VM_PAGE_SIZE
) return EINVAL
;
110 if(!(vr
= map_page_region(caller
, VM_PAGE_SIZE
, VM_DATATOP
, bytes
,
111 VR_ANON
| VR_WRITABLE
, 0, &mem_type_cache
))) {
112 printf("VM: map_page_region failed\n");
116 assert(vr
->length
== bytes
);
118 for(offset
= 0; offset
< bytes
; offset
+= VM_PAGE_SIZE
) {
119 struct cached_page
*hb
;
121 assert(vr
->length
== bytes
);
122 assert(offset
< vr
->length
);
124 if(!(hb
= find_cached_page_bydev(dev
, dev_off
+ offset
,
125 msg
->m_vmmcp
.ino
, ino_off
+ offset
, 1))) {
126 map_unmap_region(caller
, vr
, 0, bytes
);
130 assert(!vr
->param
.pb_cache
);
131 vr
->param
.pb_cache
= hb
->page
;
133 assert(vr
->length
== bytes
);
134 assert(offset
< vr
->length
);
136 if(map_pf(caller
, vr
, offset
, 1, NULL
, NULL
, 0, &io
) != OK
) {
137 map_unmap_region(caller
, vr
, 0, bytes
);
138 printf("VM: map_pf failed\n");
141 assert(!vr
->param
.pb_cache
);
144 memset(msg
, 0, sizeof(*msg
));
146 msg
->m_vmmcp_reply
.addr
= (void *) vr
->vaddr
;
151 cache_sanitycheck_internal();
157 static int cache_pagefault(struct vmproc
*vmp
, struct vir_region
*region
,
158 struct phys_region
*ph
, int write
, vfs_callback_t cb
,
159 void *state
, int len
, int *io
)
161 vir_bytes offset
= ph
->offset
;
162 assert(ph
->ph
->phys
== MAP_NONE
);
163 assert(region
->param
.pb_cache
);
164 pb_unreferenced(region
, ph
, 0);
165 pb_link(ph
, region
->param
.pb_cache
, offset
, region
);
166 region
->param
.pb_cache
= NULL
;
172 do_setcache(message
*msg
)
175 dev_t dev
= msg
->m_vmmcp
.dev
;
176 off_t dev_off
= msg
->m_vmmcp
.dev_offset
;
177 off_t ino_off
= msg
->m_vmmcp
.ino_offset
;
179 struct vmproc
*caller
;
181 phys_bytes bytes
= msg
->m_vmmcp
.pages
* VM_PAGE_SIZE
;
183 if(bytes
< VM_PAGE_SIZE
) return EINVAL
;
185 if(dev_off
% PAGE_SIZE
|| ino_off
% PAGE_SIZE
) {
186 printf("VM: unaligned cache operation\n");
190 if(vm_isokendpt(msg
->m_source
, &n
) != OK
) panic("bogus source");
193 for(offset
= 0; offset
< bytes
; offset
+= VM_PAGE_SIZE
) {
194 struct vir_region
*region
;
195 struct phys_region
*phys_region
= NULL
;
196 vir_bytes v
= (vir_bytes
) msg
->m_vmmcp
.block
+ offset
;
197 struct cached_page
*hb
;
199 if(!(region
= map_lookup(caller
, v
, &phys_region
))) {
200 printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n", offset
, v
);
205 printf("VM: error: no available memory region given\n");
209 if((hb
=find_cached_page_bydev(dev
, dev_off
+ offset
,
210 msg
->m_vmmcp
.ino
, ino_off
+ offset
, 1))) {
211 /* block inode info updated */
212 if(hb
->page
!= phys_region
->ph
) {
213 /* previous cache entry has become
214 * obsolete; make a new one. rmcache
215 * removes it from the cache and frees
216 * the page if it isn't mapped in anywhere
221 /* block was already there, inode info might've changed which is fine */
226 if(phys_region
->memtype
!= &mem_type_anon
&&
227 phys_region
->memtype
!= &mem_type_anon_contig
) {
228 printf("VM: error: no reasonable memory type\n");
232 if(phys_region
->ph
->refcount
!= 1) {
233 printf("VM: error: no reasonable refcount\n");
237 phys_region
->memtype
= &mem_type_cache
;
239 if((r
=addcache(dev
, dev_off
+ offset
,
240 msg
->m_vmmcp
.ino
, ino_off
+ offset
, phys_region
->ph
)) != OK
) {
241 printf("VM: addcache failed\n");
247 cache_sanitycheck_internal();
254 * A file system wants to invalidate all pages belonging to a certain device.
257 do_clearcache(message
*msg
)
261 dev
= msg
->m_vmmcp
.dev
;
263 clear_cache_bydev(dev
);