/*
 * spiffs_cache.c
 *
 *  Created on: Jun 23, 2013
 *      Author: petera
 */

#include "spiffs.h"
#include "spiffs_nucleus.h"

#if SPIFFS_CACHE

// returns cached page for given page index, or null if no such cached page
static spiffs_cache_page *spiffs_cache_page_get(spiffs *fs, spiffs_page_ix pix) {
  spiffs_cache *cache = spiffs_get_cache(fs);
  if ((cache->cpage_use_map & cache->cpage_use_mask) == 0) return 0;
  int i;
  for (i = 0; i < cache->cpage_count; i++) {
    spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
    if ((cache->cpage_use_map & (1 << i)) &&
        (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) == 0 &&
        cp->ucache.spix.pix == pix) {
      //SPIFFS_CACHE_DBG("CACHE_GET: have cache page "_SPIPRIi" for "_SPIPRIpg"\n", i, pix);
      cp->last_access = cache->last_access;
      return cp;
    }
  }
  //SPIFFS_CACHE_DBG("CACHE_GET: no cache for "_SPIPRIpg"\n", pix);
  return 0;
}
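
// Note on keying: read cache pages are matched on logical page index only;
// write cache pages (SPIFFS_CACHE_FLAG_TYPE_WR) are deliberately skipped above,
// as they are keyed on object id and found via spiffs_cache_page_get_by_fd().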

// frees cached page
static s32_t spiffs_cache_page_free(spiffs *fs, int ix, u8_t write_back) {
  s32_t res = SPIFFS_OK;
  spiffs_cache *cache = spiffs_get_cache(fs);
  spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, ix);
  if (cache->cpage_use_map & (1 << ix)) {
    if (write_back &&
        (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) == 0 &&
        (cp->flags & SPIFFS_CACHE_FLAG_DIRTY)) {
      u8_t *mem = spiffs_get_cache_page(fs, cache, ix);
      SPIFFS_CACHE_DBG("CACHE_FREE: write cache page "_SPIPRIi" pix "_SPIPRIpg"\n", ix, cp->ucache.spix.pix);
      res = SPIFFS_HAL_WRITE(fs, SPIFFS_PAGE_TO_PADDR(fs, cp->ucache.spix.pix), SPIFFS_CFG_LOG_PAGE_SZ(fs), mem);
    }

#if SPIFFS_CACHE_WR
    if (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) {
      SPIFFS_CACHE_DBG("CACHE_FREE: free cache page "_SPIPRIi" objid "_SPIPRIid"\n", ix, cp->ucache.swrc.obj_id);
    } else
#endif
    {
      SPIFFS_CACHE_DBG("CACHE_FREE: free cache page "_SPIPRIi" pix "_SPIPRIpg"\n", ix, cp->ucache.spix.pix);
    }
    cache->cpage_use_map &= ~(1 << ix);
    cp->flags = 0;
  }

  return res;
}
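
// Note: only a dirty read cache page (SPIFFS_CACHE_FLAG_DIRTY set, TYPE_WR clear)
// is written back to flash when write_back is nonzero; a write cache page is just
// logged and its slot released here - this function never writes its buffered data.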

// removes the oldest accessed cached page
static s32_t spiffs_cache_page_remove_oldest(spiffs *fs, u8_t flag_mask, u8_t flags) {
  s32_t res = SPIFFS_OK;
  spiffs_cache *cache = spiffs_get_cache(fs);

  if ((cache->cpage_use_map & cache->cpage_use_mask) != cache->cpage_use_mask) {
    // at least one free cpage
    return SPIFFS_OK;
  }

  // all busy, scan thru all to find the cpage which has oldest access
  int i;
  int cand_ix = -1;
  u32_t oldest_val = 0;
  for (i = 0; i < cache->cpage_count; i++) {
    spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
    if ((cache->last_access - cp->last_access) > oldest_val &&
        (cp->flags & flag_mask) == flags) {
      oldest_val = cache->last_access - cp->last_access;
      cand_ix = i;
    }
  }

  if (cand_ix >= 0) {
    res = spiffs_cache_page_free(fs, cand_ix, 1);
  }

  return res;
}
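
// Eviction is least-recently-used: ages are measured as cache->last_access minus
// the page's own last_access stamp, and flag_mask/flags restrict which pages are
// candidates (callers pass SPIFFS_CACHE_FLAG_TYPE_WR/0 to evict read pages only).
// The evicted page is freed with write_back enabled, so a dirty page hits flash first.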

// allocates a new cached page and returns it, or null if all cache pages are busy
static spiffs_cache_page *spiffs_cache_page_allocate(spiffs *fs) {
  spiffs_cache *cache = spiffs_get_cache(fs);
  if (cache->cpage_use_map == 0xffffffff) {
    // out of cache memory
    return 0;
  }
  int i;
  for (i = 0; i < cache->cpage_count; i++) {
    if ((cache->cpage_use_map & (1 << i)) == 0) {
      spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
      cache->cpage_use_map |= (1 << i);
      cp->last_access = cache->last_access;
      //SPIFFS_CACHE_DBG("CACHE_ALLO: allocated cache page "_SPIPRIi"\n", i);
      return cp;
    }
  }
  // out of cache entries
  return 0;
}

// drops the cache page for given page index
void spiffs_cache_drop_page(spiffs *fs, spiffs_page_ix pix) {
  spiffs_cache_page *cp = spiffs_cache_page_get(fs, pix);
  if (cp) {
    spiffs_cache_page_free(fs, cp->ix, 0);
  }
}

// ------------------------------

// reads from spi flash or the cache
s32_t spiffs_phys_rd(
    spiffs *fs,
    u8_t op,
    spiffs_file fh,
    u32_t addr,
    u32_t len,
    u8_t *dst) {
  (void)fh;
  s32_t res = SPIFFS_OK;
  spiffs_cache *cache = spiffs_get_cache(fs);
  spiffs_cache_page *cp = spiffs_cache_page_get(fs, SPIFFS_PADDR_TO_PAGE(fs, addr));
  cache->last_access++;
  if (cp) {
    // we've already got one, you see
#if SPIFFS_CACHE_STATS
    fs->cache_hits++;
#endif
    cp->last_access = cache->last_access;
    u8_t *mem = spiffs_get_cache_page(fs, cache, cp->ix);
    _SPIFFS_MEMCPY(dst, &mem[SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr)], len);
  } else {
    if ((op & SPIFFS_OP_TYPE_MASK) == SPIFFS_OP_T_OBJ_LU2) {
      // for second layer lookup functions, we do not cache in order to prevent shredding
      return SPIFFS_HAL_READ(fs, addr, len, dst);
    }
#if SPIFFS_CACHE_STATS
    fs->cache_misses++;
#endif
    // this operation will always free one cache page (unless all already free),
    // the result code stems from the write operation of the possibly freed cache page
    res = spiffs_cache_page_remove_oldest(fs, SPIFFS_CACHE_FLAG_TYPE_WR, 0);

    cp = spiffs_cache_page_allocate(fs);
    if (cp) {
      cp->flags = SPIFFS_CACHE_FLAG_WRTHRU;
      cp->ucache.spix.pix = SPIFFS_PADDR_TO_PAGE(fs, addr);
      SPIFFS_CACHE_DBG("CACHE_ALLO: allocated cache page "_SPIPRIi" for pix "_SPIPRIpg"\n", cp->ix, cp->ucache.spix.pix);

      s32_t res2 = SPIFFS_HAL_READ(fs,
          addr - SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr),
          SPIFFS_CFG_LOG_PAGE_SZ(fs),
          spiffs_get_cache_page(fs, cache, cp->ix));
      if (res2 != SPIFFS_OK) {
        // honor read failure before possible write failure (bad idea?)
        res = res2;
      }
      u8_t *mem = spiffs_get_cache_page(fs, cache, cp->ix);
      _SPIFFS_MEMCPY(dst, &mem[SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr)], len);
    } else {
      // this will never happen, last resort for sake of symmetry
      s32_t res2 = SPIFFS_HAL_READ(fs, addr, len, dst);
      if (res2 != SPIFFS_OK) {
        // honor read failure before possible write failure (bad idea?)
        res = res2;
      }
    }
  }
  return res;
}
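
// Read path summary: a cache hit copies straight out of the cached page in RAM.
// On a miss (except for second-layer lookup reads, which bypass the cache) the
// oldest read cache page is evicted, the whole logical page containing addr is
// loaded into the freed slot, and the requested bytes are served from there.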

// writes to spi flash and/or the cache
s32_t spiffs_phys_wr(
    spiffs *fs,
    u8_t op,
    spiffs_file fh,
    u32_t addr,
    u32_t len,
    u8_t *src) {
  (void)fh;
  spiffs_page_ix pix = SPIFFS_PADDR_TO_PAGE(fs, addr);
  spiffs_cache *cache = spiffs_get_cache(fs);
  spiffs_cache_page *cp = spiffs_cache_page_get(fs, pix);

  if (cp && (op & SPIFFS_OP_COM_MASK) != SPIFFS_OP_C_WRTHRU) {
    // have a cache page
    // copy in data to cache page

    if ((op & SPIFFS_OP_COM_MASK) == SPIFFS_OP_C_DELE &&
        (op & SPIFFS_OP_TYPE_MASK) != SPIFFS_OP_T_OBJ_LU) {
      // page is being deleted, wipe from cache - unless it is a lookup page
      spiffs_cache_page_free(fs, cp->ix, 0);
      return SPIFFS_HAL_WRITE(fs, addr, len, src);
    }

    u8_t *mem = spiffs_get_cache_page(fs, cache, cp->ix);
    _SPIFFS_MEMCPY(&mem[SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr)], src, len);

    cache->last_access++;
    cp->last_access = cache->last_access;

    if (cp->flags & SPIFFS_CACHE_FLAG_WRTHRU) {
      // page is being updated, no write-cache, just pass thru
      return SPIFFS_HAL_WRITE(fs, addr, len, src);
    } else {
      return SPIFFS_OK;
    }
  } else {
    // no cache page, no write cache - just write thru
    return SPIFFS_HAL_WRITE(fs, addr, len, src);
  }
}
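
// Write path summary: if the target page is cached and the operation is not an
// explicit write-through, the cache copy is updated first. Deletions (other than
// lookup pages) drop the cache page and go straight to flash. A read cache page
// (SPIFFS_CACHE_FLAG_WRTHRU) is still written through; a write cache page is only
// updated in RAM and returns SPIFFS_OK without touching flash.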

#if SPIFFS_CACHE_WR
// returns the cache page that this fd refers to, or null if no cache page
spiffs_cache_page *spiffs_cache_page_get_by_fd(spiffs *fs, spiffs_fd *fd) {
  spiffs_cache *cache = spiffs_get_cache(fs);

  if ((cache->cpage_use_map & cache->cpage_use_mask) == 0) {
    // all cpages free, no cpage can be assigned to obj_id
    return 0;
  }

  int i;
  for (i = 0; i < cache->cpage_count; i++) {
    spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
    if ((cache->cpage_use_map & (1 << i)) &&
        (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) &&
        cp->ucache.swrc.obj_id == fd->obj_id) {
      return cp;
    }
  }

  return 0;
}

// allocates a new cache page and refers this to given fd - flushes an old cache
// page if all cache is busy
spiffs_cache_page *spiffs_cache_page_allocate_by_fd(spiffs *fs, spiffs_fd *fd) {
  // before this function is called, it is ensured that there is no already existing
  // cache page with same object id
  spiffs_cache_page_remove_oldest(fs, SPIFFS_CACHE_FLAG_TYPE_WR, 0);
  spiffs_cache_page *cp = spiffs_cache_page_allocate(fs);
  if (cp == 0) {
    // could not get cache page
    return 0;
  }

  cp->flags = SPIFFS_CACHE_FLAG_TYPE_WR;
  cp->ucache.swrc.obj_id = fd->obj_id;
  fd->cache_page = cp;
  SPIFFS_CACHE_DBG("CACHE_ALLO: allocated cache page "_SPIPRIi" for fd "_SPIPRIfd":"_SPIPRIid"\n", cp->ix, fd->file_nbr, fd->obj_id);
  return cp;
}
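
// The eviction call above uses flag_mask SPIFFS_CACHE_FLAG_TYPE_WR with flags 0,
// so only read cache pages can be thrown out to make room; an existing write cache
// page owned by another fd is never reclaimed here.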

// unrefers all fds that this cache page refers to and releases the cache page
void spiffs_cache_fd_release(spiffs *fs, spiffs_cache_page *cp) {
  if (cp == 0) return;
  u32_t i;
  spiffs_fd *fds = (spiffs_fd *)fs->fd_space;
  for (i = 0; i < fs->fd_count; i++) {
    spiffs_fd *cur_fd = &fds[i];
    if (cur_fd->file_nbr != 0 && cur_fd->cache_page == cp) {
      cur_fd->cache_page = 0;
    }
  }
  spiffs_cache_page_free(fs, cp->ix, 0);

  cp->ucache.swrc.obj_id = 0;
}
#endif // SPIFFS_CACHE_WR

// initializes the cache
void spiffs_cache_init(spiffs *fs) {
  if (fs->cache == 0) return;
  u32_t sz = fs->cache_size;
  u32_t cache_mask = 0;
  int i;
  int cache_entries =
      (sz - sizeof(spiffs_cache)) / (SPIFFS_CACHE_PAGE_SIZE(fs));
  if (cache_entries <= 0) return;

  for (i = 0; i < cache_entries; i++) {
    cache_mask <<= 1;
    cache_mask |= 1;
  }

  spiffs_cache cache;
  memset(&cache, 0, sizeof(spiffs_cache));
  cache.cpage_count = cache_entries;
  cache.cpages = (u8_t *)((u8_t *)fs->cache + sizeof(spiffs_cache));

  cache.cpage_use_map = 0xffffffff;
  cache.cpage_use_mask = cache_mask;
  _SPIFFS_MEMCPY(fs->cache, &cache, sizeof(spiffs_cache));

  spiffs_cache *c = spiffs_get_cache(fs);

  memset(c->cpages, 0, c->cpage_count * SPIFFS_CACHE_PAGE_SIZE(fs));

  c->cpage_use_map &= ~(c->cpage_use_mask);

  for (i = 0; i < cache.cpage_count; i++) {
    spiffs_get_cache_page_hdr(fs, c, i)->ix = i;
  }
}
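
// Resulting layout of the caller-supplied fs->cache buffer: one spiffs_cache header
// followed by cpage_count cache pages of SPIFFS_CACHE_PAGE_SIZE(fs) bytes each
// (a spiffs_cache_page header plus one logical page of data). A minimal usage
// sketch, assuming the standard SPIFFS_mount() signature and purely illustrative
// buffer sizes (LOG_PAGE_SIZE stands in for the configured logical page size):
//
//   static u8_t spiffs_work_buf[LOG_PAGE_SIZE * 2];
//   static u8_t spiffs_fds[32 * 4];
//   static u8_t spiffs_cache_buf[(LOG_PAGE_SIZE + 32) * 4];
//
//   SPIFFS_mount(&fs, &cfg, spiffs_work_buf,
//                spiffs_fds, sizeof(spiffs_fds),
//                spiffs_cache_buf, sizeof(spiffs_cache_buf),
//                0);
//
// spiffs_cache_init() is invoked from the mount path when SPIFFS_CACHE is enabled.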

#endif // SPIFFS_CACHE