//-----------------------------------------------------------------------------
// Borrowed initially from https://github.com/pellepl/spiffs
// Copyright (c) 2013-2017 Peter Andersson (pelleplutt1976 at gmail.com)
// Copyright (C) Proxmark3 contributors. See AUTHORS.md for details.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// See LICENSE.txt for the text of the license.
//-----------------------------------------------------------------------------

#include "spiffs.h"
#include "spiffs_nucleus.h"

#if SPIFFS_CACHE

// returns cached page for given page index, or null if no such cached page
static spiffs_cache_page *spiffs_cache_page_get(spiffs *fs, spiffs_page_ix pix) {
    spiffs_cache *cache = spiffs_get_cache(fs);
    if ((cache->cpage_use_map & cache->cpage_use_mask) == 0) return 0;
    int i;
    for (i = 0; i < cache->cpage_count; i++) {
        spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
        if ((cache->cpage_use_map & (1 << i)) &&
                (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) == 0 &&
                cp->ucache.spix.pix == pix) {
            //SPIFFS_CACHE_DBG("CACHE_GET: have cache page "_SPIPRIi" for "_SPIPRIpg"\n", i, pix);
            cp->last_access = cache->last_access;
            return cp;
        }
    }
    //SPIFFS_CACHE_DBG("CACHE_GET: no cache for "_SPIPRIpg"\n", pix);
    return 0;
}

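// Note: each cache page header carries a use bit in cache->cpage_use_map, an
// LRU timestamp (last_access), and the ucache union, which holds either the
// cached flash page index (ucache.spix.pix) for read-cache pages or the owning
// object id (ucache.swrc.obj_id) for write-cache pages, as selected by the
// SPIFFS_CACHE_FLAG_TYPE_WR flag.
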
// frees a cached page, optionally writing it back to flash first
static s32_t spiffs_cache_page_free(spiffs *fs, int ix, u8_t write_back) {
    s32_t res = SPIFFS_OK;
    spiffs_cache *cache = spiffs_get_cache(fs);
    spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, ix);
    if (cache->cpage_use_map & (1 << ix)) {
        if (write_back &&
                (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) == 0 &&
                (cp->flags & SPIFFS_CACHE_FLAG_DIRTY)) {
            u8_t *mem = spiffs_get_cache_page(fs, cache, ix);
            SPIFFS_CACHE_DBG("CACHE_FREE: write cache page "_SPIPRIi" pix "_SPIPRIpg"\n", ix, cp->ucache.spix.pix);
            res = SPIFFS_HAL_WRITE(fs, SPIFFS_PAGE_TO_PADDR(fs, cp->ucache.spix.pix), SPIFFS_CFG_LOG_PAGE_SZ(fs), mem);
        }

#if SPIFFS_CACHE_WR
        if (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) {
            SPIFFS_CACHE_DBG("CACHE_FREE: free cache page "_SPIPRIi" objid "_SPIPRIid"\n", ix, cp->ucache.swrc.obj_id);
        } else
#endif
        {
            SPIFFS_CACHE_DBG("CACHE_FREE: free cache page "_SPIPRIi" pix "_SPIPRIpg"\n", ix, cp->ucache.spix.pix);
        }
        cache->cpage_use_map &= ~(1 << ix);
        cp->flags = 0;
    }

    return res;
}

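// Note: a page is written back to flash only when write_back is requested, the
// page is a read-cache page (SPIFFS_CACHE_FLAG_TYPE_WR not set) and it is
// marked SPIFFS_CACHE_FLAG_DIRTY; in every other case the entry is simply
// released by clearing its bit in cpage_use_map.
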
// removes the oldest accessed cached page
static s32_t spiffs_cache_page_remove_oldest(spiffs *fs, u8_t flag_mask, u8_t flags) {
    s32_t res = SPIFFS_OK;
    spiffs_cache *cache = spiffs_get_cache(fs);

    if ((cache->cpage_use_map & cache->cpage_use_mask) != cache->cpage_use_mask) {
        // at least one free cpage
        return SPIFFS_OK;
    }

    // all busy, scan through all to find the cpage which has the oldest access
    int i;
    int cand_ix = -1;
    u32_t oldest_val = 0;
    for (i = 0; i < cache->cpage_count; i++) {
        spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
        if ((cache->last_access - cp->last_access) > oldest_val &&
                (cp->flags & flag_mask) == flags) {
            oldest_val = cache->last_access - cp->last_access;
            cand_ix = i;
        }
    }

    if (cand_ix >= 0) {
        res = spiffs_cache_page_free(fs, cand_ix, 1);
    }

    return res;
}

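// Note: eviction picks the entry with the greatest last_access distance whose
// flags match (flag_mask, flags). Callers such as spiffs_phys_rd pass
// (SPIFFS_CACHE_FLAG_TYPE_WR, 0) to evict only read-cache pages and leave
// write-cache pages untouched; the victim is freed with write_back = 1 so any
// dirty data reaches flash first.
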
// allocates a new cached page and returns it, or null if all cache pages are busy
static spiffs_cache_page *spiffs_cache_page_allocate(spiffs *fs) {
    spiffs_cache *cache = spiffs_get_cache(fs);
    if (cache->cpage_use_map == 0xffffffff) {
        // out of cache memory
        return 0;
    }
    int i;
    for (i = 0; i < cache->cpage_count; i++) {
        if ((cache->cpage_use_map & (1 << i)) == 0) {
            spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
            cache->cpage_use_map |= (1 << i);
            cp->last_access = cache->last_access;
            //SPIFFS_CACHE_DBG("CACHE_ALLO: allocated cache page "_SPIPRIi"\n", i);
            return cp;
        }
    }
    // out of cache entries
    return 0;
}

// drops the cache page for the given page index
void spiffs_cache_drop_page(spiffs *fs, spiffs_page_ix pix) {
    spiffs_cache_page *cp = spiffs_cache_page_get(fs, pix);
    if (cp) {
        spiffs_cache_page_free(fs, cp->ix, 0);
    }
}

// ------------------------------

// reads from spi flash or the cache
s32_t spiffs_phys_rd(
    spiffs *fs,
    u8_t op,
    spiffs_file fh,
    u32_t addr,
    u32_t len,
    u8_t *dst) {
    (void)fh;
    s32_t res = SPIFFS_OK;
    spiffs_cache *cache = spiffs_get_cache(fs);
    spiffs_cache_page *cp = spiffs_cache_page_get(fs, SPIFFS_PADDR_TO_PAGE(fs, addr));
    cache->last_access++;
    if (cp) {
        // we've already got one, you see
#if SPIFFS_CACHE_STATS
        fs->cache_hits++;
#endif
        cp->last_access = cache->last_access;
        u8_t *mem = spiffs_get_cache_page(fs, cache, cp->ix);
        _SPIFFS_MEMCPY(dst, &mem[SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr)], len);
    } else {
        if ((op & SPIFFS_OP_TYPE_MASK) == SPIFFS_OP_T_OBJ_LU2) {
            // for second layer lookup functions, we do not cache in order to prevent shredding
            return SPIFFS_HAL_READ(fs, addr, len, dst);
        }
#if SPIFFS_CACHE_STATS
        fs->cache_misses++;
#endif
        // this operation will always free one cache page (unless all already free),
        // the result code stems from the write operation of the possibly freed cache page
        res = spiffs_cache_page_remove_oldest(fs, SPIFFS_CACHE_FLAG_TYPE_WR, 0);

        cp = spiffs_cache_page_allocate(fs);
        if (cp) {
            cp->flags = SPIFFS_CACHE_FLAG_WRTHRU;
            cp->ucache.spix.pix = SPIFFS_PADDR_TO_PAGE(fs, addr);
            SPIFFS_CACHE_DBG("CACHE_ALLO: allocated cache page "_SPIPRIi" for pix "_SPIPRIpg"\n", cp->ix, cp->ucache.spix.pix);

            s32_t res2 = SPIFFS_HAL_READ(fs,
                                         addr - SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr),
                                         SPIFFS_CFG_LOG_PAGE_SZ(fs),
                                         spiffs_get_cache_page(fs, cache, cp->ix));
            if (res2 != SPIFFS_OK) {
                // honor read failure before possible write failure (bad idea?)
                res = res2;
            }
            u8_t *mem = spiffs_get_cache_page(fs, cache, cp->ix);
            _SPIFFS_MEMCPY(dst, &mem[SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr)], len);
        } else {
            // this will never happen, last resort for sake of symmetry
            s32_t res2 = SPIFFS_HAL_READ(fs, addr, len, dst);
            if (res2 != SPIFFS_OK) {
                // honor read failure before possible write failure (bad idea?)
                res = res2;
            }
        }
    }
    return res;
}

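// Note: on a cache hit the request is served directly from the cached logical
// page. On a miss (except for second-layer lookup reads, which bypass the
// cache) the oldest read-cache page is evicted, an entire logical page is read
// from flash into the newly allocated entry, and the requested bytes are copied
// out of it.
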
// writes to spi flash and/or the cache
s32_t spiffs_phys_wr(
    spiffs *fs,
    u8_t op,
    spiffs_file fh,
    u32_t addr,
    u32_t len,
    u8_t *src) {
    (void)fh;
    spiffs_page_ix pix = SPIFFS_PADDR_TO_PAGE(fs, addr);
    spiffs_cache *cache = spiffs_get_cache(fs);
    spiffs_cache_page *cp = spiffs_cache_page_get(fs, pix);

    if (cp && (op & SPIFFS_OP_COM_MASK) != SPIFFS_OP_C_WRTHRU) {
        // have a cache page
        // copy in data to cache page

        if ((op & SPIFFS_OP_COM_MASK) == SPIFFS_OP_C_DELE &&
                (op & SPIFFS_OP_TYPE_MASK) != SPIFFS_OP_T_OBJ_LU) {
            // page is being deleted, wipe from cache - unless it is a lookup page
            spiffs_cache_page_free(fs, cp->ix, 0);
            return SPIFFS_HAL_WRITE(fs, addr, len, src);
        }

        u8_t *mem = spiffs_get_cache_page(fs, cache, cp->ix);
        _SPIFFS_MEMCPY(&mem[SPIFFS_PADDR_TO_PAGE_OFFSET(fs, addr)], src, len);

        cache->last_access++;
        cp->last_access = cache->last_access;

        if (cp->flags & SPIFFS_CACHE_FLAG_WRTHRU) {
            // page is being updated, no write-cache, just pass thru
            return SPIFFS_HAL_WRITE(fs, addr, len, src);
        } else {
            return SPIFFS_OK;
        }
    } else {
        // no cache page, no write cache - just write thru
        return SPIFFS_HAL_WRITE(fs, addr, len, src);
    }
}

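// Note: unless the operation itself is marked SPIFFS_OP_C_WRTHRU, writes
// refresh a matching cached page so subsequent reads stay coherent. Pages
// flagged SPIFFS_CACHE_FLAG_WRTHRU (read cache) are passed through to flash
// immediately, whereas a write-cache page only stores the data in RAM here and
// returns SPIFFS_OK.
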
#if SPIFFS_CACHE_WR
// returns the cache page that this fd refers to, or null if no cache page
spiffs_cache_page *spiffs_cache_page_get_by_fd(spiffs *fs, spiffs_fd *fd) {
    spiffs_cache *cache = spiffs_get_cache(fs);

    if ((cache->cpage_use_map & cache->cpage_use_mask) == 0) {
        // all cpages free, no cpage can be assigned to an obj_id
        return 0;
    }

    int i;
    for (i = 0; i < cache->cpage_count; i++) {
        spiffs_cache_page *cp = spiffs_get_cache_page_hdr(fs, cache, i);
        if ((cache->cpage_use_map & (1 << i)) &&
                (cp->flags & SPIFFS_CACHE_FLAG_TYPE_WR) &&
                cp->ucache.swrc.obj_id == fd->obj_id) {
            return cp;
        }
    }

    return 0;
}

// allocates a new cache page and refers it to the given fd - flushes an old cache
// page if all cache is busy
spiffs_cache_page *spiffs_cache_page_allocate_by_fd(spiffs *fs, spiffs_fd *fd) {
    // before this function is called, it is ensured that there is no already existing
    // cache page with the same object id
    spiffs_cache_page_remove_oldest(fs, SPIFFS_CACHE_FLAG_TYPE_WR, 0);
    spiffs_cache_page *cp = spiffs_cache_page_allocate(fs);
    if (cp == 0) {
        // could not get cache page
        return 0;
    }

    cp->flags = SPIFFS_CACHE_FLAG_TYPE_WR;
    cp->ucache.swrc.obj_id = fd->obj_id;
    fd->cache_page = cp;
    SPIFFS_CACHE_DBG("CACHE_ALLO: allocated cache page "_SPIPRIi" for fd "_SPIPRIfd":"_SPIPRIid"\n", cp->ix, fd->file_nbr, fd->obj_id);
    return cp;
}

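// Note: at most one write-cache page exists per object id; several fds open on
// the same object may share it, which is why spiffs_cache_fd_release below
// unhooks every fd pointing at the page before freeing it.
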
// unrefers all fds that this cache page refers to and releases the cache page
void spiffs_cache_fd_release(spiffs *fs, spiffs_cache_page *cp) {
    if (cp == 0) return;
    u32_t i;
    spiffs_fd *fds = (spiffs_fd *)fs->fd_space;
    for (i = 0; i < fs->fd_count; i++) {
        spiffs_fd *cur_fd = &fds[i];
        if (cur_fd->file_nbr != 0 && cur_fd->cache_page == cp) {
            cur_fd->cache_page = 0;
        }
    }
    spiffs_cache_page_free(fs, cp->ix, 0);

    cp->ucache.swrc.obj_id = 0;
}
#endif // SPIFFS_CACHE_WR

// initializes the cache
void spiffs_cache_init(spiffs *fs) {
    if (fs->cache == 0) return;
    u32_t sz = fs->cache_size;
    u32_t cache_mask = 0;
    int i;
    int cache_entries =
        (sz - sizeof(spiffs_cache)) / (SPIFFS_CACHE_PAGE_SIZE(fs));
    if (cache_entries <= 0) return;

    for (i = 0; i < cache_entries; i++) {
        cache_mask <<= 1;
        cache_mask |= 1;
    }

    spiffs_cache cache;
    memset(&cache, 0, sizeof(spiffs_cache));
    cache.cpage_count = cache_entries;
    cache.cpages = (u8_t *)((u8_t *)fs->cache + sizeof(spiffs_cache));

    cache.cpage_use_map = 0xffffffff;
    cache.cpage_use_mask = cache_mask;
    _SPIFFS_MEMCPY(fs->cache, &cache, sizeof(spiffs_cache));

    spiffs_cache *c = spiffs_get_cache(fs);

    memset(c->cpages, 0, c->cpage_count * SPIFFS_CACHE_PAGE_SIZE(fs));

    c->cpage_use_map &= ~(c->cpage_use_mask);

    for (i = 0; i < cache.cpage_count; i++) {
        spiffs_get_cache_page_hdr(fs, c, i)->ix = i;
    }
}

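// Note: the fs->cache buffer is laid out as one spiffs_cache descriptor
// followed by cache_entries slots of SPIFFS_CACHE_PAGE_SIZE(fs) bytes each
// (a spiffs_cache_page header plus one logical page of data); cache_mask ends
// up with one bit set per slot and becomes cpage_use_mask.
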
#endif // SPIFFS_CACHE