// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>
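
/*
 * Set up the per-inode bits that can be derived without touching the
 * on-disk compression index: legacy-compressed inodes carry no
 * z_erofs_map_header, so their fields are fixed here and the inode is
 * marked Z_INITED immediately; everything else is deferred to
 * z_erofs_fill_inode_lazy().
 */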
int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);

	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
		vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}

	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}
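
/*
 * Parse the z_erofs_map_header that follows the inode base and xattrs
 * on disk, and cache the decoded fields in the in-memory erofs_inode.
 * Concurrent callers are serialized with the EROFS_I_BL_Z_BIT bit
 * lock; once EROFS_I_Z_INITED_BIT is set, later calls return early.
 */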
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err;
	erofs_off_t pos;
	struct page *page;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		return 0;

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	page = erofs_get_meta_page(sb, erofs_blknr(pos));
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out_unlock;
	}

	kaddr = kmap_atomic(page);

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown compression format %u for nid %llu, please upgrade kernel",
			  vi->z_algorithmtype[0], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
					((h->h_clusterbits >> 3) & 3);

	if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
		erofs_err(sb, "unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
			  vi->z_physical_clusterbits[0], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
					((h->h_clusterbits >> 5) & 7);
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
unmap_done:
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}
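
/*
 * Cursor state shared by the mapping helpers below: the inode being
 * mapped, the caller's erofs_map_blocks, a kmapped window into the
 * current metadata page, and the fields decoded from the lcluster
 * index that ->lcn points at.
 */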
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8  type;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk;
};
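
/*
 * Make sure m->kaddr points into the metadata block @eblk, reusing the
 * currently cached map->mpage when it already holds that block and
 * fetching (and pinning) a new meta page otherwise.
 */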
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;
	struct erofs_map_blocks *const map = m->map;
	struct page *mpage = map->mpage;

	if (mpage) {
		if (mpage->index == eblk) {
			if (!m->kaddr)
				m->kaddr = kmap_atomic(mpage);
			return 0;
		}

		if (m->kaddr) {
			kunmap_atomic(m->kaddr);
			m->kaddr = NULL;
		}
		put_page(mpage);
	}

	mpage = erofs_get_meta_page(sb, eblk);
	if (IS_ERR(mpage)) {
		map->mpage = NULL;
		return PTR_ERR(mpage);
	}
	m->kaddr = kmap_atomic(mpage);
	unlock_page(mpage);
	map->mpage = mpage;
	return 0;
}
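
/*
 * Decode one fixed-size z_erofs_vle_decompressed_index for logical
 * cluster @lcn of a legacy-compressed inode, recording its type plus
 * either the cluster offset and block address (HEAD/PLAIN) or the
 * lookback deltas (NONHEAD) in the maprecorder.
 */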
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}
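
/*
 * Pull one encoded lcluster out of a compacted index pack: @pos is a
 * bit offset into @in (hence the unaligned 32-bit load), the low
 * @lobits bits hold the lo value (clusterofs or delta) and the two
 * bits above them hold the cluster type.
 */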
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}
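
/*
 * Decode the lcluster at byte offset @eofs inside its compacted pack.
 * A pack of vcnt entries occupies (vcnt << amortizedshift) bytes and
 * reserves a trailing __le32 for the first block address, so each
 * entry gets encodebits = ((vcnt << amortizedshift) - 4) * 8 / vcnt
 * bits: 16 bits for the 2-entry/4B-amortized format and 14 bits for
 * the 16-entry/2B-amortized format.
 */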
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  unsigned int eofs)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk;
	int i;
	u8 *in, type;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;
		if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	nblk = 1;
	while (i > 0) {
		--i;
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);
		if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			i -= lo;

		if (i >= 0)
			++nblk;
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
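
/*
 * Locate the compacted pack holding @lcn. The index area starts with
 * up to seven 4B-amortized lclusters used to reach 32-byte alignment,
 * then (if Z_EROFS_ADVISE_COMPACTED_2B is advised) a 2B-amortized
 * region rounded down to whole 16-entry packs, and finally 4B-amortized
 * lclusters for the remainder.
 */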
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to reach 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
}
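
/*
 * Dispatch on the inode's data layout: legacy-compressed inodes use
 * fixed-size on-disk indexes, compacted ones use bit-packed packs.
 */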
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn);

	return -EINVAL;
}
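
/*
 * Walk backwards from a NONHEAD lcluster to the HEAD (or PLAIN)
 * lcluster that starts the extent, following delta[0] hops, and fill
 * in map->m_la once the head is found. A zero or out-of-range lookback
 * distance indicates corrupted on-disk indexes.
 */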
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err(m->inode->i_sb,
			  "bogus lookback distance @ nid %llu", vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err(m->inode->i_sb,
				  "invalid lookback distance 0 @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		map->m_flags &= ~EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err(m->inode->i_sb,
			  "unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}
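
/*
 * Map one logical extent of a compressed inode: look up the lcluster
 * covering map->m_la, walk back to the extent head if it lands on a
 * NONHEAD lcluster, and return the logical range and the physical
 * (compressed) block through m_la/m_llen/m_pa/m_plen and m_flags.
 */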
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long long ofs, end;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = map->m_la;
	m.lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, m.lcn);
	if (err)
		goto unmap_out;

	map->m_flags = EROFS_MAP_ZIPPED;	/* by default, compressed */
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (endoff >= m.clusterofs)
			map->m_flags &= ~EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (endoff >= m.clusterofs) {
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;
	map->m_plen = 1 << lclusterbits;
	map->m_pa = blknr_to_addr(m.pblk);
	map->m_flags |= EROFS_MAP_MAPPED;

unmap_out:
	if (m.kaddr)
		kunmap_atomic(m.kaddr);

out:
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}