// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/unaligned.h>
#include <trace/events/erofs.h>

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	unsigned long lcn;
	/* compression extent information gathered */
	u8  type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref;
};
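
/*
 * Load one lcluster index in the "full" (non-compact) layout, where the
 * indexes form a flat array of struct z_erofs_lcluster_index right after
 * the inode base and xattrs, so the record for @lcn can be addressed
 * directly.
 */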
static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
				      unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize) +
			lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise;

	di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP);
	if (IS_ERR(di))
		return PTR_ERR(di);

	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);

	advise = le16_to_cpu(di->di_advise);
	m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
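		/*
		 * If Z_EROFS_LI_D0_CBLKCNT is set, this is the first NONHEAD
		 * lcluster of a big pcluster and the low bits of delta[0]
		 * carry the compressed size in blocks rather than a lookback
		 * distance.
		 */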
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
	} else {
		m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
	}
	return 0;
}

static unsigned int decode_compactedbits(unsigned int lobits,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & ((1 << lobits) - 1);
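
	/*
	 * e.g. with lobits == 12 and pos == 14: load 32 bits unaligned at
	 * &in[1] (pos / 8) and shift right by 6 (pos & 7); the low 12 bits
	 * of v form lo and the following 2 bits encode the lcluster type.
	 */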
	*type = (v >> lobits) & 3;
	return lo;
}

static int get_compacted_la_distance(unsigned int lobits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	unsigned int lo, d1 = 0;
	u8 type;

	do {
		lo = decode_compactedbits(lobits, in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* the last lcluster in this pack is NONHEAD: its lo holds delta[1] */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}
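
/*
 * Decode a single lcluster from a compacted index pack.  Each pack is
 * (vcnt << amortizedshift) bytes: a trailing __le32 stores the base
 * blkaddr and the remaining bits are divided evenly into vcnt
 * (type, lo) fields, one per lcluster.
 */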
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
	bool big_pcluster;
	u8 *in, type;
	int i;

	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, EROFS_KMAP);
	if (IS_ERR(in))
		return PTR_ERR(in);

	/* not round_up(pos, ..): an already-aligned pos still advances a pack */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	bytes = pos & ((vcnt << amortizedshift) - 1);
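
	/*
	 * e.g. 4KiB blocks with compacted-2B: vcnt == 16 and
	 * amortizedshift == 1 give a 32-byte pack; excluding the trailing
	 * 4-byte blkaddr leaves 224 bits, so encodebits == 14 per lcluster
	 * (a 12-bit lo field plus a 2-bit type).
	 */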
	i = bytes >> amortizedshift;

	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lobits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in a pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lobits, in,
					  encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
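
/*
 * Locate the compacted index pack holding @lcn: an optional run of
 * compacted-4B packs first (so that compacted-2B packs start 32-byte
 * aligned), then compacted-2B packs if advised, then compacted-4B packs
 * for the remainder.
 */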
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
					 unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	unsigned int totalidx = erofs_iblks(inode);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;

	if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
		return -EINVAL;

	m->type = Z_EROFS_LCLUSTER_TYPE_NONHEAD;
	/* bring the start of compacted_2b packs to 32-byte alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
					   unsigned int lcn, bool lookahead)
{
	switch (EROFS_I(m->inode)->datalayout) {
	case EROFS_INODE_COMPRESSED_FULL:
		return z_erofs_load_full_lcluster(m, lcn);
	case EROFS_INODE_COMPRESSED_COMPACT:
		return z_erofs_load_compact_lcluster(m, lcn, lookahead);
	default:
		return -EINVAL;
	}
}
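
/*
 * Walk backwards from a NONHEAD lcluster to the HEAD lcluster that
 * starts the extent: each NONHEAD record supplies delta[0], the number
 * of lclusters to step back, until a PLAIN/HEAD1/HEAD2 record is found;
 * a zero distance means the indexes are corrupted.
 */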
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		err = z_erofs_load_lcluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
			lookback_distance = m->delta[0];
			if (!lookback_distance)
				goto err_bogus;
			continue;
		case Z_EROFS_LCLUSTER_TYPE_PLAIN:
		case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		case Z_EROFS_LCLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}
err_bogus:
	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
		  lookback_distance, m->lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}
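
/*
 * Figure out the compressed (physical) length of the current extent.
 * Without big pclusters a pcluster is exactly one lcluster; otherwise
 * the block count comes from the CBLKCNT record in the first NONHEAD
 * lcluster following the HEAD.
 */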
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_lcluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster was already handled initially without
	 * valid compressedblks, it at least must not be a CBLKCNT lcluster;
	 * otherwise an internal implementation error has been detected.
	 *
	 * The following code can handle that case properly anyway, but
	 * DBG_BUGON here in debugging mode so that developers notice it.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
		break;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(sb, "cannot find CBLKCNT @ lcn %lu of nid %llu", lcn,
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = erofs_pos(sb, m->compressedblks);
	return 0;
err_bonus_cblkcnt:
	erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}
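
/*
 * Walk forward from the current lcluster using the lookahead distance
 * delta[1] until the next HEAD lcluster (or EOF) is reached, in order to
 * extend map->m_llen over the whole logical (decompressed) extent.
 */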
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	while (1) {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_lcluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* work around invalid d1 generated by pre-1.0 mkfs */
			if (unlikely(!m->delta[1])) {
				m->delta[1] = 1;
				DBG_BUGON(1);
			}
		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
			if (lcn != headlcn)
				break;	/* ends at the next HEAD lcluster */
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	}
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}
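
/*
 * The core mapping routine: load the lcluster covering map->m_la (or the
 * tail lcluster for FINDTAIL requests), look back to the extent HEAD if
 * a NONHEAD lcluster was hit, then fill in the logical extent, physical
 * extent and compression format.
 */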
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff, afmt;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, special EOF lclusters (with
			 * at most three parts) are supported to inline data
			 * more effectively.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;
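
	/*
	 * A FINDTAIL lookup runs once at inode setup to probe the tail
	 * extent: record its head lcluster and, for fragments with full
	 * (non-compact) indexes, fold the upper 32 bits of the fragment
	 * offset in from pblk.
	 */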
	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(inode->i_sb, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}
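
	/*
	 * PLAIN extents are stored uncompressed (interlaced within the
	 * pcluster or shifted), so the decompressed length can never
	 * exceed the compressed length.
	 */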
	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
			Z_EROFS_COMPRESSION_INTERLACED :
			Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
				  afmt, vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	}
	map->m_algorithmformat = afmt;

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
	     map->m_llen >= i_blocksize(inode))) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	return err;
}
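
/*
 * Parse the per-inode z_erofs_map_header lazily on first access.
 * EROFS_I_BL_Z_BIT serializes concurrent initializers, and the result is
 * published by setting EROFS_I_Z_INITED_BIT after an smp_mb().
 */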
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(h)) {
		err = PTR_ERR(h);
		goto out_unlock;
	}

	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode.  The remaining bits keep
	 * z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
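
	/* HEAD1 and HEAD2 lclusters may each use a different algorithm */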
	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto out_put_metabuf;
	}

	vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);
		if (err < 0)
			goto out_put_metabuf;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
	erofs_put_metabuf(&buf);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	if (map->m_la >= inode->i_size) {	/* post-EOF unmapped extent */
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = inode->i_size;
		map->m_flags = EROFS_MAP_MAPPED |
			EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
		goto out;
	}
	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
	    unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
		     map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
		err = -EOPNOTSUPP;
	if (err)
		map->m_llen = 0;
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}
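
/*
 * iomap_begin for the reporting ops (FIEMAP and friends): extents are
 * looked up with EROFS_GET_BLOCKS_FIEMAP and only described, never read;
 * fragment-backed data is reported as mapped with no physical address.
 */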
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe post-EOF
		 * extents, but something must be reported; otherwise, iomap
		 * itself will loop endlessly past EOF.
		 *
		 * Calculate the effective offset by subtracting the extent
		 * start (map.m_la) from the requested offset, and add it to
		 * the length.  (NB: offset >= map.m_la always holds.)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};