// SPDX-License-Identifier: GPL-2.0-only
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags);

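/*
 * Initialize the compressed part of an inode at iget time: legacy
 * compressed inodes without big pcluster or ztailpacking need no
 * on-disk map header, so they can be marked Z_INITED right away;
 * everything else is parsed lazily in z_erofs_fill_inode_lazy().
 */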
int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}
	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}

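/*
 * Parse the on-disk z_erofs_map_header on first use: pick up the
 * compression algorithms, logical cluster bits and advise flags, and
 * locate the inline (tail-packing) pcluster if one exists.  Guarded by
 * EROFS_I_BL_Z_BIT so that concurrent callers only parse it once.
 */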
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() before set_bit() below so that
		 * the z_* fields are only observed after the bit is set
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
		  !erofs_sb_has_ztailpacking(EROFS_SB(sb)) &&
		  vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos),
				   EROFS_KMAP_ATOMIC);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out_unlock;
	}

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	/* grab h_idata_size before the header mapping is dropped below */
	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
unmap_done:
	erofs_put_metabuf(&buf);
	if (err)
		goto out_unlock;

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_unlock;
	}
	/* paired with smp_mb() in the fast path at the top of this function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

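/*
 * Cursor state shared by the lcluster-index walkers below: the current
 * logical cluster number, its decoded type/offset/deltas, and where the
 * next packed index starts on disk.
 */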
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* fields describing the extent found at @lcn */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
};

static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;

	m->kaddr = erofs_read_metabuf(&m->map->buf, sb, eblk,
				      EROFS_KMAP_ATOMIC);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);
	return 0;
}

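/*
 * Read the full (non-compacted) lcluster index for @lcn and record its
 * type, cluster offset, deltas and physical block address.
 */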
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					      Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

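/*
 * Decode one packed lcluster index at bit offset @pos: return the low
 * bits (clusterofs or delta, masked by @lomask) and store the 2-bit
 * cluster type in @type.
 */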
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

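/*
 * Compute the lookahead distance (delta[1]) for the lcluster at index
 * @i of a compacted pack: count the following NONHEAD lclusters and,
 * if the run reaches the end of the pack, extend it by the distance
 * stored in the pack's last entry.
 */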
static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* the last NONHEAD lcluster in the pack records the remaining distance */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

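/*
 * Decode the compacted index pack containing position @pos: derive the
 * current lcluster's type/offset/deltas and, for HEAD lclusters, the
 * physical block address by replaying the preceding in-pack entries.
 */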
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* it doesn't equal to round_up(..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out the lookahead distance (delta[1]) if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;

	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				/* big pcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

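/*
 * Translate @lcn into the on-disk position of its compacted index,
 * accounting for the initial 4B-aligned region and the optional 2B
 * region, then hand the pack over to unpack_compacted_index().
 */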
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

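/* dispatch to the legacy or compacted index parser based on datalayout */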
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

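/*
 * Walk backwards from the current NONHEAD lcluster by the recorded
 * lookback distance(s) until the owning HEAD/PLAIN lcluster is found,
 * then record its type and logical start address.
 */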
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		/* load extent head logical cluster if needed */
		err = z_erofs_load_cluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
			if (!m->delta[0]) {
				erofs_err(m->inode->i_sb,
					  "invalid lookback distance 0 @ nid %llu",
					  vi->nid);
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			lookback_distance = m->delta[0];
			continue;
		case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
		case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(m->inode->i_sb,
				  "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}

	erofs_err(m->inode->i_sb, "bogus lookback distance @ nid %llu",
		  vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

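/*
 * Work out the compressed (physical) length of the current extent:
 * one lcluster for non-big-pcluster heads, otherwise read the CBLKCNT
 * recorded in the following NONHEAD lcluster.
 */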
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster was already handled initially without
	 * a valid compressedblks, it cannot be CBLKCNT; otherwise an
	 * internal implementation error has been detected.
	 *
	 * The code below can also handle this case properly, but BUG_ON
	 * in debugging mode so that developers notice it.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(m->inode->i_sb,
			  "cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
	return 0;
err_bonus_cblkcnt:
	erofs_err(m->inode->i_sb,
		  "bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

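/*
 * Extend map->m_llen across consecutive NONHEAD lclusters (using the
 * lookahead distances) up to the next HEAD lcluster or EOF, so that the
 * full decompressed length of the extent is reported.
 */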
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* stop at the HEAD lcluster of the next extent */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

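/*
 * Core mapping routine: resolve the (compressed) extent covering
 * map->m_la (or the tail extent when EROFS_GET_BLOCKS_FINDTAIL is set)
 * into m_pa/m_plen/m_llen plus the algorithm used to encode it.
 */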
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* look back for the head lcluster of this extent */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL)
		vi->z_tailextent_headlcn = m.lcn;
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else {
		map->m_pa = blknr_to_addr(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto out;
	}

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
		map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2)
		map->m_algorithmformat = vi->z_algorithmtype[1];
	else
		map->m_algorithmformat = vi->z_algorithmtype[0];

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}
unmap_out:
	erofs_unmap_metabuf(&m.map->buf);

out:
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	return err;
}

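/*
 * Public entry for mapping a logical range of a compressed inode:
 * handles the beyond-EOF case directly and otherwise defers to
 * z_erofs_do_map_blocks() after the map header has been parsed.
 */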
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON if CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}

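/*
 * iomap_begin() for the fiemap/report path: translate the mapping
 * result into an iomap extent, reporting unmapped ranges as holes.
 */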
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * No strict rule on how to describe extents past EOF, so
		 * just report the requested range as a hole for now.
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + map.m_la - offset;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};