Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2017-2018 HUAWEI, Inc.
0004  *             https://www.huawei.com/
0005  * Copyright (C) 2021, Alibaba Cloud
0006  */
0007 #include "xattr.h"
0008 
0009 #include <trace/events/erofs.h>
0010 
/*
 * Read and decode the on-disk inode (compact or extended layout) of
 * @inode into the in-memory erofs_inode and generic VFS inode fields.
 *
 * On success, returns the mapped metadata address and sets *ofs to the
 * byte offset just past the core on-disk inode within that mapping
 * (i.e. where the xattr area / inline data follows).  On failure,
 * returns an ERR_PTR() with the bounce buffer freed and @buf released
 * on the common error path.
 */
static void *erofs_read_inode(struct erofs_buf *buf,
                  struct inode *inode, unsigned int *ofs)
{
    struct super_block *sb = inode->i_sb;
    struct erofs_sb_info *sbi = EROFS_SB(sb);
    struct erofs_inode *vi = EROFS_I(inode);
    /* absolute byte position of the on-disk inode in the metadata area */
    const erofs_off_t inode_loc = iloc(sbi, vi->nid);

    erofs_blk_t blkaddr, nblks = 0;	/* nblks: compressed block count */
    void *kaddr;
    struct erofs_inode_compact *dic;
    struct erofs_inode_extended *die, *copied = NULL;
    unsigned int ifmt;
    int err;

    blkaddr = erofs_blknr(inode_loc);
    *ofs = erofs_blkoff(inode_loc);

    erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
          __func__, vi->nid, *ofs, blkaddr);

    kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
    if (IS_ERR(kaddr)) {
        erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
              vi->nid, PTR_ERR(kaddr));
        return kaddr;
    }

    /* both layouts share the leading i_format field, read it first */
    dic = kaddr + *ofs;
    ifmt = le16_to_cpu(dic->i_format);

    /* reject i_format values with unknown feature bits set */
    if (ifmt & ~EROFS_I_ALL) {
        erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
              ifmt, vi->nid);
        err = -EOPNOTSUPP;
        goto err_out;
    }

    vi->datalayout = erofs_inode_datalayout(ifmt);
    if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
        erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
              vi->datalayout, vi->nid);
        err = -EOPNOTSUPP;
        goto err_out;
    }

    switch (erofs_inode_version(ifmt)) {
    case EROFS_INODE_LAYOUT_EXTENDED:
        vi->inode_isize = sizeof(struct erofs_inode_extended);
        /* check if the extended inode crosses the block boundary */
        if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
            *ofs += vi->inode_isize;
            die = (struct erofs_inode_extended *)dic;
        } else {
            /* bytes of the inode available in the current block */
            const unsigned int gotten = EROFS_BLKSIZ - *ofs;

            /*
             * The inode straddles two blocks: gather both halves
             * into a temporary bounce buffer.  Note the remap of
             * @buf below invalidates @dic.
             */
            copied = kmalloc(vi->inode_isize, GFP_NOFS);
            if (!copied) {
                err = -ENOMEM;
                goto err_out;
            }
            memcpy(copied, dic, gotten);
            kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
                           EROFS_KMAP);
            if (IS_ERR(kaddr)) {
                erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
                      vi->nid, PTR_ERR(kaddr));
                kfree(copied);
                return kaddr;
            }
            *ofs = vi->inode_isize - gotten;
            memcpy((u8 *)copied + gotten, kaddr, *ofs);
            die = copied;
        }
        vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

        inode->i_mode = le16_to_cpu(die->i_mode);
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
            vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
            break;
        case S_IFCHR:
        case S_IFBLK:
            inode->i_rdev =
                new_decode_dev(le32_to_cpu(die->i_u.rdev));
            break;
        case S_IFIFO:
        case S_IFSOCK:
            inode->i_rdev = 0;
            break;
        default:
            goto bogusimode;
        }
        /* extended inodes carry full 32-bit uid/gid/nlink */
        i_uid_write(inode, le32_to_cpu(die->i_uid));
        i_gid_write(inode, le32_to_cpu(die->i_gid));
        set_nlink(inode, le32_to_cpu(die->i_nlink));

        /* extended inode has its own timestamp */
        inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
        inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

        inode->i_size = le64_to_cpu(die->i_size);

        /* total blocks for compressed files */
        if (erofs_inode_is_data_compressed(vi->datalayout))
            nblks = le32_to_cpu(die->i_u.compressed_blocks);
        else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
            /* fill chunked inode summary info */
            vi->chunkformat = le16_to_cpu(die->i_u.c.format);
        /* the bounce buffer, if any, is no longer needed */
        kfree(copied);
        copied = NULL;
        break;
    case EROFS_INODE_LAYOUT_COMPACT:
        vi->inode_isize = sizeof(struct erofs_inode_compact);
        *ofs += vi->inode_isize;
        vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

        inode->i_mode = le16_to_cpu(dic->i_mode);
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
            vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
            break;
        case S_IFCHR:
        case S_IFBLK:
            inode->i_rdev =
                new_decode_dev(le32_to_cpu(dic->i_u.rdev));
            break;
        case S_IFIFO:
        case S_IFSOCK:
            inode->i_rdev = 0;
            break;
        default:
            goto bogusimode;
        }
        /* compact inodes store 16-bit uid/gid/nlink */
        i_uid_write(inode, le16_to_cpu(dic->i_uid));
        i_gid_write(inode, le16_to_cpu(dic->i_gid));
        set_nlink(inode, le16_to_cpu(dic->i_nlink));

        /* use build time for compact inodes */
        inode->i_ctime.tv_sec = sbi->build_time;
        inode->i_ctime.tv_nsec = sbi->build_time_nsec;

        inode->i_size = le32_to_cpu(dic->i_size);
        if (erofs_inode_is_data_compressed(vi->datalayout))
            nblks = le32_to_cpu(dic->i_u.compressed_blocks);
        else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
            vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
        break;
    default:
        erofs_err(inode->i_sb,
              "unsupported on-disk inode version %u of nid %llu",
              erofs_inode_version(ifmt), vi->nid);
        err = -EOPNOTSUPP;
        goto err_out;
    }

    if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
        if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
            erofs_err(inode->i_sb,
                  "unsupported chunk format %x of nid %llu",
                  vi->chunkformat, vi->nid);
            err = -EOPNOTSUPP;
            goto err_out;
        }
        /* chunk size bits = block bits + format-encoded extra shift */
        vi->chunkbits = LOG_BLOCK_SIZE +
            (vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
    }
    /* mtime/atime mirror ctime (set from on-disk mtime or build time) */
    inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
    inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
    inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
    inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

    /* S_DAX only for FLAT_PLAIN regular files with DAX_ALWAYS set */
    inode->i_flags &= ~S_DAX;
    if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
        vi->datalayout == EROFS_INODE_FLAT_PLAIN)
        inode->i_flags |= S_DAX;
    if (!nblks)
        /* measure inode.i_blocks as generic filesystems (512B units) */
        inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
    else
        inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
    return kaddr;

bogusimode:
    erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
          inode->i_mode, vi->nid);
    err = -EFSCORRUPTED;
err_out:
    DBG_BUGON(1);
    /* release the bounce buffer and the mapped metadata buffer */
    kfree(copied);
    erofs_put_metabuf(buf);
    return ERR_PTR(err);
}
0208 
0209 static int erofs_fill_symlink(struct inode *inode, void *kaddr,
0210                   unsigned int m_pofs)
0211 {
0212     struct erofs_inode *vi = EROFS_I(inode);
0213     char *lnk;
0214 
0215     /* if it cannot be handled with fast symlink scheme */
0216     if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
0217         inode->i_size >= EROFS_BLKSIZ) {
0218         inode->i_op = &erofs_symlink_iops;
0219         return 0;
0220     }
0221 
0222     lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
0223     if (!lnk)
0224         return -ENOMEM;
0225 
0226     m_pofs += vi->xattr_isize;
0227     /* inline symlink data shouldn't cross block boundary */
0228     if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
0229         kfree(lnk);
0230         erofs_err(inode->i_sb,
0231               "inline data cross block boundary @ nid %llu",
0232               vi->nid);
0233         DBG_BUGON(1);
0234         return -EFSCORRUPTED;
0235     }
0236     memcpy(lnk, kaddr + m_pofs, inode->i_size);
0237     lnk[inode->i_size] = '\0';
0238 
0239     inode->i_link = lnk;
0240     inode->i_op = &erofs_fast_symlink_iops;
0241     return 0;
0242 }
0243 
/*
 * Set up a newly-allocated VFS inode from its on-disk data: decode the
 * inode, then install the file-type specific inode/file operations and
 * the address-space operations.  @isdir is only used for tracing here.
 * Returns 0 on success or a negative errno.
 */
static int erofs_fill_inode(struct inode *inode, int isdir)
{
    struct erofs_inode *vi = EROFS_I(inode);
    struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
    void *kaddr;
    unsigned int ofs;
    int err = 0;

    trace_erofs_fill_inode(inode, isdir);

    /* read inode base data from disk */
    kaddr = erofs_read_inode(&buf, inode, &ofs);
    if (IS_ERR(kaddr))
        return PTR_ERR(kaddr);

    /* setup the new inode */
    switch (inode->i_mode & S_IFMT) {
    case S_IFREG:
        inode->i_op = &erofs_generic_iops;
        if (erofs_inode_is_data_compressed(vi->datalayout))
            inode->i_fop = &generic_ro_fops;
        else
            inode->i_fop = &erofs_file_fops;
        break;
    case S_IFDIR:
        inode->i_op = &erofs_dir_iops;
        inode->i_fop = &erofs_dir_fops;
        break;
    case S_IFLNK:
        /* may copy inline symlink data out of the mapped block */
        err = erofs_fill_symlink(inode, kaddr, ofs);
        if (err)
            goto out_unlock;
        inode_nohighmem(inode);
        break;
    case S_IFCHR:
    case S_IFBLK:
    case S_IFIFO:
    case S_IFSOCK:
        inode->i_op = &erofs_generic_iops;
        init_special_inode(inode, inode->i_mode, inode->i_rdev);
        /* special files carry no file data; skip a_ops setup */
        goto out_unlock;
    default:
        err = -EFSCORRUPTED;
        goto out_unlock;
    }

    if (erofs_inode_is_data_compressed(vi->datalayout)) {
        /* compressed inodes are rejected in fscache (ondemand) mode */
        if (!erofs_is_fscache_mode(inode->i_sb))
            err = z_erofs_fill_inode(inode);
        else
            err = -EOPNOTSUPP;
        goto out_unlock;
    }
    inode->i_mapping->a_ops = &erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ONDEMAND
    if (erofs_is_fscache_mode(inode->i_sb))
        inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
    erofs_put_metabuf(&buf);
    return err;
}
0307 
0308 /*
0309  * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
0310  * we should do more for 32-bit platform to find the right inode.
0311  */
0312 static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
0313 {
0314     const erofs_nid_t nid = *(erofs_nid_t *)opaque;
0315 
0316     return EROFS_I(inode)->nid == nid;
0317 }
0318 
0319 static int erofs_iget_set_actor(struct inode *inode, void *opaque)
0320 {
0321     const erofs_nid_t nid = *(erofs_nid_t *)opaque;
0322 
0323     inode->i_ino = erofs_inode_hash(nid);
0324     return 0;
0325 }
0326 
0327 static inline struct inode *erofs_iget_locked(struct super_block *sb,
0328                           erofs_nid_t nid)
0329 {
0330     const unsigned long hashval = erofs_inode_hash(nid);
0331 
0332     return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
0333         erofs_iget_set_actor, &nid);
0334 }
0335 
0336 struct inode *erofs_iget(struct super_block *sb,
0337              erofs_nid_t nid,
0338              bool isdir)
0339 {
0340     struct inode *inode = erofs_iget_locked(sb, nid);
0341 
0342     if (!inode)
0343         return ERR_PTR(-ENOMEM);
0344 
0345     if (inode->i_state & I_NEW) {
0346         int err;
0347         struct erofs_inode *vi = EROFS_I(inode);
0348 
0349         vi->nid = nid;
0350 
0351         err = erofs_fill_inode(inode, isdir);
0352         if (!err)
0353             unlock_new_inode(inode);
0354         else {
0355             iget_failed(inode);
0356             inode = ERR_PTR(err);
0357         }
0358     }
0359     return inode;
0360 }
0361 
0362 int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
0363           struct kstat *stat, u32 request_mask,
0364           unsigned int query_flags)
0365 {
0366     struct inode *const inode = d_inode(path->dentry);
0367 
0368     if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
0369         stat->attributes |= STATX_ATTR_COMPRESSED;
0370 
0371     stat->attributes |= STATX_ATTR_IMMUTABLE;
0372     stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
0373                   STATX_ATTR_IMMUTABLE);
0374 
0375     generic_fillattr(mnt_userns, inode, stat);
0376     return 0;
0377 }
0378 
/* inode operations shared by regular and special files (see erofs_fill_inode) */
const struct inode_operations erofs_generic_iops = {
    .getattr = erofs_getattr,
    .listxattr = erofs_listxattr,
    .get_acl = erofs_get_acl,
    .fiemap = erofs_fiemap,
};
0385 
/* symlinks whose target is read through the page cache */
const struct inode_operations erofs_symlink_iops = {
    .get_link = page_get_link,
    .getattr = erofs_getattr,
    .listxattr = erofs_listxattr,
    .get_acl = erofs_get_acl,
};
0392 
/* fast symlinks: target preloaded into inode->i_link by erofs_fill_symlink() */
const struct inode_operations erofs_fast_symlink_iops = {
    .get_link = simple_get_link,
    .getattr = erofs_getattr,
    .listxattr = erofs_listxattr,
    .get_acl = erofs_get_acl,
};