0001 /*
0002  * super.c
0003  *
0004  * PURPOSE
0005  *  Super block routines for the OSTA-UDF(tm) filesystem.
0006  *
0007  * DESCRIPTION
0008  *  OSTA-UDF(tm) = Optical Storage Technology Association
0009  *  Universal Disk Format.
0010  *
0011  *  This code is based on version 2.00 of the UDF specification,
0012  *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
0013  *    http://www.osta.org/
0014  *    https://www.ecma.ch/
0015  *    https://www.iso.org/
0016  *
0017  * COPYRIGHT
0018  *  This file is distributed under the terms of the GNU General Public
0019  *  License (GPL). Copies of the GPL can be obtained from:
0020  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
0021  *  Each contributing author retains all rights to their own work.
0022  *
0023  *  (C) 1998 Dave Boynton
0024  *  (C) 1998-2004 Ben Fennema
0025  *  (C) 2000 Stelias Computing Inc
0026  *
0027  * HISTORY
0028  *
0029  *  09/24/98 dgb  changed to allow compiling outside of kernel, and
0030  *                added some debugging.
0031  *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
0032  *  10/16/98      attempting some multi-session support
0033  *  10/17/98      added freespace count for "df"
0034  *  11/11/98 gr   added novrs option
0035  *  11/26/98 dgb  added fileset,anchor mount options
0036  *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
0037  *                vol descs. rewrote option handling based on isofs
0038  *  12/20/98      find the free space bitmap (if it exists)
0039  */
0040 
0041 #include "udfdecl.h"
0042 
0043 #include <linux/blkdev.h>
0044 #include <linux/slab.h>
0045 #include <linux/kernel.h>
0046 #include <linux/module.h>
0047 #include <linux/parser.h>
0048 #include <linux/stat.h>
0049 #include <linux/cdrom.h>
0050 #include <linux/nls.h>
0051 #include <linux/vfs.h>
0052 #include <linux/vmalloc.h>
0053 #include <linux/errno.h>
0054 #include <linux/mount.h>
0055 #include <linux/seq_file.h>
0056 #include <linux/bitmap.h>
0057 #include <linux/crc-itu-t.h>
0058 #include <linux/log2.h>
0059 #include <asm/byteorder.h>
0060 #include <linux/iversion.h>
0061 
0062 #include "udf_sb.h"
0063 #include "udf_i.h"
0064 
0065 #include <linux/init.h>
0066 #include <linux/uaccess.h>
0067 
0068 enum {
0069     VDS_POS_PRIMARY_VOL_DESC,
0070     VDS_POS_UNALLOC_SPACE_DESC,
0071     VDS_POS_LOGICAL_VOL_DESC,
0072     VDS_POS_IMP_USE_VOL_DESC,
0073     VDS_POS_LENGTH
0074 };
0075 
0076 #define VSD_FIRST_SECTOR_OFFSET     32768
0077 #define VSD_MAX_SECTOR_OFFSET       0x800000
0078 
0079 /*
0080  * Maximum number of Terminating Descriptor / Logical Volume Integrity
0081  * Descriptor redirections. The chosen numbers are arbitrary - large enough
0082  * that we hopefully don't limit any real use of rewritten inodes on
0083  * write-once media, while avoiding looping for too long on corrupted media.
0084  */
0085 #define UDF_MAX_TD_NESTING 64
0086 #define UDF_MAX_LVID_NESTING 1000
0087 
0088 enum { UDF_MAX_LINKS = 0xffff };
0089 
0090 /* These are the "meat" - everything else is stuffing */
0091 static int udf_fill_super(struct super_block *, void *, int);
0092 static void udf_put_super(struct super_block *);
0093 static int udf_sync_fs(struct super_block *, int);
0094 static int udf_remount_fs(struct super_block *, int *, char *);
0095 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
0096 static void udf_open_lvid(struct super_block *);
0097 static void udf_close_lvid(struct super_block *);
0098 static unsigned int udf_count_free(struct super_block *);
0099 static int udf_statfs(struct dentry *, struct kstatfs *);
0100 static int udf_show_options(struct seq_file *, struct dentry *);
0101 
0102 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
0103 {
0104     struct logicalVolIntegrityDesc *lvid;
0105     unsigned int partnum;
0106     unsigned int offset;
0107 
0108     if (!UDF_SB(sb)->s_lvid_bh)
0109         return NULL;
0110     lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
0111     partnum = le32_to_cpu(lvid->numOfPartitions);
0112     /* The offset is to skip freeSpaceTable and sizeTable arrays */
0113     offset = partnum * 2 * sizeof(uint32_t);
0114     return (struct logicalVolIntegrityDescImpUse *)
0115                     (((uint8_t *)(lvid + 1)) + offset);
0116 }
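
     /*
      * Layout sketch (illustrative, not normative): per ECMA-167 3/10.10 the
      * fixed part of the integrity descriptor is followed by two arrays of
      * numOfPartitions 32-bit entries before the implementation use area,
      * e.g. for numOfPartitions == 2:
      *
      *   struct logicalVolIntegrityDesc        fixed header
      *   __le32 freeSpaceTable[2]              8 bytes
      *   __le32 sizeTable[2]                   8 bytes
      *   struct logicalVolIntegrityDescImpUse  returned by udf_sb_lvidiu()
      *
      * A caller might then read a field from it, for example:
      *
      *   struct logicalVolIntegrityDescImpUse *iu = udf_sb_lvidiu(sb);
      *   if (iu)
      *       rev = le16_to_cpu(iu->minUDFReadRev);
      */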
0117 
0118 /* UDF filesystem type */
0119 static struct dentry *udf_mount(struct file_system_type *fs_type,
0120               int flags, const char *dev_name, void *data)
0121 {
0122     return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
0123 }
0124 
0125 static struct file_system_type udf_fstype = {
0126     .owner      = THIS_MODULE,
0127     .name       = "udf",
0128     .mount      = udf_mount,
0129     .kill_sb    = kill_block_super,
0130     .fs_flags   = FS_REQUIRES_DEV,
0131 };
0132 MODULE_ALIAS_FS("udf");
0133 
0134 static struct kmem_cache *udf_inode_cachep;
0135 
0136 static struct inode *udf_alloc_inode(struct super_block *sb)
0137 {
0138     struct udf_inode_info *ei;
0139     ei = alloc_inode_sb(sb, udf_inode_cachep, GFP_KERNEL);
0140     if (!ei)
0141         return NULL;
0142 
0143     ei->i_unique = 0;
0144     ei->i_lenExtents = 0;
0145     ei->i_lenStreams = 0;
0146     ei->i_next_alloc_block = 0;
0147     ei->i_next_alloc_goal = 0;
0148     ei->i_strat4096 = 0;
0149     ei->i_streamdir = 0;
0150     init_rwsem(&ei->i_data_sem);
0151     ei->cached_extent.lstart = -1;
0152     spin_lock_init(&ei->i_extent_cache_lock);
0153     inode_set_iversion(&ei->vfs_inode, 1);
0154 
0155     return &ei->vfs_inode;
0156 }
0157 
0158 static void udf_free_in_core_inode(struct inode *inode)
0159 {
0160     kmem_cache_free(udf_inode_cachep, UDF_I(inode));
0161 }
0162 
0163 static void init_once(void *foo)
0164 {
0165     struct udf_inode_info *ei = (struct udf_inode_info *)foo;
0166 
0167     ei->i_data = NULL;
0168     inode_init_once(&ei->vfs_inode);
0169 }
0170 
0171 static int __init init_inodecache(void)
0172 {
0173     udf_inode_cachep = kmem_cache_create("udf_inode_cache",
0174                          sizeof(struct udf_inode_info),
0175                          0, (SLAB_RECLAIM_ACCOUNT |
0176                          SLAB_MEM_SPREAD |
0177                          SLAB_ACCOUNT),
0178                          init_once);
0179     if (!udf_inode_cachep)
0180         return -ENOMEM;
0181     return 0;
0182 }
0183 
0184 static void destroy_inodecache(void)
0185 {
0186     /*
0187      * Make sure all delayed rcu free inodes are flushed before we
0188      * destroy cache.
0189      */
0190     rcu_barrier();
0191     kmem_cache_destroy(udf_inode_cachep);
0192 }
0193 
0194 /* Superblock operations */
0195 static const struct super_operations udf_sb_ops = {
0196     .alloc_inode    = udf_alloc_inode,
0197     .free_inode = udf_free_in_core_inode,
0198     .write_inode    = udf_write_inode,
0199     .evict_inode    = udf_evict_inode,
0200     .put_super  = udf_put_super,
0201     .sync_fs    = udf_sync_fs,
0202     .statfs     = udf_statfs,
0203     .remount_fs = udf_remount_fs,
0204     .show_options   = udf_show_options,
0205 };
0206 
0207 struct udf_options {
0208     unsigned char novrs;
0209     unsigned int blocksize;
0210     unsigned int session;
0211     unsigned int lastblock;
0212     unsigned int anchor;
0213     unsigned int flags;
0214     umode_t umask;
0215     kgid_t gid;
0216     kuid_t uid;
0217     umode_t fmode;
0218     umode_t dmode;
0219     struct nls_table *nls_map;
0220 };
0221 
0222 static int __init init_udf_fs(void)
0223 {
0224     int err;
0225 
0226     err = init_inodecache();
0227     if (err)
0228         goto out1;
0229     err = register_filesystem(&udf_fstype);
0230     if (err)
0231         goto out;
0232 
0233     return 0;
0234 
0235 out:
0236     destroy_inodecache();
0237 
0238 out1:
0239     return err;
0240 }
0241 
0242 static void __exit exit_udf_fs(void)
0243 {
0244     unregister_filesystem(&udf_fstype);
0245     destroy_inodecache();
0246 }
0247 
0248 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
0249 {
0250     struct udf_sb_info *sbi = UDF_SB(sb);
0251 
0252     sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
0253     if (!sbi->s_partmaps) {
0254         sbi->s_partitions = 0;
0255         return -ENOMEM;
0256     }
0257 
0258     sbi->s_partitions = count;
0259     return 0;
0260 }
0261 
0262 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
0263 {
0264     int i;
0265     int nr_groups = bitmap->s_nr_groups;
0266 
0267     for (i = 0; i < nr_groups; i++)
0268         brelse(bitmap->s_block_bitmap[i]);
0269 
0270     kvfree(bitmap);
0271 }
0272 
0273 static void udf_free_partition(struct udf_part_map *map)
0274 {
0275     int i;
0276     struct udf_meta_data *mdata;
0277 
0278     if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
0279         iput(map->s_uspace.s_table);
0280     if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
0281         udf_sb_free_bitmap(map->s_uspace.s_bitmap);
0282     if (map->s_partition_type == UDF_SPARABLE_MAP15)
0283         for (i = 0; i < 4; i++)
0284             brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
0285     else if (map->s_partition_type == UDF_METADATA_MAP25) {
0286         mdata = &map->s_type_specific.s_metadata;
0287         iput(mdata->s_metadata_fe);
0288         mdata->s_metadata_fe = NULL;
0289 
0290         iput(mdata->s_mirror_fe);
0291         mdata->s_mirror_fe = NULL;
0292 
0293         iput(mdata->s_bitmap_fe);
0294         mdata->s_bitmap_fe = NULL;
0295     }
0296 }
0297 
0298 static void udf_sb_free_partitions(struct super_block *sb)
0299 {
0300     struct udf_sb_info *sbi = UDF_SB(sb);
0301     int i;
0302 
0303     if (!sbi->s_partmaps)
0304         return;
0305     for (i = 0; i < sbi->s_partitions; i++)
0306         udf_free_partition(&sbi->s_partmaps[i]);
0307     kfree(sbi->s_partmaps);
0308     sbi->s_partmaps = NULL;
0309 }
0310 
0311 static int udf_show_options(struct seq_file *seq, struct dentry *root)
0312 {
0313     struct super_block *sb = root->d_sb;
0314     struct udf_sb_info *sbi = UDF_SB(sb);
0315 
0316     if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
0317         seq_puts(seq, ",nostrict");
0318     if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
0319         seq_printf(seq, ",bs=%lu", sb->s_blocksize);
0320     if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
0321         seq_puts(seq, ",unhide");
0322     if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
0323         seq_puts(seq, ",undelete");
0324     if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
0325         seq_puts(seq, ",noadinicb");
0326     if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
0327         seq_puts(seq, ",shortad");
0328     if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
0329         seq_puts(seq, ",uid=forget");
0330     if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
0331         seq_puts(seq, ",gid=forget");
0332     if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
0333         seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
0334     if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
0335         seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
0336     if (sbi->s_umask != 0)
0337         seq_printf(seq, ",umask=%ho", sbi->s_umask);
0338     if (sbi->s_fmode != UDF_INVALID_MODE)
0339         seq_printf(seq, ",mode=%ho", sbi->s_fmode);
0340     if (sbi->s_dmode != UDF_INVALID_MODE)
0341         seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
0342     if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
0343         seq_printf(seq, ",session=%d", sbi->s_session);
0344     if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
0345         seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
0346     if (sbi->s_anchor != 0)
0347         seq_printf(seq, ",anchor=%u", sbi->s_anchor);
0348     if (sbi->s_nls_map)
0349         seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
0350     else
0351         seq_puts(seq, ",iocharset=utf8");
0352 
0353     return 0;
0354 }
0355 
0356 /*
0357  * udf_parse_options
0358  *
0359  * PURPOSE
0360  *  Parse mount options.
0361  *
0362  * DESCRIPTION
0363  *  The following mount options are supported:
0364  *
0365  *  gid=        Set the default group.
0366  *  umask=      Set the default umask.
0367  *  mode=       Set the default file permissions.
0368  *  dmode=      Set the default directory permissions.
0369  *  uid=        Set the default user.
0370  *  bs=     Set the block size.
0371  *  unhide      Show otherwise hidden files.
0372  *  undelete    Show deleted files in lists.
0373  *  adinicb     Embed data in the inode (default)
0374  *  noadinicb   Don't embed data in the inode
0375  *  shortad     Use short allocation descriptors
0376  *  longad      Use long allocation descriptors (default)
0377  *  nostrict    Unset strict conformance
0378  *  iocharset=  Set the NLS character set
0379  *
0380  *  The remaining are for debugging and disaster recovery:
0381  *
0382  *  novrs       Skip volume sequence recognition
0383  *
0384  *  The following expect an offset from 0.
0385  *
0386  *  session=    Set the CDROM session (default= last session)
0387  *  anchor=     Override standard anchor location. (default= 256)
0388  *  volume=     Override the VolumeDesc location. (unused)
0389  *  partition=  Override the PartitionDesc location. (unused)
0390  *  lastblock=  Set the last block of the filesystem.
0391  *
0392  *  The following expect an offset from the partition root.
0393  *
0394  *  fileset=    Override the fileset block location. (unused)
0395  *  rootdir=    Override the root directory location. (unused)
0396  *      WARNING: overriding the rootdir to a non-directory may
0397  *      yield highly unpredictable results.
0398  *
0399  * PRE-CONDITIONS
0400  *  options     Pointer to mount options string.
0401  *  uopts       Pointer to mount options variable.
0402  *
0403  * POST-CONDITIONS
0404  *  <return>    1   Mount options parsed okay.
0405  *  <return>    0   Error parsing mount options.
0406  *
0407  * HISTORY
0408  *  July 1, 1997 - Andrew E. Mileski
0409  *  Written, tested, and released.
0410  */
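
     /*
      * Illustrative example only (device and mount point are hypothetical):
      *
      *   mount -t udf -o uid=1000,gid=1000,umask=022,iocharset=utf8 /dev/sr0 /mnt
      *
      * The option string "uid=1000,gid=1000,umask=022,iocharset=utf8" is what
      * udf_parse_options() below receives and parses into struct udf_options.
      */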
0411 
0412 enum {
0413     Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
0414     Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
0415     Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
0416     Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
0417     Opt_rootdir, Opt_utf8, Opt_iocharset,
0418     Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
0419     Opt_fmode, Opt_dmode
0420 };
0421 
0422 static const match_table_t tokens = {
0423     {Opt_novrs, "novrs"},
0424     {Opt_nostrict,  "nostrict"},
0425     {Opt_bs,    "bs=%u"},
0426     {Opt_unhide,    "unhide"},
0427     {Opt_undelete,  "undelete"},
0428     {Opt_noadinicb, "noadinicb"},
0429     {Opt_adinicb,   "adinicb"},
0430     {Opt_shortad,   "shortad"},
0431     {Opt_longad,    "longad"},
0432     {Opt_uforget,   "uid=forget"},
0433     {Opt_uignore,   "uid=ignore"},
0434     {Opt_gforget,   "gid=forget"},
0435     {Opt_gignore,   "gid=ignore"},
0436     {Opt_gid,   "gid=%u"},
0437     {Opt_uid,   "uid=%u"},
0438     {Opt_umask, "umask=%o"},
0439     {Opt_session,   "session=%u"},
0440     {Opt_lastblock, "lastblock=%u"},
0441     {Opt_anchor,    "anchor=%u"},
0442     {Opt_volume,    "volume=%u"},
0443     {Opt_partition, "partition=%u"},
0444     {Opt_fileset,   "fileset=%u"},
0445     {Opt_rootdir,   "rootdir=%u"},
0446     {Opt_utf8,  "utf8"},
0447     {Opt_iocharset, "iocharset=%s"},
0448     {Opt_fmode,     "mode=%o"},
0449     {Opt_dmode,     "dmode=%o"},
0450     {Opt_err,   NULL}
0451 };
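
     /*
      * Parsing sketch (illustrative): for the element "umask=022",
      * match_token() returns Opt_umask with args[0] spanning "022", and
      * match_octal() then yields 0022, which is stored in uopt->umask.
      * Options without a %-specifier, such as "nostrict", just set or clear
      * a flag bit in uopt->flags.
      */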
0452 
0453 static int udf_parse_options(char *options, struct udf_options *uopt,
0454                  bool remount)
0455 {
0456     char *p;
0457     int option;
0458     unsigned int uv;
0459 
0460     uopt->novrs = 0;
0461     uopt->session = 0xFFFFFFFF;
0462     uopt->lastblock = 0;
0463     uopt->anchor = 0;
0464 
0465     if (!options)
0466         return 1;
0467 
0468     while ((p = strsep(&options, ",")) != NULL) {
0469         substring_t args[MAX_OPT_ARGS];
0470         int token;
0471         unsigned n;
0472         if (!*p)
0473             continue;
0474 
0475         token = match_token(p, tokens, args);
0476         switch (token) {
0477         case Opt_novrs:
0478             uopt->novrs = 1;
0479             break;
0480         case Opt_bs:
0481             if (match_int(&args[0], &option))
0482                 return 0;
0483             n = option;
0484             if (n != 512 && n != 1024 && n != 2048 && n != 4096)
0485                 return 0;
0486             uopt->blocksize = n;
0487             uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
0488             break;
0489         case Opt_unhide:
0490             uopt->flags |= (1 << UDF_FLAG_UNHIDE);
0491             break;
0492         case Opt_undelete:
0493             uopt->flags |= (1 << UDF_FLAG_UNDELETE);
0494             break;
0495         case Opt_noadinicb:
0496             uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
0497             break;
0498         case Opt_adinicb:
0499             uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
0500             break;
0501         case Opt_shortad:
0502             uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
0503             break;
0504         case Opt_longad:
0505             uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
0506             break;
0507         case Opt_gid:
0508             if (match_uint(args, &uv))
0509                 return 0;
0510             uopt->gid = make_kgid(current_user_ns(), uv);
0511             if (!gid_valid(uopt->gid))
0512                 return 0;
0513             uopt->flags |= (1 << UDF_FLAG_GID_SET);
0514             break;
0515         case Opt_uid:
0516             if (match_uint(args, &uv))
0517                 return 0;
0518             uopt->uid = make_kuid(current_user_ns(), uv);
0519             if (!uid_valid(uopt->uid))
0520                 return 0;
0521             uopt->flags |= (1 << UDF_FLAG_UID_SET);
0522             break;
0523         case Opt_umask:
0524             if (match_octal(args, &option))
0525                 return 0;
0526             uopt->umask = option;
0527             break;
0528         case Opt_nostrict:
0529             uopt->flags &= ~(1 << UDF_FLAG_STRICT);
0530             break;
0531         case Opt_session:
0532             if (match_int(args, &option))
0533                 return 0;
0534             uopt->session = option;
0535             if (!remount)
0536                 uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
0537             break;
0538         case Opt_lastblock:
0539             if (match_int(args, &option))
0540                 return 0;
0541             uopt->lastblock = option;
0542             if (!remount)
0543                 uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
0544             break;
0545         case Opt_anchor:
0546             if (match_int(args, &option))
0547                 return 0;
0548             uopt->anchor = option;
0549             break;
0550         case Opt_volume:
0551         case Opt_partition:
0552         case Opt_fileset:
0553         case Opt_rootdir:
0554             /* Ignored (never implemented properly) */
0555             break;
0556         case Opt_utf8:
0557             if (!remount) {
0558                 unload_nls(uopt->nls_map);
0559                 uopt->nls_map = NULL;
0560             }
0561             break;
0562         case Opt_iocharset:
0563             if (!remount) {
0564                 unload_nls(uopt->nls_map);
0565                 uopt->nls_map = NULL;
0566             }
0567             /* When nls_map is not loaded, UTF-8 is used */
0568             if (!remount && strcmp(args[0].from, "utf8") != 0) {
0569                 uopt->nls_map = load_nls(args[0].from);
0570                 if (!uopt->nls_map) {
0571                     pr_err("iocharset %s not found\n",
0572                         args[0].from);
0573                     return 0;
0574                 }
0575             }
0576             break;
0577         case Opt_uforget:
0578             uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
0579             break;
0580         case Opt_uignore:
0581         case Opt_gignore:
0582             /* These options are superseded by uid=<number> */
0583             break;
0584         case Opt_gforget:
0585             uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
0586             break;
0587         case Opt_fmode:
0588             if (match_octal(args, &option))
0589                 return 0;
0590             uopt->fmode = option & 0777;
0591             break;
0592         case Opt_dmode:
0593             if (match_octal(args, &option))
0594                 return 0;
0595             uopt->dmode = option & 0777;
0596             break;
0597         default:
0598             pr_err("bad mount option \"%s\" or missing value\n", p);
0599             return 0;
0600         }
0601     }
0602     return 1;
0603 }
0604 
0605 static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
0606 {
0607     struct udf_options uopt;
0608     struct udf_sb_info *sbi = UDF_SB(sb);
0609     int error = 0;
0610 
0611     if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
0612         return -EACCES;
0613 
0614     sync_filesystem(sb);
0615 
0616     uopt.flags = sbi->s_flags;
0617     uopt.uid   = sbi->s_uid;
0618     uopt.gid   = sbi->s_gid;
0619     uopt.umask = sbi->s_umask;
0620     uopt.fmode = sbi->s_fmode;
0621     uopt.dmode = sbi->s_dmode;
0622     uopt.nls_map = NULL;
0623 
0624     if (!udf_parse_options(options, &uopt, true))
0625         return -EINVAL;
0626 
0627     write_lock(&sbi->s_cred_lock);
0628     sbi->s_flags = uopt.flags;
0629     sbi->s_uid   = uopt.uid;
0630     sbi->s_gid   = uopt.gid;
0631     sbi->s_umask = uopt.umask;
0632     sbi->s_fmode = uopt.fmode;
0633     sbi->s_dmode = uopt.dmode;
0634     write_unlock(&sbi->s_cred_lock);
0635 
0636     if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
0637         goto out_unlock;
0638 
0639     if (*flags & SB_RDONLY)
0640         udf_close_lvid(sb);
0641     else
0642         udf_open_lvid(sb);
0643 
0644 out_unlock:
0645     return error;
0646 }
0647 
0648 /*
0649  * Check VSD descriptor. Returns -1 in case we are at the end of volume
0650  * recognition area, 0 if the descriptor is valid but non-interesting, 1 if
0651  * we found one of NSR descriptors we are looking for.
0652  */
0653 static int identify_vsd(const struct volStructDesc *vsd)
0654 {
0655     int ret = 0;
0656 
0657     if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
0658         switch (vsd->structType) {
0659         case 0:
0660             udf_debug("ISO9660 Boot Record found\n");
0661             break;
0662         case 1:
0663             udf_debug("ISO9660 Primary Volume Descriptor found\n");
0664             break;
0665         case 2:
0666             udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
0667             break;
0668         case 3:
0669             udf_debug("ISO9660 Volume Partition Descriptor found\n");
0670             break;
0671         case 255:
0672             udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
0673             break;
0674         default:
0675             udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
0676             break;
0677         }
0678     } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
0679         ; /* ret = 0 */
0680     else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
0681         ret = 1;
0682     else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
0683         ret = 1;
0684     else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
0685         ; /* ret = 0 */
0686     else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
0687         ; /* ret = 0 */
0688     else {
0689         /* TEA01 or invalid id : end of volume recognition area */
0690         ret = -1;
0691     }
0692 
0693     return ret;
0694 }
0695 
0696 /*
0697  * Check Volume Structure Descriptors (ECMA 167 2/9.1)
0698  * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
0699  * @return   1 if NSR02 or NSR03 found,
0700  *      -1 if first sector read error, 0 otherwise
0701  */
0702 static int udf_check_vsd(struct super_block *sb)
0703 {
0704     struct volStructDesc *vsd = NULL;
0705     loff_t sector = VSD_FIRST_SECTOR_OFFSET;
0706     int sectorsize;
0707     struct buffer_head *bh = NULL;
0708     int nsr = 0;
0709     struct udf_sb_info *sbi;
0710     loff_t session_offset;
0711 
0712     sbi = UDF_SB(sb);
0713     if (sb->s_blocksize < sizeof(struct volStructDesc))
0714         sectorsize = sizeof(struct volStructDesc);
0715     else
0716         sectorsize = sb->s_blocksize;
0717 
0718     session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
0719     sector += session_offset;
0720 
0721     udf_debug("Starting at sector %u (%lu byte sectors)\n",
0722           (unsigned int)(sector >> sb->s_blocksize_bits),
0723           sb->s_blocksize);
0724     /* Process the sequence (if applicable). The hard limit on the sector
0725      * offset is arbitrary, hopefully large enough so that all valid UDF
0726      * filesystems will be recognised. There is no mention of an upper
0727      * bound to the size of the volume recognition area in the standard.
0728      *  The limit will prevent the code from reading all the sectors of a
0729      * specially crafted image (like a bluray disc full of CD001 sectors),
0730      * potentially causing minutes or even hours of uninterruptible I/O
0731      * activity. This actually happened with uninitialised SSD partitions
0732      * (all 0xFF) before the check for the limit and all valid IDs were
0733      * added */
0734     for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
0735         /* Read a block */
0736         bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
0737         if (!bh)
0738             break;
0739 
0740         vsd = (struct volStructDesc *)(bh->b_data +
0741                           (sector & (sb->s_blocksize - 1)));
0742         nsr = identify_vsd(vsd);
0743         /* Found NSR or end? */
0744         if (nsr) {
0745             brelse(bh);
0746             break;
0747         }
0748         /*
0749          * Special handling for improperly formatted VRS (e.g., Win10)
0750          * where components are separated by 2048 bytes even though
0751          * sectors are 4K
0752          */
0753         if (sb->s_blocksize == 4096) {
0754             nsr = identify_vsd(vsd + 1);
0755             /* Ignore unknown IDs... */
0756             if (nsr < 0)
0757                 nsr = 0;
0758         }
0759         brelse(bh);
0760     }
0761 
0762     if (nsr > 0)
0763         return 1;
0764     else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
0765         return -1;
0766     else
0767         return 0;
0768 }
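
     /*
      * Recognition sketch (illustrative, assuming a 2048-byte block size and
      * no multisession offset): a minimal UDF volume recognition sequence is
      *
      *   byte 32768: "BEA01"   begin extended area       identify_vsd() -> 0
      *   byte 34816: "NSR03"   ECMA 167 3rd ed. / UDF    identify_vsd() -> 1
      *   byte 36864: "TEA01"   terminate extended area   identify_vsd() -> -1
      *
      * so the loop above stops at the NSR descriptor and udf_check_vsd()
      * returns 1.
      */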
0769 
0770 static int udf_verify_domain_identifier(struct super_block *sb,
0771                     struct regid *ident, char *dname)
0772 {
0773     struct domainIdentSuffix *suffix;
0774 
0775     if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
0776         udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
0777         goto force_ro;
0778     }
0779     if (ident->flags & ENTITYID_FLAGS_DIRTY) {
0780         udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
0781              dname);
0782         goto force_ro;
0783     }
0784     suffix = (struct domainIdentSuffix *)ident->identSuffix;
0785     if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
0786         (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
0787         if (!sb_rdonly(sb)) {
0788             udf_warn(sb, "Descriptor for %s marked write protected."
0789                  " Forcing read only mount.\n", dname);
0790         }
0791         goto force_ro;
0792     }
0793     return 0;
0794 
0795 force_ro:
0796     if (!sb_rdonly(sb))
0797         return -EACCES;
0798     UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
0799     return 0;
0800 }
0801 
0802 static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
0803                 struct kernel_lb_addr *root)
0804 {
0805     int ret;
0806 
0807     ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
0808     if (ret < 0)
0809         return ret;
0810 
0811     *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
0812     UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
0813 
0814     udf_debug("Rootdir at block=%u, partition=%u\n",
0815           root->logicalBlockNum, root->partitionReferenceNum);
0816     return 0;
0817 }
0818 
0819 static int udf_find_fileset(struct super_block *sb,
0820                 struct kernel_lb_addr *fileset,
0821                 struct kernel_lb_addr *root)
0822 {
0823     struct buffer_head *bh = NULL;
0824     uint16_t ident;
0825     int ret;
0826 
0827     if (fileset->logicalBlockNum == 0xFFFFFFFF &&
0828         fileset->partitionReferenceNum == 0xFFFF)
0829         return -EINVAL;
0830 
0831     bh = udf_read_ptagged(sb, fileset, 0, &ident);
0832     if (!bh)
0833         return -EIO;
0834     if (ident != TAG_IDENT_FSD) {
0835         brelse(bh);
0836         return -EINVAL;
0837     }
0838 
0839     udf_debug("Fileset at block=%u, partition=%u\n",
0840           fileset->logicalBlockNum, fileset->partitionReferenceNum);
0841 
0842     UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
0843     ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
0844     brelse(bh);
0845     return ret;
0846 }
0847 
0848 /*
0849  * Load primary Volume Descriptor Sequence
0850  *
0851  * Return <0 on error, 0 on success. -EAGAIN has a special meaning: the next
0852  * sequence should be tried.
0853  */
0854 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
0855 {
0856     struct primaryVolDesc *pvoldesc;
0857     uint8_t *outstr;
0858     struct buffer_head *bh;
0859     uint16_t ident;
0860     int ret;
0861     struct timestamp *ts;
0862 
0863     outstr = kmalloc(128, GFP_NOFS);
0864     if (!outstr)
0865         return -ENOMEM;
0866 
0867     bh = udf_read_tagged(sb, block, block, &ident);
0868     if (!bh) {
0869         ret = -EAGAIN;
0870         goto out2;
0871     }
0872 
0873     if (ident != TAG_IDENT_PVD) {
0874         ret = -EIO;
0875         goto out_bh;
0876     }
0877 
0878     pvoldesc = (struct primaryVolDesc *)bh->b_data;
0879 
0880     udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
0881                   pvoldesc->recordingDateAndTime);
0882     ts = &pvoldesc->recordingDateAndTime;
0883     udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
0884           le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
0885           ts->minute, le16_to_cpu(ts->typeAndTimezone));
0886 
0887     ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
0888     if (ret < 0) {
0889         strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
0890         pr_warn("incorrect volume identification, setting to "
0891             "'InvalidName'\n");
0892     } else {
0893         strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
0894     }
0895     udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
0896 
0897     ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
0898     if (ret < 0) {
0899         ret = 0;
0900         goto out_bh;
0901     }
0902     outstr[ret] = 0;
0903     udf_debug("volSetIdent[] = '%s'\n", outstr);
0904 
0905     ret = 0;
0906 out_bh:
0907     brelse(bh);
0908 out2:
0909     kfree(outstr);
0910     return ret;
0911 }
0912 
0913 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
0914                     u32 meta_file_loc, u32 partition_ref)
0915 {
0916     struct kernel_lb_addr addr;
0917     struct inode *metadata_fe;
0918 
0919     addr.logicalBlockNum = meta_file_loc;
0920     addr.partitionReferenceNum = partition_ref;
0921 
0922     metadata_fe = udf_iget_special(sb, &addr);
0923 
0924     if (IS_ERR(metadata_fe)) {
0925         udf_warn(sb, "metadata inode efe not found\n");
0926         return metadata_fe;
0927     }
0928     if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
0929         udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
0930         iput(metadata_fe);
0931         return ERR_PTR(-EIO);
0932     }
0933 
0934     return metadata_fe;
0935 }
0936 
0937 static int udf_load_metadata_files(struct super_block *sb, int partition,
0938                    int type1_index)
0939 {
0940     struct udf_sb_info *sbi = UDF_SB(sb);
0941     struct udf_part_map *map;
0942     struct udf_meta_data *mdata;
0943     struct kernel_lb_addr addr;
0944     struct inode *fe;
0945 
0946     map = &sbi->s_partmaps[partition];
0947     mdata = &map->s_type_specific.s_metadata;
0948     mdata->s_phys_partition_ref = type1_index;
0949 
0950     /* metadata address */
0951     udf_debug("Metadata file location: block = %u part = %u\n",
0952           mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
0953 
0954     fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
0955                      mdata->s_phys_partition_ref);
0956     if (IS_ERR(fe)) {
0957         /* mirror file entry */
0958         udf_debug("Mirror metadata file location: block = %u part = %u\n",
0959               mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
0960 
0961         fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
0962                          mdata->s_phys_partition_ref);
0963 
0964         if (IS_ERR(fe)) {
0965             udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
0966             return PTR_ERR(fe);
0967         }
0968         mdata->s_mirror_fe = fe;
0969     } else
0970         mdata->s_metadata_fe = fe;
0971 
0972 
0973     /*
0974      * bitmap file entry
0975      * Note:
0976      * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
0977      */
0978     if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
0979         addr.logicalBlockNum = mdata->s_bitmap_file_loc;
0980         addr.partitionReferenceNum = mdata->s_phys_partition_ref;
0981 
0982         udf_debug("Bitmap file location: block = %u part = %u\n",
0983               addr.logicalBlockNum, addr.partitionReferenceNum);
0984 
0985         fe = udf_iget_special(sb, &addr);
0986         if (IS_ERR(fe)) {
0987             if (sb_rdonly(sb))
0988                 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
0989             else {
0990                 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
0991                 return PTR_ERR(fe);
0992             }
0993         } else
0994             mdata->s_bitmap_fe = fe;
0995     }
0996 
0997     udf_debug("udf_load_metadata_files Ok\n");
0998     return 0;
0999 }
1000 
1001 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1002 {
1003     struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1004     return DIV_ROUND_UP(map->s_partition_len +
1005                 (sizeof(struct spaceBitmapDesc) << 3),
1006                 sb->s_blocksize * 8);
1007 }
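
     /*
      * Worked example (illustrative numbers): with a 2048-byte block size each
      * bitmap block covers 2048 * 8 = 16384 partition blocks, and the bitmap
      * is preceded by a struct spaceBitmapDesc header (24 bytes, i.e. 192
      * bits). A partition of 100000 blocks therefore needs
      * DIV_ROUND_UP(100000 + 192, 16384) = 7 bitmap blocks ("groups").
      */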
1008 
1009 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1010 {
1011     struct udf_bitmap *bitmap;
1012     int nr_groups = udf_compute_nr_groups(sb, index);
1013 
1014     bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups),
1015               GFP_KERNEL);
1016     if (!bitmap)
1017         return NULL;
1018 
1019     bitmap->s_nr_groups = nr_groups;
1020     return bitmap;
1021 }
1022 
1023 static int check_partition_desc(struct super_block *sb,
1024                 struct partitionDesc *p,
1025                 struct udf_part_map *map)
1026 {
1027     bool umap, utable, fmap, ftable;
1028     struct partitionHeaderDesc *phd;
1029 
1030     switch (le32_to_cpu(p->accessType)) {
1031     case PD_ACCESS_TYPE_READ_ONLY:
1032     case PD_ACCESS_TYPE_WRITE_ONCE:
1033     case PD_ACCESS_TYPE_NONE:
1034         goto force_ro;
1035     }
1036 
1037     /* No Partition Header Descriptor? */
1038     if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1039         strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1040         goto force_ro;
1041 
1042     phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1043     utable = phd->unallocSpaceTable.extLength;
1044     umap = phd->unallocSpaceBitmap.extLength;
1045     ftable = phd->freedSpaceTable.extLength;
1046     fmap = phd->freedSpaceBitmap.extLength;
1047 
1048     /* No allocation info? */
1049     if (!utable && !umap && !ftable && !fmap)
1050         goto force_ro;
1051 
1052     /* We don't support blocks that require erasing before overwrite */
1053     if (ftable || fmap)
1054         goto force_ro;
1055     /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
1056     if (utable && umap)
1057         goto force_ro;
1058 
1059     if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1060         map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1061         map->s_partition_type == UDF_METADATA_MAP25)
1062         goto force_ro;
1063 
1064     return 0;
1065 force_ro:
1066     if (!sb_rdonly(sb))
1067         return -EACCES;
1068     UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1069     return 0;
1070 }
1071 
1072 static int udf_fill_partdesc_info(struct super_block *sb,
1073         struct partitionDesc *p, int p_index)
1074 {
1075     struct udf_part_map *map;
1076     struct udf_sb_info *sbi = UDF_SB(sb);
1077     struct partitionHeaderDesc *phd;
1078     int err;
1079 
1080     map = &sbi->s_partmaps[p_index];
1081 
1082     map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1083     map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1084 
1085     if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1086         map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1087     if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1088         map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1089     if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1090         map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1091     if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1092         map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1093 
1094     udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
1095           p_index, map->s_partition_type,
1096           map->s_partition_root, map->s_partition_len);
1097 
1098     err = check_partition_desc(sb, p, map);
1099     if (err)
1100         return err;
1101 
1102     /*
1103      * Skip loading allocation info if we cannot ever write to the fs.
1104      * This is a correctness thing as we may have decided to force ro mount
1105      * to avoid allocation info we don't support.
1106      */
1107     if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
1108         return 0;
1109 
1110     phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1111     if (phd->unallocSpaceTable.extLength) {
1112         struct kernel_lb_addr loc = {
1113             .logicalBlockNum = le32_to_cpu(
1114                 phd->unallocSpaceTable.extPosition),
1115             .partitionReferenceNum = p_index,
1116         };
1117         struct inode *inode;
1118 
1119         inode = udf_iget_special(sb, &loc);
1120         if (IS_ERR(inode)) {
1121             udf_debug("cannot load unallocSpaceTable (part %d)\n",
1122                   p_index);
1123             return PTR_ERR(inode);
1124         }
1125         map->s_uspace.s_table = inode;
1126         map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1127         udf_debug("unallocSpaceTable (part %d) @ %lu\n",
1128               p_index, map->s_uspace.s_table->i_ino);
1129     }
1130 
1131     if (phd->unallocSpaceBitmap.extLength) {
1132         struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1133         if (!bitmap)
1134             return -ENOMEM;
1135         map->s_uspace.s_bitmap = bitmap;
1136         bitmap->s_extPosition = le32_to_cpu(
1137                 phd->unallocSpaceBitmap.extPosition);
1138         map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1139         udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
1140               p_index, bitmap->s_extPosition);
1141     }
1142 
1143     return 0;
1144 }
1145 
1146 static void udf_find_vat_block(struct super_block *sb, int p_index,
1147                    int type1_index, sector_t start_block)
1148 {
1149     struct udf_sb_info *sbi = UDF_SB(sb);
1150     struct udf_part_map *map = &sbi->s_partmaps[p_index];
1151     sector_t vat_block;
1152     struct kernel_lb_addr ino;
1153     struct inode *inode;
1154 
1155     /*
1156      * VAT file entry is in the last recorded block. Some broken disks have
1157      * it a few blocks before so try a bit harder...
1158      */
1159     ino.partitionReferenceNum = type1_index;
1160     for (vat_block = start_block;
1161          vat_block >= map->s_partition_root &&
1162          vat_block >= start_block - 3; vat_block--) {
1163         ino.logicalBlockNum = vat_block - map->s_partition_root;
1164         inode = udf_iget_special(sb, &ino);
1165         if (!IS_ERR(inode)) {
1166             sbi->s_vat_inode = inode;
1167             break;
1168         }
1169     }
1170 }
1171 
1172 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1173 {
1174     struct udf_sb_info *sbi = UDF_SB(sb);
1175     struct udf_part_map *map = &sbi->s_partmaps[p_index];
1176     struct buffer_head *bh = NULL;
1177     struct udf_inode_info *vati;
1178     uint32_t pos;
1179     struct virtualAllocationTable20 *vat20;
1180     sector_t blocks = sb_bdev_nr_blocks(sb);
1181 
1182     udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1183     if (!sbi->s_vat_inode &&
1184         sbi->s_last_block != blocks - 1) {
1185         pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1186               (unsigned long)sbi->s_last_block,
1187               (unsigned long)blocks - 1);
1188         udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1189     }
1190     if (!sbi->s_vat_inode)
1191         return -EIO;
1192 
1193     if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1194         map->s_type_specific.s_virtual.s_start_offset = 0;
1195         map->s_type_specific.s_virtual.s_num_entries =
1196             (sbi->s_vat_inode->i_size - 36) >> 2;
1197     } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1198         vati = UDF_I(sbi->s_vat_inode);
1199         if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1200             pos = udf_block_map(sbi->s_vat_inode, 0);
1201             bh = sb_bread(sb, pos);
1202             if (!bh)
1203                 return -EIO;
1204             vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1205         } else {
1206             vat20 = (struct virtualAllocationTable20 *)
1207                             vati->i_data;
1208         }
1209 
1210         map->s_type_specific.s_virtual.s_start_offset =
1211             le16_to_cpu(vat20->lengthHeader);
1212         map->s_type_specific.s_virtual.s_num_entries =
1213             (sbi->s_vat_inode->i_size -
1214                 map->s_type_specific.s_virtual.
1215                     s_start_offset) >> 2;
1216         brelse(bh);
1217     }
1218     return 0;
1219 }
1220 
1221 /*
1222  * Load partition descriptor block
1223  *
1224  * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1225  * sequence.
1226  */
1227 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1228 {
1229     struct buffer_head *bh;
1230     struct partitionDesc *p;
1231     struct udf_part_map *map;
1232     struct udf_sb_info *sbi = UDF_SB(sb);
1233     int i, type1_idx;
1234     uint16_t partitionNumber;
1235     uint16_t ident;
1236     int ret;
1237 
1238     bh = udf_read_tagged(sb, block, block, &ident);
1239     if (!bh)
1240         return -EAGAIN;
1241     if (ident != TAG_IDENT_PD) {
1242         ret = 0;
1243         goto out_bh;
1244     }
1245 
1246     p = (struct partitionDesc *)bh->b_data;
1247     partitionNumber = le16_to_cpu(p->partitionNumber);
1248 
1249     /* First scan for TYPE1 and SPARABLE partitions */
1250     for (i = 0; i < sbi->s_partitions; i++) {
1251         map = &sbi->s_partmaps[i];
1252         udf_debug("Searching map: (%u == %u)\n",
1253               map->s_partition_num, partitionNumber);
1254         if (map->s_partition_num == partitionNumber &&
1255             (map->s_partition_type == UDF_TYPE1_MAP15 ||
1256              map->s_partition_type == UDF_SPARABLE_MAP15))
1257             break;
1258     }
1259 
1260     if (i >= sbi->s_partitions) {
1261         udf_debug("Partition (%u) not found in partition map\n",
1262               partitionNumber);
1263         ret = 0;
1264         goto out_bh;
1265     }
1266 
1267     ret = udf_fill_partdesc_info(sb, p, i);
1268     if (ret < 0)
1269         goto out_bh;
1270 
1271     /*
1272      * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1273      * PHYSICAL partitions are already set up
1274      */
1275     type1_idx = i;
1276     map = NULL; /* suppress 'maybe used uninitialized' warning */
1277     for (i = 0; i < sbi->s_partitions; i++) {
1278         map = &sbi->s_partmaps[i];
1279 
1280         if (map->s_partition_num == partitionNumber &&
1281             (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1282              map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1283              map->s_partition_type == UDF_METADATA_MAP25))
1284             break;
1285     }
1286 
1287     if (i >= sbi->s_partitions) {
1288         ret = 0;
1289         goto out_bh;
1290     }
1291 
1292     ret = udf_fill_partdesc_info(sb, p, i);
1293     if (ret < 0)
1294         goto out_bh;
1295 
1296     if (map->s_partition_type == UDF_METADATA_MAP25) {
1297         ret = udf_load_metadata_files(sb, i, type1_idx);
1298         if (ret < 0) {
1299             udf_err(sb, "error loading MetaData partition map %d\n",
1300                 i);
1301             goto out_bh;
1302         }
1303     } else {
1304         /*
1305          * If we have a partition with virtual map, we don't handle
1306          * writing to it (we overwrite blocks instead of relocating
1307          * them).
1308          */
1309         if (!sb_rdonly(sb)) {
1310             ret = -EACCES;
1311             goto out_bh;
1312         }
1313         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1314         ret = udf_load_vat(sb, i, type1_idx);
1315         if (ret < 0)
1316             goto out_bh;
1317     }
1318     ret = 0;
1319 out_bh:
1320     /* In case loading failed, we handle cleanup in udf_fill_super */
1321     brelse(bh);
1322     return ret;
1323 }
1324 
1325 static int udf_load_sparable_map(struct super_block *sb,
1326                  struct udf_part_map *map,
1327                  struct sparablePartitionMap *spm)
1328 {
1329     uint32_t loc;
1330     uint16_t ident;
1331     struct sparingTable *st;
1332     struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1333     int i;
1334     struct buffer_head *bh;
1335 
1336     map->s_partition_type = UDF_SPARABLE_MAP15;
1337     sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1338     if (!is_power_of_2(sdata->s_packet_len)) {
1339         udf_err(sb, "error loading logical volume descriptor: "
1340             "Invalid packet length %u\n",
1341             (unsigned)sdata->s_packet_len);
1342         return -EIO;
1343     }
1344     if (spm->numSparingTables > 4) {
1345         udf_err(sb, "error loading logical volume descriptor: "
1346             "Too many sparing tables (%d)\n",
1347             (int)spm->numSparingTables);
1348         return -EIO;
1349     }
1350     if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
1351         udf_err(sb, "error loading logical volume descriptor: "
1352             "Too big sparing table size (%u)\n",
1353             le32_to_cpu(spm->sizeSparingTable));
1354         return -EIO;
1355     }
1356 
1357     for (i = 0; i < spm->numSparingTables; i++) {
1358         loc = le32_to_cpu(spm->locSparingTable[i]);
1359         bh = udf_read_tagged(sb, loc, loc, &ident);
1360         if (!bh)
1361             continue;
1362 
1363         st = (struct sparingTable *)bh->b_data;
1364         if (ident != 0 ||
1365             strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1366                 strlen(UDF_ID_SPARING)) ||
1367             sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1368                             sb->s_blocksize) {
1369             brelse(bh);
1370             continue;
1371         }
1372 
1373         sdata->s_spar_map[i] = bh;
1374     }
1375     map->s_partition_func = udf_get_pblock_spar15;
1376     return 0;
1377 }
1378 
1379 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1380                    struct kernel_lb_addr *fileset)
1381 {
1382     struct logicalVolDesc *lvd;
1383     int i, offset;
1384     uint8_t type;
1385     struct udf_sb_info *sbi = UDF_SB(sb);
1386     struct genericPartitionMap *gpm;
1387     uint16_t ident;
1388     struct buffer_head *bh;
1389     unsigned int table_len;
1390     int ret;
1391 
1392     bh = udf_read_tagged(sb, block, block, &ident);
1393     if (!bh)
1394         return -EAGAIN;
1395     BUG_ON(ident != TAG_IDENT_LVD);
1396     lvd = (struct logicalVolDesc *)bh->b_data;
1397     table_len = le32_to_cpu(lvd->mapTableLength);
1398     if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1399         udf_err(sb, "error loading logical volume descriptor: "
1400             "Partition table too long (%u > %lu)\n", table_len,
1401             sb->s_blocksize - sizeof(*lvd));
1402         ret = -EIO;
1403         goto out_bh;
1404     }
1405 
1406     ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
1407                        "logical volume");
1408     if (ret)
1409         goto out_bh;
1410     ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1411     if (ret)
1412         goto out_bh;
1413 
1414     for (i = 0, offset = 0;
1415          i < sbi->s_partitions && offset < table_len;
1416          i++, offset += gpm->partitionMapLength) {
1417         struct udf_part_map *map = &sbi->s_partmaps[i];
1418         gpm = (struct genericPartitionMap *)
1419                 &(lvd->partitionMaps[offset]);
1420         type = gpm->partitionMapType;
1421         if (type == 1) {
1422             struct genericPartitionMap1 *gpm1 =
1423                 (struct genericPartitionMap1 *)gpm;
1424             map->s_partition_type = UDF_TYPE1_MAP15;
1425             map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1426             map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1427             map->s_partition_func = NULL;
1428         } else if (type == 2) {
1429             struct udfPartitionMap2 *upm2 =
1430                         (struct udfPartitionMap2 *)gpm;
1431             if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1432                         strlen(UDF_ID_VIRTUAL))) {
1433                 u16 suf =
1434                     le16_to_cpu(((__le16 *)upm2->partIdent.
1435                             identSuffix)[0]);
1436                 if (suf < 0x0200) {
1437                     map->s_partition_type =
1438                             UDF_VIRTUAL_MAP15;
1439                     map->s_partition_func =
1440                             udf_get_pblock_virt15;
1441                 } else {
1442                     map->s_partition_type =
1443                             UDF_VIRTUAL_MAP20;
1444                     map->s_partition_func =
1445                             udf_get_pblock_virt20;
1446                 }
1447             } else if (!strncmp(upm2->partIdent.ident,
1448                         UDF_ID_SPARABLE,
1449                         strlen(UDF_ID_SPARABLE))) {
1450                 ret = udf_load_sparable_map(sb, map,
1451                     (struct sparablePartitionMap *)gpm);
1452                 if (ret < 0)
1453                     goto out_bh;
1454             } else if (!strncmp(upm2->partIdent.ident,
1455                         UDF_ID_METADATA,
1456                         strlen(UDF_ID_METADATA))) {
1457                 struct udf_meta_data *mdata =
1458                     &map->s_type_specific.s_metadata;
1459                 struct metadataPartitionMap *mdm =
1460                         (struct metadataPartitionMap *)
1461                         &(lvd->partitionMaps[offset]);
1462                 udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
1463                       i, type, UDF_ID_METADATA);
1464 
1465                 map->s_partition_type = UDF_METADATA_MAP25;
1466                 map->s_partition_func = udf_get_pblock_meta25;
1467 
1468                 mdata->s_meta_file_loc   =
1469                     le32_to_cpu(mdm->metadataFileLoc);
1470                 mdata->s_mirror_file_loc =
1471                     le32_to_cpu(mdm->metadataMirrorFileLoc);
1472                 mdata->s_bitmap_file_loc =
1473                     le32_to_cpu(mdm->metadataBitmapFileLoc);
1474                 mdata->s_alloc_unit_size =
1475                     le32_to_cpu(mdm->allocUnitSize);
1476                 mdata->s_align_unit_size =
1477                     le16_to_cpu(mdm->alignUnitSize);
1478                 if (mdm->flags & 0x01)
1479                     mdata->s_flags |= MF_DUPLICATE_MD;
1480 
1481                 udf_debug("Metadata Ident suffix=0x%x\n",
1482                       le16_to_cpu(*(__le16 *)
1483                               mdm->partIdent.identSuffix));
1484                 udf_debug("Metadata part num=%u\n",
1485                       le16_to_cpu(mdm->partitionNum));
1486                 udf_debug("Metadata part alloc unit size=%u\n",
1487                       le32_to_cpu(mdm->allocUnitSize));
1488                 udf_debug("Metadata file loc=%u\n",
1489                       le32_to_cpu(mdm->metadataFileLoc));
1490                 udf_debug("Mirror file loc=%u\n",
1491                       le32_to_cpu(mdm->metadataMirrorFileLoc));
1492                 udf_debug("Bitmap file loc=%u\n",
1493                       le32_to_cpu(mdm->metadataBitmapFileLoc));
1494                 udf_debug("Flags: %d %u\n",
1495                       mdata->s_flags, mdm->flags);
1496             } else {
1497                 udf_debug("Unknown ident: %s\n",
1498                       upm2->partIdent.ident);
1499                 continue;
1500             }
1501             map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1502             map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1503         }
1504         udf_debug("Partition (%d:%u) type %u on volume %u\n",
1505               i, map->s_partition_num, type, map->s_volumeseqnum);
1506     }
1507 
1508     if (fileset) {
1509         struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1510 
1511         *fileset = lelb_to_cpu(la->extLocation);
1512         udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
1513               fileset->logicalBlockNum,
1514               fileset->partitionReferenceNum);
1515     }
1516     if (lvd->integritySeqExt.extLength)
1517         udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1518     ret = 0;
1519 
1520     if (!sbi->s_lvid_bh) {
1521         /* We can't generate unique IDs without a valid LVID */
1522         if (sb_rdonly(sb)) {
1523             UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1524         } else {
1525             udf_warn(sb, "Damaged or missing LVID, forcing "
1526                      "readonly mount\n");
1527             ret = -EACCES;
1528         }
1529     }
1530 out_bh:
1531     brelse(bh);
1532     return ret;
1533 }
1534 
1535 /*
1536  * Find the prevailing Logical Volume Integrity Descriptor.
1537  */
1538 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1539 {
1540     struct buffer_head *bh, *final_bh;
1541     uint16_t ident;
1542     struct udf_sb_info *sbi = UDF_SB(sb);
1543     struct logicalVolIntegrityDesc *lvid;
1544     int indirections = 0;
1545     u32 parts, impuselen;
1546 
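    /*
     * Walk each extent of the integrity sequence block by block, keeping
     * the last LVID found (later descriptors prevail), and follow at most
     * UDF_MAX_LVID_NESTING nextIntegrityExt redirections so corrupted
     * media cannot make us loop for long.
     */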
1547     while (++indirections <= UDF_MAX_LVID_NESTING) {
1548         final_bh = NULL;
1549         while (loc.extLength > 0 &&
1550             (bh = udf_read_tagged(sb, loc.extLocation,
1551                     loc.extLocation, &ident))) {
1552             if (ident != TAG_IDENT_LVID) {
1553                 brelse(bh);
1554                 break;
1555             }
1556 
1557             brelse(final_bh);
1558             final_bh = bh;
1559 
1560             loc.extLength -= sb->s_blocksize;
1561             loc.extLocation++;
1562         }
1563 
1564         if (!final_bh)
1565             return;
1566 
1567         brelse(sbi->s_lvid_bh);
1568         sbi->s_lvid_bh = final_bh;
1569 
1570         lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1571         if (lvid->nextIntegrityExt.extLength == 0)
1572             goto check;
1573 
1574         loc = leea_to_cpu(lvid->nextIntegrityExt);
1575     }
1576 
1577     udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1578         UDF_MAX_LVID_NESTING);
1579 out_err:
1580     brelse(sbi->s_lvid_bh);
1581     sbi->s_lvid_bh = NULL;
1582     return;
1583 check:
1584     parts = le32_to_cpu(lvid->numOfPartitions);
1585     impuselen = le32_to_cpu(lvid->lengthOfImpUse);
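    /*
     * Sanity check: the free-space and size tables (each holding 'parts'
     * 32-bit entries) plus the implementation-use area must fit in the
     * single block that carries the LVID.
     */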
1586     if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
1587         sizeof(struct logicalVolIntegrityDesc) + impuselen +
1588         2 * parts * sizeof(u32) > sb->s_blocksize) {
1589         udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
1590              "ignoring.\n", parts, impuselen);
1591         goto out_err;
1592     }
1593 }
1594 
1595 /*
1596  * Allocation step for the table of partition descriptor records collected
1597  * while scanning a volume descriptor sequence. Must be a power of 2.
1598  */
1599 #define PART_DESC_ALLOC_STEP 32
1600 
1601 struct part_desc_seq_scan_data {
1602     struct udf_vds_record rec;
1603     u32 partnum;
1604 };
1605 
1606 struct desc_seq_scan_data {
1607     struct udf_vds_record vds[VDS_POS_LENGTH];
1608     unsigned int size_part_descs;
1609     unsigned int num_part_descs;
1610     struct part_desc_seq_scan_data *part_descs_loc;
1611 };
1612 
1613 static struct udf_vds_record *handle_partition_descriptor(
1614                 struct buffer_head *bh,
1615                 struct desc_seq_scan_data *data)
1616 {
1617     struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1618     int partnum;
1619     int i;
1620 
1621     partnum = le16_to_cpu(desc->partitionNumber);
1622     for (i = 0; i < data->num_part_descs; i++)
1623         if (partnum == data->part_descs_loc[i].partnum)
1624             return &(data->part_descs_loc[i].rec);
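    /*
     * First occurrence of this partition number: enlarge the array if it
     * is full (new size is partnum rounded up to a multiple of
     * PART_DESC_ALLOC_STEP) and hand out the next free slot.
     */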
1625     if (data->num_part_descs >= data->size_part_descs) {
1626         struct part_desc_seq_scan_data *new_loc;
1627         unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1628 
1629         new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1630         if (!new_loc)
1631             return ERR_PTR(-ENOMEM);
1632         memcpy(new_loc, data->part_descs_loc,
1633                data->size_part_descs * sizeof(*new_loc));
1634         kfree(data->part_descs_loc);
1635         data->part_descs_loc = new_loc;
1636         data->size_part_descs = new_size;
1637     }
1638     return &(data->part_descs_loc[data->num_part_descs++].rec);
1639 }
1640 
1641 
1642 static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1643         struct buffer_head *bh, struct desc_seq_scan_data *data)
1644 {
1645     switch (ident) {
1646     case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1647         return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1648     case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1649         return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1650     case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1651         return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1652     case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1653         return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1654     case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1655         return handle_partition_descriptor(bh, data);
1656     }
1657     return NULL;
1658 }
1659 
1660 /*
1661  * Process a main/reserve volume descriptor sequence.
1662  *   @block     First block of first extent of the sequence.
1663  *   @lastblock     Last block of the first extent of the sequence.
1664  *   @fileset       Where we store the extent containing the root fileset
1665  *
1666  * Returns <0 on error, 0 on success. -EAGAIN is special - try the next
1667  * descriptor sequence.
1668  */
1669 static noinline int udf_process_sequence(
1670         struct super_block *sb,
1671         sector_t block, sector_t lastblock,
1672         struct kernel_lb_addr *fileset)
1673 {
1674     struct buffer_head *bh = NULL;
1675     struct udf_vds_record *curr;
1676     struct generic_desc *gd;
1677     struct volDescPtr *vdp;
1678     bool done = false;
1679     uint32_t vdsn;
1680     uint16_t ident;
1681     int ret;
1682     unsigned int indirections = 0;
1683     struct desc_seq_scan_data data;
1684     unsigned int i;
1685 
1686     memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1687     data.size_part_descs = PART_DESC_ALLOC_STEP;
1688     data.num_part_descs = 0;
1689     data.part_descs_loc = kcalloc(data.size_part_descs,
1690                       sizeof(*data.part_descs_loc),
1691                       GFP_KERNEL);
1692     if (!data.part_descs_loc)
1693         return -ENOMEM;
1694 
1695     /*
1696      * Read the main descriptor sequence and find which descriptors
1697      * are in it.
1698      */
1699     for (; (!done && block <= lastblock); block++) {
1700         bh = udf_read_tagged(sb, block, block, &ident);
1701         if (!bh)
1702             break;
1703 
1704         /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1705         gd = (struct generic_desc *)bh->b_data;
1706         vdsn = le32_to_cpu(gd->volDescSeqNum);
1707         switch (ident) {
1708         case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1709             if (++indirections > UDF_MAX_TD_NESTING) {
1710                 udf_err(sb, "too many Volume Descriptor "
1711                     "Pointers (max %u supported)\n",
1712                     UDF_MAX_TD_NESTING);
1713                 brelse(bh);
1714                 ret = -EIO;
1715                 goto out;
1716             }
1717 
1718             vdp = (struct volDescPtr *)bh->b_data;
1719             block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1720             lastblock = le32_to_cpu(
1721                 vdp->nextVolDescSeqExt.extLength) >>
1722                 sb->s_blocksize_bits;
1723             lastblock += block - 1;
1724             /* For loop is going to increment 'block' again */
1725             block--;
1726             break;
1727         case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1728         case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1729         case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1730         case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1731         case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1732             curr = get_volume_descriptor_record(ident, bh, &data);
1733             if (IS_ERR(curr)) {
1734                 brelse(bh);
1735                 ret = PTR_ERR(curr);
1736                 goto out;
1737             }
1738             /* Descriptor we don't care about? */
1739             if (!curr)
1740                 break;
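            /*
             * Keep the copy with the highest Volume Descriptor Sequence
             * Number - that is the one that prevails over earlier copies
             * of the same descriptor.
             */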
1741             if (vdsn >= curr->volDescSeqNum) {
1742                 curr->volDescSeqNum = vdsn;
1743                 curr->block = block;
1744             }
1745             break;
1746         case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1747             done = true;
1748             break;
1749         }
1750         brelse(bh);
1751     }
1752     /*
1753      * Now read interesting descriptors again and process them
1754      * in a suitable order
1755      */
1756     if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1757         udf_err(sb, "Primary Volume Descriptor not found!\n");
1758         ret = -EAGAIN;
1759         goto out;
1760     }
1761     ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
1762     if (ret < 0)
1763         goto out;
1764 
1765     if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1766         ret = udf_load_logicalvol(sb,
1767                 data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
1768                 fileset);
1769         if (ret < 0)
1770             goto out;
1771     }
1772 
1773     /* Now handle prevailing Partition Descriptors */
1774     for (i = 0; i < data.num_part_descs; i++) {
1775         ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1776         if (ret < 0)
1777             goto out;
1778     }
1779     ret = 0;
1780 out:
1781     kfree(data.part_descs_loc);
1782     return ret;
1783 }
1784 
1785 /*
1786  * Load Volume Descriptor Sequence described by anchor in bh
1787  *
1788  * Returns <0 on error, 0 on success
1789  */
1790 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1791                  struct kernel_lb_addr *fileset)
1792 {
1793     struct anchorVolDescPtr *anchor;
1794     sector_t main_s, main_e, reserve_s, reserve_e;
1795     int ret;
1796 
1797     anchor = (struct anchorVolDescPtr *)bh->b_data;
1798 
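    /*
     * The extent lengths below are in bytes; they are converted to a block
     * count and then to an inclusive end block for each sequence.
     */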
1799     /* Locate the main sequence */
1800     main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1801     main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1802     main_e = main_e >> sb->s_blocksize_bits;
1803     main_e += main_s - 1;
1804 
1805     /* Locate the reserve sequence */
1806     reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1807     reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1808     reserve_e = reserve_e >> sb->s_blocksize_bits;
1809     reserve_e += reserve_s - 1;
1810 
1811     /* Process the main & reserve sequences */
1812     /* responsible for finding the PartitionDesc(s) */
1813     ret = udf_process_sequence(sb, main_s, main_e, fileset);
1814     if (ret != -EAGAIN)
1815         return ret;
1816     udf_sb_free_partitions(sb);
1817     ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1818     if (ret < 0) {
1819         udf_sb_free_partitions(sb);
1820         /* No sequence was OK, return -EIO */
1821         if (ret == -EAGAIN)
1822             ret = -EIO;
1823     }
1824     return ret;
1825 }
1826 
1827 /*
1828  * Check whether the given block contains an anchor volume descriptor
1829  * pointer and load the Volume Descriptor Sequence if so.
1830  *
1831  * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1832  * block
1833  */
1834 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1835                   struct kernel_lb_addr *fileset)
1836 {
1837     struct buffer_head *bh;
1838     uint16_t ident;
1839     int ret;
1840 
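    /*
     * With UDF_FLAG_VARCONV set, the read helpers remap block numbers via
     * udf_fixed_to_variable(), so skip candidates whose remapped position
     * would lie past the end of the device.
     */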
1841     if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1842         udf_fixed_to_variable(block) >= sb_bdev_nr_blocks(sb))
1843         return -EAGAIN;
1844 
1845     bh = udf_read_tagged(sb, block, block, &ident);
1846     if (!bh)
1847         return -EAGAIN;
1848     if (ident != TAG_IDENT_AVDP) {
1849         brelse(bh);
1850         return -EAGAIN;
1851     }
1852     ret = udf_load_sequence(sb, bh, fileset);
1853     brelse(bh);
1854     return ret;
1855 }
1856 
1857 /*
1858  * Search for an anchor volume descriptor pointer.
1859  *
1860  * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1861  * of anchors.
1862  */
1863 static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1864                 struct kernel_lb_addr *fileset)
1865 {
1866     sector_t last[6];
1867     int i;
1868     struct udf_sb_info *sbi = UDF_SB(sb);
1869     int last_count = 0;
1870     int ret;
1871 
1872     /* First try user provided anchor */
1873     if (sbi->s_anchor) {
1874         ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1875         if (ret != -EAGAIN)
1876             return ret;
1877     }
1878     /*
1879      * According to the spec, the anchor is in one of:
1880      *     block 256
1881      *     lastblock-256
1882      *     lastblock
1883      * However, if the disc isn't closed, it could also be at block 512.
1884      */
1885     ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1886     if (ret != -EAGAIN)
1887         return ret;
1888     /*
1889      * The trouble is determining which block is the last one. Drives often
1890      * misreport this, so we try several likely candidates.
1891      */
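    /*
     * The -150/-152 candidates are heuristics: they likely compensate for
     * the run-out/post-gap sectors some drives include when reporting the
     * device size.
     */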
1892     last[last_count++] = *lastblock;
1893     if (*lastblock >= 1)
1894         last[last_count++] = *lastblock - 1;
1895     last[last_count++] = *lastblock + 1;
1896     if (*lastblock >= 2)
1897         last[last_count++] = *lastblock - 2;
1898     if (*lastblock >= 150)
1899         last[last_count++] = *lastblock - 150;
1900     if (*lastblock >= 152)
1901         last[last_count++] = *lastblock - 152;
1902 
1903     for (i = 0; i < last_count; i++) {
1904         if (last[i] >= sb_bdev_nr_blocks(sb))
1905             continue;
1906         ret = udf_check_anchor_block(sb, last[i], fileset);
1907         if (ret != -EAGAIN) {
1908             if (!ret)
1909                 *lastblock = last[i];
1910             return ret;
1911         }
1912         if (last[i] < 256)
1913             continue;
1914         ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1915         if (ret != -EAGAIN) {
1916             if (!ret)
1917                 *lastblock = last[i];
1918             return ret;
1919         }
1920     }
1921 
1922     /* Finally try block 512 in case media is open */
1923     return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1924 }
1925 
1926 /*
1927  * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1928  * area specified by it. The function expects sbi->s_last_block to be the last
1929  * block on the media.
1930  *
1931  * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
1932  * was not found.
1933  */
1934 static int udf_find_anchor(struct super_block *sb,
1935                struct kernel_lb_addr *fileset)
1936 {
1937     struct udf_sb_info *sbi = UDF_SB(sb);
1938     sector_t lastblock = sbi->s_last_block;
1939     int ret;
1940 
1941     ret = udf_scan_anchors(sb, &lastblock, fileset);
1942     if (ret != -EAGAIN)
1943         goto out;
1944 
1945     /* No anchor found? Try VARCONV conversion of block numbers */
1946     UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1947     lastblock = udf_variable_to_fixed(sbi->s_last_block);
1948     /* First, try without converting the number of the last block */
1949     ret = udf_scan_anchors(sb, &lastblock, fileset);
1950     if (ret != -EAGAIN)
1951         goto out;
1952 
1953     lastblock = sbi->s_last_block;
1954     /* Second, try with the number of the last block converted */
1955     ret = udf_scan_anchors(sb, &lastblock, fileset);
1956     if (ret < 0) {
1957         /* VARCONV didn't help. Clear it. */
1958         UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1959     }
1960 out:
1961     if (ret == 0)
1962         sbi->s_last_block = lastblock;
1963     return ret;
1964 }
1965 
1966 /*
1967  * Check Volume Structure Descriptor, find Anchor block and load Volume
1968  * Descriptor Sequence.
1969  *
1970  * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1971  * block was not found.
1972  */
1973 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1974             int silent, struct kernel_lb_addr *fileset)
1975 {
1976     struct udf_sb_info *sbi = UDF_SB(sb);
1977     int nsr = 0;
1978     int ret;
1979 
1980     if (!sb_set_blocksize(sb, uopt->blocksize)) {
1981         if (!silent)
1982             udf_warn(sb, "Bad block size\n");
1983         return -EINVAL;
1984     }
1985     sbi->s_last_block = uopt->lastblock;
1986     if (!uopt->novrs) {
1987         /* Check that it is NSR02 compliant */
1988         nsr = udf_check_vsd(sb);
1989         if (!nsr) {
1990             if (!silent)
1991                 udf_warn(sb, "No VRS found\n");
1992             return -EINVAL;
1993         }
1994         if (nsr == -1)
1995             udf_debug("Failed to read sector at offset %d. "
1996                   "Assuming open disc. Skipping validity "
1997                   "check\n", VSD_FIRST_SECTOR_OFFSET);
1998         if (!sbi->s_last_block)
1999             sbi->s_last_block = udf_get_last_block(sb);
2000     } else {
2001         udf_debug("Validity check skipped because of novrs option\n");
2002     }
2003 
2004     /* Look for anchor block and load Volume Descriptor Sequence */
2005     sbi->s_anchor = uopt->anchor;
2006     ret = udf_find_anchor(sb, fileset);
2007     if (ret < 0) {
2008         if (!silent && ret == -EAGAIN)
2009             udf_warn(sb, "No anchor found\n");
2010         return ret;
2011     }
2012     return 0;
2013 }
2014 
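/*
 * Refresh the LVID's recording timestamp and recompute the descriptor CRC and
 * tag checksum so the block is self-consistent before it is written out.
 */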
2015 static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
2016 {
2017     struct timespec64 ts;
2018 
2019     ktime_get_real_ts64(&ts);
2020     udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
2021     lvid->descTag.descCRC = cpu_to_le16(
2022         crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2023             le16_to_cpu(lvid->descTag.descCRCLength)));
2024     lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2025 }
2026 
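/*
 * Mark the logical volume as open for writing on a read-write mount; if the
 * volume was not cleanly closed before, remember that it may be inconsistent.
 */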
2027 static void udf_open_lvid(struct super_block *sb)
2028 {
2029     struct udf_sb_info *sbi = UDF_SB(sb);
2030     struct buffer_head *bh = sbi->s_lvid_bh;
2031     struct logicalVolIntegrityDesc *lvid;
2032     struct logicalVolIntegrityDescImpUse *lvidiu;
2033 
2034     if (!bh)
2035         return;
2036     lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2037     lvidiu = udf_sb_lvidiu(sb);
2038     if (!lvidiu)
2039         return;
2040 
2041     mutex_lock(&sbi->s_alloc_mutex);
2042     lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2043     lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2044     if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
2045         lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
2046     else
2047         UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
2048 
2049     udf_finalize_lvid(lvid);
2050     mark_buffer_dirty(bh);
2051     sbi->s_lvid_dirty = 0;
2052     mutex_unlock(&sbi->s_alloc_mutex);
2053     /* Make opening of filesystem visible on the media immediately */
2054     sync_dirty_buffer(bh);
2055 }
2056 
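/*
 * When the filesystem stops being writable, bump the recorded UDF revision
 * fields if needed and mark the integrity descriptor closed again, unless the
 * volume was found inconsistent when it was opened.
 */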
2057 static void udf_close_lvid(struct super_block *sb)
2058 {
2059     struct udf_sb_info *sbi = UDF_SB(sb);
2060     struct buffer_head *bh = sbi->s_lvid_bh;
2061     struct logicalVolIntegrityDesc *lvid;
2062     struct logicalVolIntegrityDescImpUse *lvidiu;
2063 
2064     if (!bh)
2065         return;
2066     lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2067     lvidiu = udf_sb_lvidiu(sb);
2068     if (!lvidiu)
2069         return;
2070 
2071     mutex_lock(&sbi->s_alloc_mutex);
2072     lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2073     lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2074     if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
2075         lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
2076     if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
2077         lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
2078     if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
2079         lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
2080     if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
2081         lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
2082 
2083     /*
2084      * We set buffer uptodate unconditionally here to avoid spurious
2085      * warnings from mark_buffer_dirty() when a previous EIO has marked
2086      * the buffer as !uptodate.
2087      */
2088     set_buffer_uptodate(bh);
2089     udf_finalize_lvid(lvid);
2090     mark_buffer_dirty(bh);
2091     sbi->s_lvid_dirty = 0;
2092     mutex_unlock(&sbi->s_alloc_mutex);
2093     /* Make closing of filesystem visible on the media immediately */
2094     sync_dirty_buffer(bh);
2095 }
2096 
2097 u64 lvid_get_unique_id(struct super_block *sb)
2098 {
2099     struct buffer_head *bh;
2100     struct udf_sb_info *sbi = UDF_SB(sb);
2101     struct logicalVolIntegrityDesc *lvid;
2102     struct logicalVolHeaderDesc *lvhd;
2103     u64 uniqueID;
2104     u64 ret;
2105 
2106     bh = sbi->s_lvid_bh;
2107     if (!bh)
2108         return 0;
2109 
2110     lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2111     lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2112 
2113     mutex_lock(&sbi->s_alloc_mutex);
2114     ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
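    /*
     * The low UniqueID values (0-15) are reserved by the UDF spec, so skip
     * over them whenever the lower 32 bits wrap around to zero.
     */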
2115     if (!(++uniqueID & 0xFFFFFFFF))
2116         uniqueID += 16;
2117     lvhd->uniqueID = cpu_to_le64(uniqueID);
2118     udf_updated_lvid(sb);
2119     mutex_unlock(&sbi->s_alloc_mutex);
2120 
2121     return ret;
2122 }
2123 
2124 static int udf_fill_super(struct super_block *sb, void *options, int silent)
2125 {
2126     int ret = -EINVAL;
2127     struct inode *inode = NULL;
2128     struct udf_options uopt;
2129     struct kernel_lb_addr rootdir, fileset;
2130     struct udf_sb_info *sbi;
2131     bool lvid_open = false;
2132 
2133     uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2134     /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
2135     uopt.uid = make_kuid(current_user_ns(), overflowuid);
2136     uopt.gid = make_kgid(current_user_ns(), overflowgid);
2137     uopt.umask = 0;
2138     uopt.fmode = UDF_INVALID_MODE;
2139     uopt.dmode = UDF_INVALID_MODE;
2140     uopt.nls_map = NULL;
2141 
2142     sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2143     if (!sbi)
2144         return -ENOMEM;
2145 
2146     sb->s_fs_info = sbi;
2147 
2148     mutex_init(&sbi->s_alloc_mutex);
2149 
2150     if (!udf_parse_options((char *)options, &uopt, false))
2151         goto parse_options_failure;
2152 
2153     fileset.logicalBlockNum = 0xFFFFFFFF;
2154     fileset.partitionReferenceNum = 0xFFFF;
2155 
2156     sbi->s_flags = uopt.flags;
2157     sbi->s_uid = uopt.uid;
2158     sbi->s_gid = uopt.gid;
2159     sbi->s_umask = uopt.umask;
2160     sbi->s_fmode = uopt.fmode;
2161     sbi->s_dmode = uopt.dmode;
2162     sbi->s_nls_map = uopt.nls_map;
2163     rwlock_init(&sbi->s_cred_lock);
2164 
2165     if (uopt.session == 0xFFFFFFFF)
2166         sbi->s_session = udf_get_last_session(sb);
2167     else
2168         sbi->s_session = uopt.session;
2169 
2170     udf_debug("Multi-session=%d\n", sbi->s_session);
2171 
2172     /* Fill in the rest of the superblock */
2173     sb->s_op = &udf_sb_ops;
2174     sb->s_export_op = &udf_export_ops;
2175 
2176     sb->s_magic = UDF_SUPER_MAGIC;
2177     sb->s_time_gran = 1000;
2178 
2179     if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2180         ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2181     } else {
2182         uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
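        /*
         * No block size was given: probe from the device's logical block
         * size upwards, doubling up to 4096, until a valid volume
         * recognition sequence and anchor are found.
         */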
2183         while (uopt.blocksize <= 4096) {
2184             ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2185             if (ret < 0) {
2186                 if (!silent && ret != -EACCES) {
2187                     pr_notice("Scanning with blocksize %u failed\n",
2188                           uopt.blocksize);
2189                 }
2190                 brelse(sbi->s_lvid_bh);
2191                 sbi->s_lvid_bh = NULL;
2192                 /*
2193                  * EACCES is special - we want to propagate to
2194                  * upper layers that we cannot handle RW mount.
2195                  */
2196                 if (ret == -EACCES)
2197                     break;
2198             } else
2199                 break;
2200 
2201             uopt.blocksize <<= 1;
2202         }
2203     }
2204     if (ret < 0) {
2205         if (ret == -EAGAIN) {
2206             udf_warn(sb, "No partition found (1)\n");
2207             ret = -EINVAL;
2208         }
2209         goto error_out;
2210     }
2211 
2212     udf_debug("Lastblock=%u\n", sbi->s_last_block);
2213 
2214     if (sbi->s_lvid_bh) {
2215         struct logicalVolIntegrityDescImpUse *lvidiu =
2216                             udf_sb_lvidiu(sb);
2217         uint16_t minUDFReadRev;
2218         uint16_t minUDFWriteRev;
2219 
2220         if (!lvidiu) {
2221             ret = -EINVAL;
2222             goto error_out;
2223         }
2224         minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2225         minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2226         if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2227             udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2228                 minUDFReadRev,
2229                 UDF_MAX_READ_VERSION);
2230             ret = -EINVAL;
2231             goto error_out;
2232         } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
2233             if (!sb_rdonly(sb)) {
2234                 ret = -EACCES;
2235                 goto error_out;
2236             }
2237             UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2238         }
2239 
2240         sbi->s_udfrev = minUDFWriteRev;
2241 
2242         if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2243             UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2244         if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2245             UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2246     }
2247 
2248     if (!sbi->s_partitions) {
2249         udf_warn(sb, "No partition found (2)\n");
2250         ret = -EINVAL;
2251         goto error_out;
2252     }
2253 
2254     if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2255             UDF_PART_FLAG_READ_ONLY) {
2256         if (!sb_rdonly(sb)) {
2257             ret = -EACCES;
2258             goto error_out;
2259         }
2260         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2261     }
2262 
2263     ret = udf_find_fileset(sb, &fileset, &rootdir);
2264     if (ret < 0) {
2265         udf_warn(sb, "No fileset found\n");
2266         goto error_out;
2267     }
2268 
2269     if (!silent) {
2270         struct timestamp ts;
2271         udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2272         udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2273              sbi->s_volume_ident,
2274              le16_to_cpu(ts.year), ts.month, ts.day,
2275              ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2276     }
2277     if (!sb_rdonly(sb)) {
2278         udf_open_lvid(sb);
2279         lvid_open = true;
2280     }
2281 
2282     /* Assign the root inode */
2283     /* assign inodes by physical block number */
2284     /* perhaps it's not extensible enough, but for now ... */
2285     inode = udf_iget(sb, &rootdir);
2286     if (IS_ERR(inode)) {
2287         udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
2288                rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2289         ret = PTR_ERR(inode);
2290         goto error_out;
2291     }
2292 
2293     /* Allocate a dentry for the root inode */
2294     sb->s_root = d_make_root(inode);
2295     if (!sb->s_root) {
2296         udf_err(sb, "Couldn't allocate root dentry\n");
2297         ret = -ENOMEM;
2298         goto error_out;
2299     }
2300     sb->s_maxbytes = MAX_LFS_FILESIZE;
2301     sb->s_max_links = UDF_MAX_LINKS;
2302     return 0;
2303 
2304 error_out:
2305     iput(sbi->s_vat_inode);
2306 parse_options_failure:
2307     unload_nls(uopt.nls_map);
2308     if (lvid_open)
2309         udf_close_lvid(sb);
2310     brelse(sbi->s_lvid_bh);
2311     udf_sb_free_partitions(sb);
2312     kfree(sbi);
2313     sb->s_fs_info = NULL;
2314 
2315     return ret;
2316 }
2317 
2318 void _udf_err(struct super_block *sb, const char *function,
2319           const char *fmt, ...)
2320 {
2321     struct va_format vaf;
2322     va_list args;
2323 
2324     va_start(args, fmt);
2325 
2326     vaf.fmt = fmt;
2327     vaf.va = &args;
2328 
2329     pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2330 
2331     va_end(args);
2332 }
2333 
2334 void _udf_warn(struct super_block *sb, const char *function,
2335            const char *fmt, ...)
2336 {
2337     struct va_format vaf;
2338     va_list args;
2339 
2340     va_start(args, fmt);
2341 
2342     vaf.fmt = fmt;
2343     vaf.va = &args;
2344 
2345     pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2346 
2347     va_end(args);
2348 }
2349 
2350 static void udf_put_super(struct super_block *sb)
2351 {
2352     struct udf_sb_info *sbi;
2353 
2354     sbi = UDF_SB(sb);
2355 
2356     iput(sbi->s_vat_inode);
2357     unload_nls(sbi->s_nls_map);
2358     if (!sb_rdonly(sb))
2359         udf_close_lvid(sb);
2360     brelse(sbi->s_lvid_bh);
2361     udf_sb_free_partitions(sb);
2362     mutex_destroy(&sbi->s_alloc_mutex);
2363     kfree(sb->s_fs_info);
2364     sb->s_fs_info = NULL;
2365 }
2366 
2367 static int udf_sync_fs(struct super_block *sb, int wait)
2368 {
2369     struct udf_sb_info *sbi = UDF_SB(sb);
2370 
2371     mutex_lock(&sbi->s_alloc_mutex);
2372     if (sbi->s_lvid_dirty) {
2373         struct buffer_head *bh = sbi->s_lvid_bh;
2374         struct logicalVolIntegrityDesc *lvid;
2375 
2376         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2377         udf_finalize_lvid(lvid);
2378 
2379         /*
2380          * The block device will be synced later, so we don't have to
2381          * submit the buffer for IO.
2382          */
2383         mark_buffer_dirty(bh);
2384         sbi->s_lvid_dirty = 0;
2385     }
2386     mutex_unlock(&sbi->s_alloc_mutex);
2387 
2388     return 0;
2389 }
2390 
2391 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2392 {
2393     struct super_block *sb = dentry->d_sb;
2394     struct udf_sb_info *sbi = UDF_SB(sb);
2395     struct logicalVolIntegrityDescImpUse *lvidiu;
2396     u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2397 
2398     lvidiu = udf_sb_lvidiu(sb);
2399     buf->f_type = UDF_SUPER_MAGIC;
2400     buf->f_bsize = sb->s_blocksize;
2401     buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2402     buf->f_bfree = udf_count_free(sb);
2403     buf->f_bavail = buf->f_bfree;
2404     /*
2405      * Let's pretend each free block is also a free 'inode' since UDF does
2406      * not have a separate preallocated table of inodes.
2407      */
2408     buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2409                       le32_to_cpu(lvidiu->numDirs)) : 0)
2410             + buf->f_bfree;
2411     buf->f_ffree = buf->f_bfree;
2412     buf->f_namelen = UDF_NAME_LEN;
2413     buf->f_fsid = u64_to_fsid(id);
2414 
2415     return 0;
2416 }
2417 
2418 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2419                       struct udf_bitmap *bitmap)
2420 {
2421     struct buffer_head *bh = NULL;
2422     unsigned int accum = 0;
2423     int index;
2424     udf_pblk_t block = 0, newblock;
2425     struct kernel_lb_addr loc;
2426     uint32_t bytes;
2427     uint8_t *ptr;
2428     uint16_t ident;
2429     struct spaceBitmapDesc *bm;
2430 
2431     loc.logicalBlockNum = bitmap->s_extPosition;
2432     loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2433     bh = udf_read_ptagged(sb, &loc, 0, &ident);
2434 
2435     if (!bh) {
2436         udf_err(sb, "udf_count_free failed\n");
2437         goto out;
2438     } else if (ident != TAG_IDENT_SBD) {
2439         brelse(bh);
2440         udf_err(sb, "udf_count_free failed\n");
2441         goto out;
2442     }
2443 
2444     bm = (struct spaceBitmapDesc *)bh->b_data;
2445     bytes = le32_to_cpu(bm->numOfBytes);
2446     index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2447     ptr = (uint8_t *)bh->b_data;
2448 
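    /*
     * The space bitmap may span several blocks: only the first one carries
     * the spaceBitmapDesc header, the rest are raw bitmap data, so count
     * the set (free) bits block by block.
     */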
2449     while (bytes > 0) {
2450         u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2451         accum += bitmap_weight((const unsigned long *)(ptr + index),
2452                     cur_bytes * 8);
2453         bytes -= cur_bytes;
2454         if (bytes) {
2455             brelse(bh);
2456             newblock = udf_get_lb_pblock(sb, &loc, ++block);
2457             bh = udf_tread(sb, newblock);
2458             if (!bh) {
2459                 udf_debug("read failed\n");
2460                 goto out;
2461             }
2462             index = 0;
2463             ptr = (uint8_t *)bh->b_data;
2464         }
2465     }
2466     brelse(bh);
2467 out:
2468     return accum;
2469 }
2470 
2471 static unsigned int udf_count_free_table(struct super_block *sb,
2472                      struct inode *table)
2473 {
2474     unsigned int accum = 0;
2475     uint32_t elen;
2476     struct kernel_lb_addr eloc;
2477     struct extent_position epos;
2478 
2479     mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2480     epos.block = UDF_I(table)->i_location;
2481     epos.offset = sizeof(struct unallocSpaceEntry);
2482     epos.bh = NULL;
2483 
2484     while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1)
2485         accum += (elen >> table->i_sb->s_blocksize_bits);
2486 
2487     brelse(epos.bh);
2488     mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2489 
2490     return accum;
2491 }
2492 
2493 static unsigned int udf_count_free(struct super_block *sb)
2494 {
2495     unsigned int accum = 0;
2496     struct udf_sb_info *sbi = UDF_SB(sb);
2497     struct udf_part_map *map;
2498     unsigned int part = sbi->s_partition;
2499     int ptype = sbi->s_partmaps[part].s_partition_type;
2500 
2501     if (ptype == UDF_METADATA_MAP25) {
2502         part = sbi->s_partmaps[part].s_type_specific.s_metadata.
2503                             s_phys_partition_ref;
2504     } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
2505         /*
2506          * Filesystems with VAT are append-only and we cannot write to
2507          * them. Let's just report 0 here.
2508          */
2509         return 0;
2510     }
2511 
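    /*
     * Prefer the per-partition free-space count cached in the LVID;
     * 0xFFFFFFFF there means the value is not maintained, in which case we
     * fall back to counting the bitmap or unallocated-space table below.
     */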
2512     if (sbi->s_lvid_bh) {
2513         struct logicalVolIntegrityDesc *lvid =
2514             (struct logicalVolIntegrityDesc *)
2515             sbi->s_lvid_bh->b_data;
2516         if (le32_to_cpu(lvid->numOfPartitions) > part) {
2517             accum = le32_to_cpu(
2518                     lvid->freeSpaceTable[part]);
2519             if (accum == 0xFFFFFFFF)
2520                 accum = 0;
2521         }
2522     }
2523 
2524     if (accum)
2525         return accum;
2526 
2527     map = &sbi->s_partmaps[part];
2528     if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2529         accum += udf_count_free_bitmap(sb,
2530                            map->s_uspace.s_bitmap);
2531     }
2532     if (accum)
2533         return accum;
2534 
2535     if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2536         accum += udf_count_free_table(sb,
2537                           map->s_uspace.s_table);
2538     }
2539     return accum;
2540 }
2541 
2542 MODULE_AUTHOR("Ben Fennema");
2543 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
2544 MODULE_LICENSE("GPL");
2545 module_init(init_udf_fs)
2546 module_exit(exit_udf_fs)