Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  linux/fs/fat/cache.c
0004  *
0005  *  Written 1992,1993 by Werner Almesberger
0006  *
0007  *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
0008  *  of inode number.
0009  *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
0010  */
0011 
0012 #include <linux/slab.h>
0013 #include "fat.h"
0014 
/* Per-inode cap on cached cluster runs; this must be > 0. */
#define FAT_MAX_CACHE	8
0017 
/*
 * One entry of the per-inode file-to-disk cluster mapping cache.
 * Entries live on MSDOS_I(inode)->cache_lru, most-recently-used first.
 */
struct fat_cache {
	struct list_head cache_list;	/* link on the inode's LRU list */
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};
0024 
/*
 * Lock-free snapshot of a cache entry.  "id" records the inode's
 * cache_valid_id at lookup time so a later fat_cache_add() can detect
 * that the cache was invalidated in between and discard the snapshot.
 */
struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};
0031 
/*
 * Maximum number of cache entries for this inode.  The inode argument
 * is currently unused; it keeps the limit per-inode tunable.
 */
static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}
0036 
/* Slab cache backing all struct fat_cache objects; set up in fat_cache_init(). */
static struct kmem_cache *fat_cache_cachep;
0038 
0039 static void init_once(void *foo)
0040 {
0041     struct fat_cache *cache = (struct fat_cache *)foo;
0042 
0043     INIT_LIST_HEAD(&cache->cache_list);
0044 }
0045 
0046 int __init fat_cache_init(void)
0047 {
0048     fat_cache_cachep = kmem_cache_create("fat_cache",
0049                 sizeof(struct fat_cache),
0050                 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
0051                 init_once);
0052     if (fat_cache_cachep == NULL)
0053         return -ENOMEM;
0054     return 0;
0055 }
0056 
/* Tear down the slab cache created by fat_cache_init(). */
void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}
0061 
/*
 * Allocate one cache entry.  GFP_NOFS so reclaim does not recurse into
 * the filesystem.  May return NULL.
 */
static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}
0066 
/*
 * Return an entry to the slab.  The entry must already be unlinked
 * from the LRU list (list_del_init keeps the head self-linked, so
 * list_empty() is true for unlinked entries).
 */
static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}
0072 
0073 static inline void fat_cache_update_lru(struct inode *inode,
0074                     struct fat_cache *cache)
0075 {
0076     if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
0077         list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
0078 }
0079 
/*
 * Find the cached run closest to (at or before) file cluster @fclus.
 *
 * Returns the offset of the reachable cluster within the best run, or
 * -1 on a complete miss.  On a hit, *cached_fclus/*cached_dclus receive
 * the file/disk cluster pair from which the FAT walk can resume, and
 * @cid is filled with a snapshot of the entry plus the current
 * cache_valid_id so fat_cache_add() can later detect an intervening
 * invalidation.
 */
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	/* sentinel with fcluster 0: loses the "<" comparison to any real hit */
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				/* run ends before fclus: resume from its tail */
				offset = hit->nr_contig;
			} else {
				/* fclus lies inside this contiguous run: exact hit */
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}
0116 
0117 static struct fat_cache *fat_cache_merge(struct inode *inode,
0118                      struct fat_cache_id *new)
0119 {
0120     struct fat_cache *p;
0121 
0122     list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
0123         /* Find the same part as "new" in cluster-chain. */
0124         if (p->fcluster == new->fcluster) {
0125             BUG_ON(p->dcluster != new->dcluster);
0126             if (new->nr_contig > p->nr_contig)
0127                 p->nr_contig = new->nr_contig;
0128             return p;
0129         }
0130     }
0131     return NULL;
0132 }
0133 
/*
 * Insert (or extend) the mapping described by @new in the inode's
 * cache.  A dummy snapshot (fcluster == -1) or one taken before the
 * last invalidation (id mismatch) is ignored.
 */
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			/* reserve a slot, then drop the lock to allocate */
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				/* allocation failed: give back the reserved slot */
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			/* re-merge: someone may have added the entry while unlocked */
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			/* cache full: recycle the least-recently-used entry */
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
0181 
0182 /*
0183  * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
0184  * fixes itself after a while.
0185  */
0186 static void __fat_cache_inval_inode(struct inode *inode)
0187 {
0188     struct msdos_inode_info *i = MSDOS_I(inode);
0189     struct fat_cache *cache;
0190 
0191     while (!list_empty(&i->cache_lru)) {
0192         cache = list_entry(i->cache_lru.next,
0193                    struct fat_cache, cache_list);
0194         list_del_init(&cache->cache_list);
0195         i->nr_caches--;
0196         fat_cache_free(cache);
0197     }
0198     /* Update. The copy of caches before this id is discarded. */
0199     i->cache_valid_id++;
0200     if (i->cache_valid_id == FAT_CACHE_VALID)
0201         i->cache_valid_id++;
0202 }
0203 
0204 void fat_cache_inval_inode(struct inode *inode)
0205 {
0206     spin_lock(&MSDOS_I(inode)->cache_lru_lock);
0207     __fat_cache_inval_inode(inode);
0208     spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
0209 }
0210 
0211 static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
0212 {
0213     cid->nr_contig++;
0214     return ((cid->dcluster + cid->nr_contig) == dclus);
0215 }
0216 
0217 static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
0218 {
0219     cid->id = FAT_CACHE_VALID;
0220     cid->fcluster = fclus;
0221     cid->dcluster = dclus;
0222     cid->nr_contig = 0;
0223 }
0224 
/*
 * Resolve file-relative cluster @cluster of @inode to its on-disk
 * cluster by walking the FAT chain, using the per-inode cache as a
 * starting point and caching newly discovered contiguous runs.
 *
 * Returns 0 with *fclus/*dclus set on success, FAT_ENT_EOF if the
 * chain ends before @cluster, or a negative errno (-EIO on a corrupt
 * or looping chain, or whatever fat_ent_read() failed with).
 */
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	/* a valid chain cannot exceed the maximum file size in clusters */
	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (!fat_valid_entry(sbi, *dclus)) {
		fat_fs_error_ratelimit(sb,
			"%s: invalid start cluster (i_pos %lld, start %08x)",
			__func__, MSDOS_I(inode)->i_pos, *dclus);
		return -EIO;
	}
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
				"%s: detected the cluster chain loop (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			/* a live chain must never point at a free cluster */
			fat_fs_error_ratelimit(sb,
				"%s: invalid cluster chain (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			/* chain ended early; remember what we learned so far */
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		/* start a new cached run whenever on-disk contiguity breaks */
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}
0290 
0291 static int fat_bmap_cluster(struct inode *inode, int cluster)
0292 {
0293     struct super_block *sb = inode->i_sb;
0294     int ret, fclus, dclus;
0295 
0296     if (MSDOS_I(inode)->i_start == 0)
0297         return 0;
0298 
0299     ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
0300     if (ret < 0)
0301         return ret;
0302     else if (ret == FAT_ENT_EOF) {
0303         fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
0304                  __func__, MSDOS_I(inode)->i_pos);
0305         return -EIO;
0306     }
0307     return dclus;
0308 }
0309 
0310 int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
0311                sector_t last_block,
0312                unsigned long *mapped_blocks, sector_t *bmap)
0313 {
0314     struct super_block *sb = inode->i_sb;
0315     struct msdos_sb_info *sbi = MSDOS_SB(sb);
0316     int cluster, offset;
0317 
0318     cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
0319     offset  = sector & (sbi->sec_per_clus - 1);
0320     cluster = fat_bmap_cluster(inode, cluster);
0321     if (cluster < 0)
0322         return cluster;
0323     else if (cluster) {
0324         *bmap = fat_clus_to_blknr(sbi, cluster) + offset;
0325         *mapped_blocks = sbi->sec_per_clus - offset;
0326         if (*mapped_blocks > last_block - sector)
0327             *mapped_blocks = last_block - sector;
0328     }
0329 
0330     return 0;
0331 }
0332 
0333 static int is_exceed_eof(struct inode *inode, sector_t sector,
0334              sector_t *last_block, int create)
0335 {
0336     struct super_block *sb = inode->i_sb;
0337     const unsigned long blocksize = sb->s_blocksize;
0338     const unsigned char blocksize_bits = sb->s_blocksize_bits;
0339 
0340     *last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
0341     if (sector >= *last_block) {
0342         if (!create)
0343             return 1;
0344 
0345         /*
0346          * ->mmu_private can access on only allocation path.
0347          * (caller must hold ->i_mutex)
0348          */
0349         *last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
0350             >> blocksize_bits;
0351         if (sector >= *last_block)
0352             return 1;
0353     }
0354 
0355     return 0;
0356 }
0357 
/*
 * Map logical block @sector of @inode to a physical block.
 *
 * *phys receives the physical block (left 0 when unmapped) and
 * *mapped_blocks the number of contiguous blocks valid from *phys.
 * @create selects the allocation-path EOF bound (->mmu_private);
 * @from_bmap bounds the lookup by i_blocks instead of i_size.
 * Returns 0 (possibly with *phys == 0) or a negative errno.
 */
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create, bool from_bmap)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	sector_t last_block;

	*phys = 0;
	*mapped_blocks = 0;
	/*
	 * On FAT12/16 the root directory lives in the fixed area
	 * described by dir_start/dir_entries, not in the cluster heap.
	 */
	if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	if (!from_bmap) {
		if (is_exceed_eof(inode, sector, &last_block, create))
			return 0;
	} else {
		/* i_blocks counts 512-byte units; convert to fs blocks */
		last_block = inode->i_blocks >>
				(inode->i_sb->s_blocksize_bits - 9);
		if (sector >= last_block)
			return 0;
	}

	return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
				      phys);
}