0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include "hpfs_fn.h"
0011
0012 __le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
0013 {
0014 return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
0015 }
0016
0017 __le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
0018 struct quad_buffer_head *qbh, char *id)
0019 {
0020 secno sec;
0021 __le32 *ret;
0022 unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
0023 if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
0024 hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
0025 return NULL;
0026 }
0027 sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
0028 if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
0029 hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
0030 return NULL;
0031 }
0032 ret = hpfs_map_4sectors(s, sec, qbh, 4);
0033 if (ret) hpfs_prefetch_bitmap(s, bmp_block + 1);
0034 return ret;
0035 }
0036
0037 void hpfs_prefetch_bitmap(struct super_block *s, unsigned bmp_block)
0038 {
0039 unsigned to_prefetch, next_prefetch;
0040 unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
0041 if (unlikely(bmp_block >= n_bands))
0042 return;
0043 to_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
0044 if (unlikely(bmp_block + 1 >= n_bands))
0045 next_prefetch = 0;
0046 else
0047 next_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block + 1]);
0048 hpfs_prefetch_sectors(s, to_prefetch, 4 + 4 * (to_prefetch + 4 == next_prefetch));
0049 }
0050
0051
0052
0053
0054
0055
0056
/*
 * Load the volume's code page and build a 256-byte case-conversion
 * table from it.  @cps is the sector of the code page directory; only
 * the first code page listed there is used.  Returns a kmalloc'ed
 * 256-byte table (caller kfree()s it) or NULL on any error.
 */
unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
{
	struct buffer_head *bh;
	secno cpds;		/* sector holding the code page data */
	unsigned cpi;		/* index of the page within that sector */
	unsigned char *ptr;
	unsigned char *cp_table;
	int i;
	struct code_page_data *cpd;
	struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
	if (!cp) return NULL;
	/* validate the directory before trusting anything inside it */
	if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) {
		pr_err("Code page directory magic doesn't match (magic = %08x)\n",
			le32_to_cpu(cp->magic));
		brelse(bh);
		return NULL;
	}
	if (!le32_to_cpu(cp->n_code_pages)) {
		pr_err("n_code_pages == 0\n");
		brelse(bh);
		return NULL;
	}
	cpds = le32_to_cpu(cp->array[0].code_page_data);
	cpi = le16_to_cpu(cp->array[0].index);
	brelse(bh);

	/* a code page data sector describes at most 3 pages */
	if (cpi >= 3) {
		pr_err("Code page index out of array\n");
		return NULL;
	}

	if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
	/* offset must leave room for the 128-byte table within the sector */
	if (le16_to_cpu(cpd->offs[cpi]) > 0x178) {
		pr_err("Code page index out of sector\n");
		brelse(bh);
		return NULL;
	}
	ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6;
	if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
		pr_err("out of memory for code page table\n");
		brelse(bh);
		return NULL;
	}
	/*
	 * First half comes straight from disk: the on-disk case map for
	 * characters 0x80-0xff (presumably an upcasing table inherited
	 * from the OS/2 HPFS format -- confirm against the HPFS spec).
	 */
	memcpy(cp_table, ptr, 128);
	brelse(bh);

	/*
	 * Build the second half as the inverse mapping: start from the
	 * identity, then for every high character whose mapped form is a
	 * different high character, record the back-pointer.
	 */
	for (i=128; i<256; i++) cp_table[i]=i;
	for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128)
		cp_table[cp_table[i-128]] = i;

	return cp_table;
}
0111
0112 __le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
0113 {
0114 struct buffer_head *bh;
0115 int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
0116 int i;
0117 __le32 *b;
0118 if (!(b = kmalloc_array(n, 512, GFP_KERNEL))) {
0119 pr_err("can't allocate memory for bitmap directory\n");
0120 return NULL;
0121 }
0122 for (i=0;i<n;i++) {
0123 __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
0124 if (!d) {
0125 kfree(b);
0126 return NULL;
0127 }
0128 memcpy((char *)b + 512 * i, d, 512);
0129 brelse(bh);
0130 }
0131 return b;
0132 }
0133
0134 void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock)
0135 {
0136 struct quad_buffer_head qbh;
0137 __le32 *directory;
0138 u32 n_hotfixes, n_used_hotfixes;
0139 unsigned i;
0140
0141 n_hotfixes = le32_to_cpu(spareblock->n_spares);
0142 n_used_hotfixes = le32_to_cpu(spareblock->n_spares_used);
0143
0144 if (n_hotfixes > 256 || n_used_hotfixes > n_hotfixes) {
0145 hpfs_error(s, "invalid number of hotfixes: %u, used: %u", n_hotfixes, n_used_hotfixes);
0146 return;
0147 }
0148 if (!(directory = hpfs_map_4sectors(s, le32_to_cpu(spareblock->hotfix_map), &qbh, 0))) {
0149 hpfs_error(s, "can't load hotfix map");
0150 return;
0151 }
0152 for (i = 0; i < n_used_hotfixes; i++) {
0153 hpfs_sb(s)->hotfix_from[i] = le32_to_cpu(directory[i]);
0154 hpfs_sb(s)->hotfix_to[i] = le32_to_cpu(directory[n_hotfixes + i]);
0155 }
0156 hpfs_sb(s)->n_hotfixes = n_used_hotfixes;
0157 hpfs_brelse4(&qbh);
0158 }
0159
0160
0161
0162
0163
/*
 * Map the fnode for inode @ino.  When mount-time checking (sb_chk) is
 * enabled, validate the fnode's magic, its btree bookkeeping and the
 * extended-attribute layout before handing it out.  Returns the mapped
 * fnode (caller brelse()s *bhp) or NULL on error.
 */
struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp)
{
	struct fnode *fnode;
	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) {
		return NULL;
	}
	if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) {
		if (hpfs_sb(s)->sb_chk) {
			struct extended_attribute *ea;
			struct extended_attribute *ea_end;
			if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) {
				hpfs_error(s, "bad magic on fnode %08lx",
					(unsigned long)ino);
				goto bail;
			}
			if (!fnode_is_dir(fnode)) {
				/*
				 * A file fnode's btree has a fixed capacity:
				 * 12 slots when it holds internal pointers,
				 * 8 when it holds leaf extents.  used + free
				 * must add up to exactly that capacity.
				 */
				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
				    (bp_internal(&fnode->btree) ? 12 : 8)) {
					hpfs_error(s,
						"bad number of nodes in fnode %08lx",
						(unsigned long)ino);
					goto bail;
				}
				/*
				 * first_free must point just past the used
				 * entries: 8-byte btree header plus 8-byte
				 * internal entries or 12-byte leaf extents.
				 */
				if (le16_to_cpu(fnode->btree.first_free) !=
				    8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
					hpfs_error(s,
						"bad first_free pointer in fnode %08lx",
						(unsigned long)ino);
					goto bail;
				}
			}
			/*
			 * The in-fnode EA area must start at or after offset
			 * 0xc4 and, together with the ACL, fit inside the
			 * 512-byte fnode sector.
			 */
			if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 ||
			   le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) {
				hpfs_error(s,
					"bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
					(unsigned long)ino,
					le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
				goto bail;
			}
			/* walk the EA chain; it must land exactly on ea_end */
			ea = fnode_ea(fnode);
			ea_end = fnode_end_ea(fnode);
			while (ea != ea_end) {
				if (ea > ea_end) {
					hpfs_error(s, "bad EA in fnode %08lx",
						(unsigned long)ino);
					goto bail;
				}
				ea = next_ea(ea);
			}
		}
	}
	return fnode;
	bail:
	brelse(*bhp);
	return NULL;
}
0220
0221 struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp)
0222 {
0223 struct anode *anode;
0224 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
0225 if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
0226 if (hpfs_sb(s)->sb_chk) {
0227 if (le32_to_cpu(anode->magic) != ANODE_MAGIC) {
0228 hpfs_error(s, "bad magic on anode %08x", ano);
0229 goto bail;
0230 }
0231 if (le32_to_cpu(anode->self) != ano) {
0232 hpfs_error(s, "self pointer invalid on anode %08x", ano);
0233 goto bail;
0234 }
0235 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
0236 (bp_internal(&anode->btree) ? 60 : 40)) {
0237 hpfs_error(s, "bad number of nodes in anode %08x", ano);
0238 goto bail;
0239 }
0240 if (le16_to_cpu(anode->btree.first_free) !=
0241 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
0242 hpfs_error(s, "bad first_free pointer in anode %08x", ano);
0243 goto bail;
0244 }
0245 }
0246 return anode;
0247 bail:
0248 brelse(*bhp);
0249 return NULL;
0250 }
0251
0252
0253
0254
0255
/*
 * Map a 4-sector (2048-byte) dnode.  When mount-time checking (sb_chk)
 * is enabled, verify the magic, the self pointer and first_free, then
 * walk every dirent to validate sizes, down pointers and the
 * terminating \377 entry -- a corrupt dirent chain could otherwise
 * loop forever or index outside the dnode.  Returns the dnode (caller
 * releases with hpfs_brelse4) or NULL on error.
 */
struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
			     struct quad_buffer_head *qbh)
{
	struct dnode *dnode;
	if (hpfs_sb(s)->sb_chk) {
		if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL;
		/* dnodes must start on a 4-sector boundary */
		if (secno & 3) {
			hpfs_error(s, "dnode %08x not byte-aligned", secno);
			return NULL;
		}
	}
	if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
		if (hpfs_sb(s)->sb_chk) {
			unsigned p, pp = 0;	/* current / previous dirent offset */
			unsigned char *d = (unsigned char *)dnode;
			int b = 0;	/* bit 0: leaf dirent seen, bit 1: dirent with down pointer seen */
			if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) {
				hpfs_error(s, "bad magic on dnode %08x", secno);
				goto bail;
			}
			/* wrong self pointer is reported but not fatal */
			if (le32_to_cpu(dnode->self) != secno)
				hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self));
			if (le32_to_cpu(dnode->first_free) > 2048) {
				hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free));
				goto bail;
			}
			/* dirents start at offset 20; d[p] | d[p+1]<<8 is the
			   little-endian length of the dirent at p */
			for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
				struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
				if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
					hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				/* length must equal the 4-byte-aligned size of
				   header + name + optional down pointer; a
				   merely oversized dirent is tolerated on
				   read-only mounts */
				if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
					if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
					hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				ok:
				if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down;
				if (de->down) if (de_down_pointer(de) < 0x10) {
					hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				pp = p;
			}
			if (p != le32_to_cpu(dnode->first_free)) {
				hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
				goto bail;
			}
			/* last dirent must be the \377 end marker: namelen 1
			   (offset 30) with name byte 0xff (offset 31) */
			if (d[pp + 30] != 1 || d[pp + 31] != 255) {
				hpfs_error(s, "dnode %08x does not end with \\377 entry", secno);
				goto bail;
			}
			/* a mix of dirents with and without down pointers
			   means the tree is unbalanced (warning only) */
			if (b == 3)
				pr_err("unbalanced dnode tree, dnode %08x; see hpfs.txt 4 more info\n",
					secno);
		}
	return dnode;
	bail:
	hpfs_brelse4(qbh);
	return NULL;
}
0321
0322 dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
0323 {
0324 struct buffer_head *bh;
0325 struct fnode *fnode;
0326 dnode_secno dno;
0327
0328 fnode = hpfs_map_fnode(s, ino, &bh);
0329 if (!fnode)
0330 return 0;
0331
0332 dno = le32_to_cpu(fnode->u.external[0].disk_secno);
0333 brelse(bh);
0334 return dno;
0335 }