0001
0002
0003
0004
0005
0006
0007
0008 #include "check.h"
0009
/*
 * AIX LVM record, read from sector 7 of an AIX-labelled disk.
 * All multi-byte fields are big-endian on disk.
 */
struct lvm_rec {
	char lvm_id[4];		/* record identifier — presumably a magic string; TODO confirm */
	char reserved4[16];
	__be32 lvmarea_len;	/* length of the LVM area — unused by this parser */
	__be32 vgda_len;	/* VGDA length, in 512-byte sectors */
	__be32 vgda_psn[2];	/* sector numbers of the two VGDA copies; only [0] is read */
	char reserved36[10];
	__be16 pp_size;		/* log2 of the physical partition size in bytes */
	char reserved46[12];
	__be16 version;		/* LVM record version; only version 1 is parsed */
};
0021
/*
 * Volume Group Descriptor Area header, read from the sector named by
 * lvm_rec.vgda_psn[0].  Big-endian on disk.
 */
struct vgda {
	__be32 secs;		/* timestamp seconds — assumed; TODO confirm */
	__be32 usec;		/* timestamp microseconds — assumed; TODO confirm */
	char reserved8[16];
	__be16 numlvs;		/* number of logical volumes in the group (only field used here) */
	__be16 maxlvs;
	__be16 pp_size;
	__be16 numpvs;
	__be16 total_vgdas;
	__be16 vgda_size;
};
0033
/*
 * Per-logical-volume descriptor; an array of these is read from the
 * sector following the VGDA header (vgda_psn[0] + 1).
 */
struct lvd {
	__be16 lv_ix;		/* logical volume index */
	__be16 res2;
	__be16 res4;
	__be16 maxsize;
	__be16 lv_state;
	__be16 mirror;
	__be16 mirror_policy;
	__be16 num_lps;		/* number of logical partitions making up this LV */
	__be16 res10[8];
};
0045
/*
 * Logical volume name table entry.  The name is NOT guaranteed to be
 * NUL-terminated on disk, so readers must bound-copy before printing.
 */
struct lvname {
	char name[64];
};
0049
/*
 * Physical partition entry: maps one physical partition on this disk to
 * a (logical volume, logical partition) pair.  Big-endian on disk.
 */
struct ppe {
	__be16 lv_ix;		/* owning LV index, 1-based on disk */
	unsigned short res2;
	unsigned short res4;
	__be16 lp_ix;		/* logical partition index within the LV, 1-based; 0 = unallocated */
	unsigned short res8[12];
};
0057
/*
 * Physical Volume Descriptor, read starting at vgda_psn[0] + 17.
 * Describes every physical partition on the disk.
 */
struct pvd {
	char reserved0[16];
	__be16 pp_count;	/* number of valid entries in ppe[] */
	char reserved18[2];
	__be32 psn_part1;	/* sector offset of the first physical partition — presumably; TODO confirm */
	char reserved24[8];
	struct ppe ppe[1016];	/* physical-partition map */
};
0066
/* Number of entries read from the logical volume name table. */
#define LVM_MAXLVS 256
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079 static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
0080 size_t count)
0081 {
0082 size_t totalreadcount = 0;
0083
0084 if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
0085 return 0;
0086
0087 while (count) {
0088 int copied = 512;
0089 Sector sect;
0090 unsigned char *data = read_part_sector(state, lba++, §);
0091 if (!data)
0092 break;
0093 if (copied > count)
0094 copied = count;
0095 memcpy(buffer, data, copied);
0096 put_dev_sector(sect);
0097 buffer += copied;
0098 totalreadcount += copied;
0099 count -= copied;
0100 }
0101 return totalreadcount;
0102 }
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113 static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
0114 {
0115 size_t count = sizeof(struct pvd);
0116 struct pvd *p;
0117
0118 p = kmalloc(count, GFP_KERNEL);
0119 if (!p)
0120 return NULL;
0121
0122 if (read_lba(state, lba, (u8 *) p, count) < count) {
0123 kfree(p);
0124 return NULL;
0125 }
0126 return p;
0127 }
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138 static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
0139 {
0140 size_t count = sizeof(struct lvname) * LVM_MAXLVS;
0141 struct lvname *p;
0142
0143 p = kmalloc(count, GFP_KERNEL);
0144 if (!p)
0145 return NULL;
0146
0147 if (read_lba(state, lba, (u8 *) p, count) < count) {
0148 kfree(p);
0149 return NULL;
0150 }
0151 return p;
0152 }
0153
0154 int aix_partition(struct parsed_partitions *state)
0155 {
0156 int ret = 0;
0157 Sector sect;
0158 unsigned char *d;
0159 u32 pp_bytes_size;
0160 u32 pp_blocks_size = 0;
0161 u32 vgda_sector = 0;
0162 u32 vgda_len = 0;
0163 int numlvs = 0;
0164 struct pvd *pvd = NULL;
0165 struct lv_info {
0166 unsigned short pps_per_lv;
0167 unsigned short pps_found;
0168 unsigned char lv_is_contiguous;
0169 } *lvip;
0170 struct lvname *n = NULL;
0171
0172 d = read_part_sector(state, 7, §);
0173 if (d) {
0174 struct lvm_rec *p = (struct lvm_rec *)d;
0175 u16 lvm_version = be16_to_cpu(p->version);
0176 char tmp[64];
0177
0178 if (lvm_version == 1) {
0179 int pp_size_log2 = be16_to_cpu(p->pp_size);
0180
0181 pp_bytes_size = 1 << pp_size_log2;
0182 pp_blocks_size = pp_bytes_size / 512;
0183 snprintf(tmp, sizeof(tmp),
0184 " AIX LVM header version %u found\n",
0185 lvm_version);
0186 vgda_len = be32_to_cpu(p->vgda_len);
0187 vgda_sector = be32_to_cpu(p->vgda_psn[0]);
0188 } else {
0189 snprintf(tmp, sizeof(tmp),
0190 " unsupported AIX LVM version %d found\n",
0191 lvm_version);
0192 }
0193 strlcat(state->pp_buf, tmp, PAGE_SIZE);
0194 put_dev_sector(sect);
0195 }
0196 if (vgda_sector && (d = read_part_sector(state, vgda_sector, §))) {
0197 struct vgda *p = (struct vgda *)d;
0198
0199 numlvs = be16_to_cpu(p->numlvs);
0200 put_dev_sector(sect);
0201 }
0202 lvip = kcalloc(state->limit, sizeof(struct lv_info), GFP_KERNEL);
0203 if (!lvip)
0204 return 0;
0205 if (numlvs && (d = read_part_sector(state, vgda_sector + 1, §))) {
0206 struct lvd *p = (struct lvd *)d;
0207 int i;
0208
0209 n = alloc_lvn(state, vgda_sector + vgda_len - 33);
0210 if (n) {
0211 int foundlvs = 0;
0212
0213 for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
0214 lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
0215 if (lvip[i].pps_per_lv)
0216 foundlvs += 1;
0217 }
0218
0219 pvd = alloc_pvd(state, vgda_sector + 17);
0220 }
0221 put_dev_sector(sect);
0222 }
0223 if (pvd) {
0224 int numpps = be16_to_cpu(pvd->pp_count);
0225 int psn_part1 = be32_to_cpu(pvd->psn_part1);
0226 int i;
0227 int cur_lv_ix = -1;
0228 int next_lp_ix = 1;
0229 int lp_ix;
0230
0231 for (i = 0; i < numpps; i += 1) {
0232 struct ppe *p = pvd->ppe + i;
0233 unsigned int lv_ix;
0234
0235 lp_ix = be16_to_cpu(p->lp_ix);
0236 if (!lp_ix) {
0237 next_lp_ix = 1;
0238 continue;
0239 }
0240 lv_ix = be16_to_cpu(p->lv_ix) - 1;
0241 if (lv_ix >= state->limit) {
0242 cur_lv_ix = -1;
0243 continue;
0244 }
0245 lvip[lv_ix].pps_found += 1;
0246 if (lp_ix == 1) {
0247 cur_lv_ix = lv_ix;
0248 next_lp_ix = 1;
0249 } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
0250 next_lp_ix = 1;
0251 continue;
0252 }
0253 if (lp_ix == lvip[lv_ix].pps_per_lv) {
0254 char tmp[70];
0255
0256 put_partition(state, lv_ix + 1,
0257 (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
0258 lvip[lv_ix].pps_per_lv * pp_blocks_size);
0259 snprintf(tmp, sizeof(tmp), " <%s>\n",
0260 n[lv_ix].name);
0261 strlcat(state->pp_buf, tmp, PAGE_SIZE);
0262 lvip[lv_ix].lv_is_contiguous = 1;
0263 ret = 1;
0264 next_lp_ix = 1;
0265 } else
0266 next_lp_ix += 1;
0267 }
0268 for (i = 0; i < state->limit; i += 1)
0269 if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
0270 char tmp[sizeof(n[i].name) + 1];
0271
0272 snprintf(tmp, sizeof(tmp), "%s", n[i].name);
0273 pr_warn("partition %s (%u pp's found) is "
0274 "not contiguous\n",
0275 tmp, lvip[i].pps_found);
0276 }
0277 kfree(pvd);
0278 }
0279 kfree(n);
0280 kfree(lvip);
0281 return ret;
0282 }