/*
 * align.c - handle alignment exceptions for the PowerPC.
 *
 * Emulates unaligned SPE load/store accesses directly and hands all
 * other alignment faults to the generic instruction emulation code.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
#include <asm/inst.h>

struct aligninfo {
	unsigned char len;	/* access size in bytes */
	unsigned char flags;	/* LD or ST plus the modifier bits below */
};

#define INVALID	{ 0, 0 }

/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value */
#define SW	0x20	/* byte swap */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */

#ifdef CONFIG_SPE

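/*
 * Alignment information for the SPE load/store forms, indexed by
 * (instruction >> 1) & 0x1f.  Each entry gives the access size in bytes
 * and the flag bits used by emulate_spe().
 */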
static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },
	{ 8, LD+E4 },
	{ 8, LD },
	INVALID,
	{ 2, LD },
	INVALID,
	{ 2, LD },
	{ 2, LD+SE },
	{ 4, LD },
	INVALID,
	{ 4, LD },
	{ 4, LD+SE },
	{ 4, LD+E4 },
	INVALID,
	{ 4, LD },
	INVALID,

	{ 8, ST+E8 },
	{ 8, ST+E4 },
	{ 8, ST },
	INVALID,
	INVALID,
	INVALID,
	INVALID,
	INVALID,
	{ 4, ST },
	INVALID,
	{ 4, ST },
	INVALID,
	{ 4, ST+E4 },
	INVALID,
	{ 4, ST+E4 },
	INVALID,
};

/* SPE opcode index values, i.e. (instr >> 1) & 0x1f, as used in spe_aligninfo[] */
#define EVLDD		0x00
#define EVLDW		0x01
#define EVLDH		0x02
#define EVLHHESPLAT	0x04
#define EVLHHOUSPLAT	0x06
#define EVLHHOSSPLAT	0x07
#define EVLWHE		0x08
#define EVLWHOU		0x0A
#define EVLWHOS		0x0B
#define EVLWWSPLAT	0x0C
#define EVLWHSPLAT	0x0E
#define EVSTDD		0x10
#define EVSTDW		0x11
#define EVSTDH		0x12
#define EVSTWHE		0x18
#define EVSTWHO		0x1A
#define EVSTWWE		0x1C
#define EVSTWWO		0x1E

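/*
 * Emulate an unaligned SPE load or store.
 *
 * Returns 1 on success, 0 if the fault did not come from user mode,
 * -EFAULT if the user access faults, and -EINVAL for an unrecognized
 * instruction form.
 */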
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       ppc_inst_t ppc_instr)
{
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags, instr;

	instr = ppc_inst_val(ppc_instr);
	instr = (instr >> 1) & 0x1f;

	/* regs->dar holds the faulting data address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

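	/*
	 * For stores, gather the source data from the SPE register halves
	 * (thread.evr holds the high word, the GPR the low word).  For loads,
	 * fetch the bytes from user memory into a temporary buffer first.
	 */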
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		p = addr;

		if (!user_read_access_begin(addr, nb))
			return -EFAULT;

		switch (nb) {
		case 8:
			unsafe_get_user(temp.v[0], p++, Efault_read);
			unsafe_get_user(temp.v[1], p++, Efault_read);
			unsafe_get_user(temp.v[2], p++, Efault_read);
			unsafe_get_user(temp.v[3], p++, Efault_read);
			fallthrough;
		case 4:
			unsafe_get_user(temp.v[4], p++, Efault_read);
			unsafe_get_user(temp.v[5], p++, Efault_read);
			fallthrough;
		case 2:
			unsafe_get_user(temp.v[6], p++, Efault_read);
			unsafe_get_user(temp.v[7], p++, Efault_read);
		}
		user_read_access_end();

		/* Rearrange the fetched data according to the instruction form */
		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

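	/*
	 * If the SW (byte swap) flag is set, swap the data at the granularity
	 * selected by E8/E4: 64 bits, 32-bit words, or 16-bit halfwords.
	 */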
	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

	/* Sign-extend the loaded halfwords into full words */
	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

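	/* Store the result to user memory, or write it back to the evr/GPR halves */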
	if (flags & ST) {
		p = addr;

		if (!user_write_access_begin(addr, nb))
			return -EFAULT;

		switch (nb) {
		case 8:
			unsafe_put_user(data.v[0], p++, Efault_write);
			unsafe_put_user(data.v[1], p++, Efault_write);
			unsafe_put_user(data.v[2], p++, Efault_write);
			unsafe_put_user(data.v[3], p++, Efault_write);
			fallthrough;
		case 4:
			unsafe_put_user(data.v[4], p++, Efault_write);
			unsafe_put_user(data.v[5], p++, Efault_write);
			fallthrough;
		case 2:
			unsafe_put_user(data.v[6], p++, Efault_write);
			unsafe_put_user(data.v[7], p++, Efault_write);
		}
		user_write_access_end();
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;

Efault_read:
	user_read_access_end();
	return -EFAULT;

Efault_write:
	user_write_access_end();
	return -EFAULT;
}
#endif /* CONFIG_SPE */

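/*
 * Called on an alignment exception.  Attempts to fix up the access by
 * emulating the faulting instruction.
 *
 * Returns 1 on success, -EFAULT if the instruction cannot be read,
 * -EIO if the instruction cannot or must not be emulated, and -EINVAL
 * if instruction analysis fails.  The SPE path may also return 0 when
 * the fault did not come from user mode.
 */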
int fix_alignment(struct pt_regs *regs)
{
	ppc_inst_t instr;
	struct instruction_op op;
	int r, type;

	if (is_kernel_addr(regs->nip))
		r = copy_inst_from_kernel_nofault(&instr, (void *)regs->nip);
	else
		r = __get_user_instr(instr, (void __user *)regs->nip);

	if (unlikely(r))
		return -EFAULT;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		/*
		 * The faulting context runs with the opposite endianness from
		 * the kernel.  The old PPC little-endian mode is not handled;
		 * otherwise just byte-swap the instruction image.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			return -EIO;
		instr = ppc_inst_swab(instr);
	}

#ifdef CONFIG_SPE
	/* SPE load/store instructions use primary opcode 4 */
	if (ppc_inst_primary_opcode(instr) == 0x4) {
		int reg = (ppc_inst_val(instr) >> 21) & 0x1f;

		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif

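	/*
	 * copy/paste (ISA 3.0) accesses are matched by the mask below and are
	 * deliberately not emulated.
	 */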
	if ((ppc_inst_val(instr) & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
		return -EIO;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return -EINVAL;

	type = GETTYPE(op.type);
	if (!OP_IS_LOAD_STORE(type)) {
		if (op.type != CACHEOP + DCBZ)
			return -EINVAL;
		PPC_WARN_ALIGNMENT(dcbz, regs);
		WARN_ON_ONCE(!user_mode(regs));
		r = emulate_dcbz(op.ea, regs);
	} else {
		if (type == LARX || type == STCX)
			return -EIO;
		PPC_WARN_ALIGNMENT(unaligned, regs);
		r = emulate_loadstore(regs, &op);
	}

	/* r == 0 means the emulation succeeded */
	if (!r)
		return 1;
	return r;
}