Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Support for Vector Instructions
0004  *
0005  * Assembler macros to generate .byte/.word code for particular
0006  * vector instructions that are supported by recent binutils (>= 2.26) only.
0007  *
0008  * Copyright IBM Corp. 2015
0009  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
0010  */
0011 
0012 #ifndef __ASM_S390_VX_INSN_H
0013 #define __ASM_S390_VX_INSN_H
0014 
0015 #ifdef __ASSEMBLY__
0016 
0017 
0018 /* Macros to generate vector instruction byte code */
0019 
0020 /* GR_NUM - Retrieve general-purpose register number
0021  *
0022  * @opd:    Operand (assembler symbol) that receives the register number
0023  * @gr:     String designating the register in the format "%rN"
0024  *
 * If @gr is none of the strings %r0..%r15, it is assumed to already be
 * a plain register number (or expression) and is assigned to @opd as-is.
 */
0025 .macro  GR_NUM  opd gr
0026     \opd = 255
0027     .ifc \gr,%r0
0028         \opd = 0
0029     .endif
0030     .ifc \gr,%r1
0031         \opd = 1
0032     .endif
0033     .ifc \gr,%r2
0034         \opd = 2
0035     .endif
0036     .ifc \gr,%r3
0037         \opd = 3
0038     .endif
0039     .ifc \gr,%r4
0040         \opd = 4
0041     .endif
0042     .ifc \gr,%r5
0043         \opd = 5
0044     .endif
0045     .ifc \gr,%r6
0046         \opd = 6
0047     .endif
0048     .ifc \gr,%r7
0049         \opd = 7
0050     .endif
0051     .ifc \gr,%r8
0052         \opd = 8
0053     .endif
0054     .ifc \gr,%r9
0055         \opd = 9
0056     .endif
0057     .ifc \gr,%r10
0058         \opd = 10
0059     .endif
0060     .ifc \gr,%r11
0061         \opd = 11
0062     .endif
0063     .ifc \gr,%r12
0064         \opd = 12
0065     .endif
0066     .ifc \gr,%r13
0067         \opd = 13
0068     .endif
0069     .ifc \gr,%r14
0070         \opd = 14
0071     .endif
0072     .ifc \gr,%r15
0073         \opd = 15
0074     .endif
0075     .if \opd == 255     /* sentinel still set: no %rN string matched */
0076         \opd = \gr
0077     .endif
0078 .endm
0079 
0080 /* VX_NUM - Retrieve vector register number
0081  *
0082  * @opd:    Operand (assembler symbol) that receives the register number
0083  * @vxr:    String designating the register in the format "%vN"
0084  *
0085  * The vector register number (0..31) is used as input to the
0086  * instruction and also to compute the RXB field of the
0087  * instruction (bit 0x10 of the number selects the RXB extension bit).
0088  */
0089 .macro  VX_NUM  opd vxr
0090     \opd = 255
0091     .ifc \vxr,%v0
0092         \opd = 0
0093     .endif
0094     .ifc \vxr,%v1
0095         \opd = 1
0096     .endif
0097     .ifc \vxr,%v2
0098         \opd = 2
0099     .endif
0100     .ifc \vxr,%v3
0101         \opd = 3
0102     .endif
0103     .ifc \vxr,%v4
0104         \opd = 4
0105     .endif
0106     .ifc \vxr,%v5
0107         \opd = 5
0108     .endif
0109     .ifc \vxr,%v6
0110         \opd = 6
0111     .endif
0112     .ifc \vxr,%v7
0113         \opd = 7
0114     .endif
0115     .ifc \vxr,%v8
0116         \opd = 8
0117     .endif
0118     .ifc \vxr,%v9
0119         \opd = 9
0120     .endif
0121     .ifc \vxr,%v10
0122         \opd = 10
0123     .endif
0124     .ifc \vxr,%v11
0125         \opd = 11
0126     .endif
0127     .ifc \vxr,%v12
0128         \opd = 12
0129     .endif
0130     .ifc \vxr,%v13
0131         \opd = 13
0132     .endif
0133     .ifc \vxr,%v14
0134         \opd = 14
0135     .endif
0136     .ifc \vxr,%v15
0137         \opd = 15
0138     .endif
0139     .ifc \vxr,%v16
0140         \opd = 16
0141     .endif
0142     .ifc \vxr,%v17
0143         \opd = 17
0144     .endif
0145     .ifc \vxr,%v18
0146         \opd = 18
0147     .endif
0148     .ifc \vxr,%v19
0149         \opd = 19
0150     .endif
0151     .ifc \vxr,%v20
0152         \opd = 20
0153     .endif
0154     .ifc \vxr,%v21
0155         \opd = 21
0156     .endif
0157     .ifc \vxr,%v22
0158         \opd = 22
0159     .endif
0160     .ifc \vxr,%v23
0161         \opd = 23
0162     .endif
0163     .ifc \vxr,%v24
0164         \opd = 24
0165     .endif
0166     .ifc \vxr,%v25
0167         \opd = 25
0168     .endif
0169     .ifc \vxr,%v26
0170         \opd = 26
0171     .endif
0172     .ifc \vxr,%v27
0173         \opd = 27
0174     .endif
0175     .ifc \vxr,%v28
0176         \opd = 28
0177     .endif
0178     .ifc \vxr,%v29
0179         \opd = 29
0180     .endif
0181     .ifc \vxr,%v30
0182         \opd = 30
0183     .endif
0184     .ifc \vxr,%v31
0185         \opd = 31
0186     .endif
0187     .if \opd == 255     /* sentinel still set: no %vN string matched */
0188         \opd = \vxr
0189     .endif
0190 .endm
0191 
0192 /* RXB - Compute the RXB field from the most significant bits of up to
 * four vector register numbers
0193  *
0194  * @rxb:    Operand (assembler symbol) to store the computed RXB value
0195  * @v1:     First vector register designated operand
0196  * @v2:     Second vector register designated operand
0197  * @v3:     Third vector register designated operand
0198  * @v4:     Fourth vector register designated operand
0199  *
 * Registers 16..31 need their fifth register-number bit (0x10) encoded
 * in the RXB extension field; one RXB bit per operand position.
0199  */
0200 .macro  RXB rxb v1 v2=0 v3=0 v4=0
0201     \rxb = 0
0202     .if \v1 & 0x10
0203         \rxb = \rxb | 0x08
0204     .endif
0205     .if \v2 & 0x10
0206         \rxb = \rxb | 0x04
0207     .endif
0208     .if \v3 & 0x10
0209         \rxb = \rxb | 0x02
0210     .endif
0211     .if \v4 & 0x10
0212         \rxb = \rxb | 0x01
0213     .endif
0214 .endm
0215 
0216 /* MRXB - Generate Element Size Control and RXB value
0217  *
0218  * @m:      Element size control
0219  * @v1:     First vector register designated operand (for RXB)
0220  * @v2:     Second vector register designated operand (for RXB)
0221  * @v3:     Third vector register designated operand (for RXB)
0222  * @v4:     Fourth vector register designated operand (for RXB)
0223  *
 * Emits one byte: @m in the high nibble, RXB in the low nibble.
 * Note: clobbers the file-scope assembler symbol "rxb".
0223  */
0224 .macro  MRXB    m v1 v2=0 v3=0 v4=0
0225     rxb = 0
0226     RXB rxb, \v1, \v2, \v3, \v4
0227     .byte   (\m << 4) | rxb
0228 .endm
0229 
0230 /* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
0231  *
0232  * @m:      Element size control
0233  * @opc:    Opcode (second/low opcode byte of the instruction)
0234  * @v1:     First vector register designated operand (for RXB)
0235  * @v2:     Second vector register designated operand (for RXB)
0236  * @v3:     Third vector register designated operand (for RXB)
0237  * @v4:     Fourth vector register designated operand (for RXB)
0238  *
 * Emits the final two bytes of a vector instruction: m/RXB, then opcode.
0238  */
0239 .macro  MRXBOPC m opc v1 v2=0 v3=0 v4=0
0240     MRXB    \m, \v1, \v2, \v3, \v4
0241     .byte   \opc
0242 .endm
0243 
0244 /* Vector support instructions */
0245 
0246 /* VECTOR GENERATE BYTE MASK
 *
 * @vr:     Destination vector register
 * @imm2:   16-bit immediate mask
 */
0247 .macro  VGBM    vr imm2
0248     VX_NUM  v1, \vr
0249     .word   (0xE700 | ((v1&15) << 4))   /* opcode high half | V1 field */
0250     .word   \imm2
0251     MRXBOPC 0, 0x44, v1
0252 .endm
/* VZERO - clear a vector register: VGBM with an all-zero mask */
0253 .macro  VZERO   vxr
0254     VGBM    \vxr, 0
0255 .endm
/* VONE - set all bits of a vector register: VGBM with an all-ones mask */
0256 .macro  VONE    vxr
0257     VGBM    \vxr, 0xFFFF
0258 .endm
0259 
0260 /* VECTOR LOAD VR ELEMENT FROM GR
 *
 * @v:      Destination vector register
 * @gr:     Source general-purpose register
 * @disp:   Displacement selecting the element (base is fixed to %r0)
 * @m:      Element size control
 */
0261 .macro  VLVG    v, gr, disp, m
0262     VX_NUM  v1, \v
0263     GR_NUM  b2, "%r0"       /* base register hard-wired to %r0 */
0264     GR_NUM  r3, \gr
0265     .word   0xE700 | ((v1&15) << 4) | r3
0266     .word   (b2 << 12) | (\disp)
0267     MRXBOPC \m, 0x22, v1
0268 .endm
/*
 * VLVGB - load byte element: VLVG with element size control m = 0.
 *
 * Fix: the previous definition declared a spurious @base parameter and
 * forwarded five arguments to VLVG, which only accepts four (v, gr,
 * disp, m) and hard-codes its base register to %r0 — gas rejects the
 * extra positional argument.  Match the VLVGH/VLVGF/VLVGG pattern.
 */
0269 .macro  VLVGB   v, gr, index
0270     VLVG    \v, \gr, \index, 0
0271 .endm
/* Element-size variants of VLVG: H/F/G select m = 1/2/3 */
0272 .macro  VLVGH   v, gr, index
0273     VLVG    \v, \gr, \index, 1
0274 .endm
0275 .macro  VLVGF   v, gr, index
0276     VLVG    \v, \gr, \index, 2
0277 .endm
0278 .macro  VLVGG   v, gr, index
0279     VLVG    \v, \gr, \index, 3
0280 .endm
0281 
0282 /* VECTOR LOAD REGISTER
 *
 * @v1:     Destination vector register
 * @v2:     Source vector register
 */
0283 .macro  VLR v1, v2
0284     VX_NUM  v1, \v1
0285     VX_NUM  v2, \v2
0286     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0287     .word   0
0288     MRXBOPC 0, 0x56, v1, v2
0289 .endm
0290 
0291 /* VECTOR LOAD
 *
 * @v:      Destination vector register
 * @disp:   Displacement
 * @index:  Index register (defaults to %r0 = no index)
 * @base:   Base register
 */
0292 .macro  VL  v, disp, index="%r0", base
0293     VX_NUM  v1, \v
0294     GR_NUM  x2, \index
0295     GR_NUM  b2, \base
0296     .word   0xE700 | ((v1&15) << 4) | x2
0297     .word   (b2 << 12) | (\disp)
0298     MRXBOPC 0, 0x06, v1
0299 .endm
0300 
0301 /* VECTOR LOAD ELEMENT
 *
 * Common emitter for VLEB/VLEH/VLEF/VLEG; @m3 is the element index,
 * @opc selects the element-size-specific opcode byte.
 */
0302 .macro  VLEx    vr1, disp, index="%r0", base, m3, opc
0303     VX_NUM  v1, \vr1
0304     GR_NUM  x2, \index
0305     GR_NUM  b2, \base
0306     .word   0xE700 | ((v1&15) << 4) | x2
0307     .word   (b2 << 12) | (\disp)
0308     MRXBOPC \m3, \opc, v1
0309 .endm
/* Byte / halfword / word / doubleword element variants */
0310 .macro  VLEB    vr1, disp, index="%r0", base, m3
0311     VLEx    \vr1, \disp, \index, \base, \m3, 0x00
0312 .endm
0313 .macro  VLEH    vr1, disp, index="%r0", base, m3
0314     VLEx    \vr1, \disp, \index, \base, \m3, 0x01
0315 .endm
0316 .macro  VLEF    vr1, disp, index="%r0", base, m3
0317     VLEx    \vr1, \disp, \index, \base, \m3, 0x03
0318 .endm
0319 .macro  VLEG    vr1, disp, index="%r0", base, m3
0320     VLEx    \vr1, \disp, \index, \base, \m3, 0x02
0321 .endm
0322 
0323 /* VECTOR LOAD ELEMENT IMMEDIATE
 *
 * Common emitter for VLEIB/VLEIH/VLEIF/VLEIG; @m3 is the element index,
 * @opc selects the element-size-specific opcode byte.
 */
0324 .macro  VLEIx   vr1, imm2, m3, opc
0325     VX_NUM  v1, \vr1
0326     .word   0xE700 | ((v1&15) << 4)
0327     .word   \imm2
0328     MRXBOPC \m3, \opc, v1
0329 .endm
/* Note: the third parameter named "index" below is the element index
 * forwarded as m3, not an index register. */
0330 .macro  VLEIB   vr1, imm2, index
0331     VLEIx   \vr1, \imm2, \index, 0x40
0332 .endm
0333 .macro  VLEIH   vr1, imm2, index
0334     VLEIx   \vr1, \imm2, \index, 0x41
0335 .endm
0336 .macro  VLEIF   vr1, imm2, index
0337     VLEIx   \vr1, \imm2, \index, 0x43
0338 .endm
0339 .macro  VLEIG   vr1, imm2, index
0340     VLEIx   \vr1, \imm2, \index, 0x42
0341 .endm
0342 
0343 /* VECTOR LOAD GR FROM VR ELEMENT
 *
 * @gr:     Destination general-purpose register
 * @vr:     Source vector register
 * @disp:   Displacement selecting the element
 * @base:   Base register (defaults to %r0)
 * @m:      Element size control
 */
0344 .macro  VLGV    gr, vr, disp, base="%r0", m
0345     GR_NUM  r1, \gr
0346     GR_NUM  b2, \base
0347     VX_NUM  v3, \vr
0348     .word   0xE700 | (r1 << 4) | (v3&15)
0349     .word   (b2 << 12) | (\disp)
0350     MRXBOPC \m, 0x21, v3
0351 .endm
/* Element-size variants: B/H/F/G select m = 0/1/2/3 */
0352 .macro  VLGVB   gr, vr, disp, base="%r0"
0353     VLGV    \gr, \vr, \disp, \base, 0
0354 .endm
0355 .macro  VLGVH   gr, vr, disp, base="%r0"
0356     VLGV    \gr, \vr, \disp, \base, 1
0357 .endm
0358 .macro  VLGVF   gr, vr, disp, base="%r0"
0359     VLGV    \gr, \vr, \disp, \base, 2
0360 .endm
0361 .macro  VLGVG   gr, vr, disp, base="%r0"
0362     VLGV    \gr, \vr, \disp, \base, 3
0363 .endm
0364 
0365 /* VECTOR LOAD MULTIPLE
 *
 * @vfrom:  First vector register of the range
 * @vto:    Last vector register of the range
 * @disp:   Displacement
 * @base:   Base register
 * @hint:   Alignment hint (m4 field, defaults to 3)
 */
0366 .macro  VLM vfrom, vto, disp, base, hint=3
0367     VX_NUM  v1, \vfrom
0368     VX_NUM  v3, \vto
0369     GR_NUM  b2, \base
0370     .word   0xE700 | ((v1&15) << 4) | (v3&15)
0371     .word   (b2 << 12) | (\disp)
0372     MRXBOPC \hint, 0x36, v1, v3
0373 .endm
0374 
0375 /* VECTOR STORE
 *
 * @vr1:    Source vector register
 * @disp:   Displacement
 * @index:  Index register (defaults to %r0 = no index)
 * @base:   Base register
 */
0376 .macro  VST vr1, disp, index="%r0", base
0377     VX_NUM  v1, \vr1
0378     GR_NUM  x2, \index
0379     GR_NUM  b2, \base
0380     .word   0xE700 | ((v1&15) << 4) | (x2&15)
0381     .word   (b2 << 12) | (\disp)
0382     MRXBOPC 0, 0x0E, v1
0383 .endm
0384 
0385 /* VECTOR STORE MULTIPLE
 *
 * @vfrom:  First vector register of the range
 * @vto:    Last vector register of the range
 * @disp:   Displacement
 * @base:   Base register
 * @hint:   Alignment hint (m4 field, defaults to 3)
 */
0386 .macro  VSTM    vfrom, vto, disp, base, hint=3
0387     VX_NUM  v1, \vfrom
0388     VX_NUM  v3, \vto
0389     GR_NUM  b2, \base
0390     .word   0xE700 | ((v1&15) << 4) | (v3&15)
0391     .word   (b2 << 12) | (\disp)
0392     MRXBOPC \hint, 0x3E, v1, v3
0393 .endm
0394 
0395 /* VECTOR PERMUTE
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @vr4:    Permutation-control vector register (encoded via the m field)
 */
0396 .macro  VPERM   vr1, vr2, vr3, vr4
0397     VX_NUM  v1, \vr1
0398     VX_NUM  v2, \vr2
0399     VX_NUM  v3, \vr3
0400     VX_NUM  v4, \vr4
0401     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0402     .word   ((v3&15) << 12)
0403     MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
0404 .endm
0405 
0406 /* VECTOR UNPACK LOGICAL LOW
 *
 * @vr1:    Destination vector register
 * @vr2:    Source vector register
 * @m3:     Element size control
 */
0407 .macro  VUPLL   vr1, vr2, m3
0408     VX_NUM  v1, \vr1
0409     VX_NUM  v2, \vr2
0410     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0411     .word   0x0000
0412     MRXBOPC \m3, 0xD4, v1, v2
0413 .endm
/* Element-size variants: B/H/F select m3 = 0/1/2 */
0414 .macro  VUPLLB  vr1, vr2
0415     VUPLL   \vr1, \vr2, 0
0416 .endm
0417 .macro  VUPLLH  vr1, vr2
0418     VUPLL   \vr1, \vr2, 1
0419 .endm
0420 .macro  VUPLLF  vr1, vr2
0421     VUPLL   \vr1, \vr2, 2
0422 .endm
0423 
0424 /* VECTOR PERMUTE DOUBLEWORD IMMEDIATE
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @m4:     Doubleword-selection immediate
 */
0425 .macro  VPDI    vr1, vr2, vr3, m4
0426     VX_NUM  v1, \vr1
0427     VX_NUM  v2, \vr2
0428     VX_NUM  v3, \vr3
0429     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0430     .word   ((v3&15) << 12)
0431     MRXBOPC \m4, 0x84, v1, v2, v3
0432 .endm
0433 
0434 /* VECTOR REPLICATE
 *
 * @vr1:    Destination vector register
 * @vr3:    Source vector register
 * @imm2:   Element index to replicate
 * @m4:     Element size control
 */
0435 .macro  VREP    vr1, vr3, imm2, m4
0436     VX_NUM  v1, \vr1
0437     VX_NUM  v3, \vr3
0438     .word   0xE700 | ((v1&15) << 4) | (v3&15)
0439     .word   \imm2
0440     MRXBOPC \m4, 0x4D, v1, v3
0441 .endm
/* Element-size variants: B/H/F/G select m4 = 0/1/2/3 */
0442 .macro  VREPB   vr1, vr3, imm2
0443     VREP    \vr1, \vr3, \imm2, 0
0444 .endm
0445 .macro  VREPH   vr1, vr3, imm2
0446     VREP    \vr1, \vr3, \imm2, 1
0447 .endm
0448 .macro  VREPF   vr1, vr3, imm2
0449     VREP    \vr1, \vr3, \imm2, 2
0450 .endm
0451 .macro  VREPG   vr1, vr3, imm2
0452     VREP    \vr1, \vr3, \imm2, 3
0453 .endm
0454 
0455 /* VECTOR MERGE HIGH
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @m4:     Element size control
 */
0456 .macro  VMRH    vr1, vr2, vr3, m4
0457     VX_NUM  v1, \vr1
0458     VX_NUM  v2, \vr2
0459     VX_NUM  v3, \vr3
0460     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0461     .word   ((v3&15) << 12)
0462     MRXBOPC \m4, 0x61, v1, v2, v3
0463 .endm
/* Element-size variants: B/H/F/G select m4 = 0/1/2/3 */
0464 .macro  VMRHB   vr1, vr2, vr3
0465     VMRH    \vr1, \vr2, \vr3, 0
0466 .endm
0467 .macro  VMRHH   vr1, vr2, vr3
0468     VMRH    \vr1, \vr2, \vr3, 1
0469 .endm
0470 .macro  VMRHF   vr1, vr2, vr3
0471     VMRH    \vr1, \vr2, \vr3, 2
0472 .endm
0473 .macro  VMRHG   vr1, vr2, vr3
0474     VMRH    \vr1, \vr2, \vr3, 3
0475 .endm
0476 
0477 /* VECTOR MERGE LOW
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @m4:     Element size control
 */
0478 .macro  VMRL    vr1, vr2, vr3, m4
0479     VX_NUM  v1, \vr1
0480     VX_NUM  v2, \vr2
0481     VX_NUM  v3, \vr3
0482     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0483     .word   ((v3&15) << 12)
0484     MRXBOPC \m4, 0x60, v1, v2, v3
0485 .endm
/* Element-size variants: B/H/F/G select m4 = 0/1/2/3 */
0486 .macro  VMRLB   vr1, vr2, vr3
0487     VMRL    \vr1, \vr2, \vr3, 0
0488 .endm
0489 .macro  VMRLH   vr1, vr2, vr3
0490     VMRL    \vr1, \vr2, \vr3, 1
0491 .endm
0492 .macro  VMRLF   vr1, vr2, vr3
0493     VMRL    \vr1, \vr2, \vr3, 2
0494 .endm
0495 .macro  VMRLG   vr1, vr2, vr3
0496     VMRL    \vr1, \vr2, \vr3, 3
0497 .endm
0498 
0499 
0500 /* Vector integer instructions */
0501 
0502 /* VECTOR AND
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 */
0503 .macro  VN  vr1, vr2, vr3
0504     VX_NUM  v1, \vr1
0505     VX_NUM  v2, \vr2
0506     VX_NUM  v3, \vr3
0507     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0508     .word   ((v3&15) << 12)
0509     MRXBOPC 0, 0x68, v1, v2, v3
0510 .endm
0511 
0512 /* VECTOR EXCLUSIVE OR
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 */
0513 .macro  VX  vr1, vr2, vr3
0514     VX_NUM  v1, \vr1
0515     VX_NUM  v2, \vr2
0516     VX_NUM  v3, \vr3
0517     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0518     .word   ((v3&15) << 12)
0519     MRXBOPC 0, 0x6D, v1, v2, v3
0520 .endm
0521 
0522 /* VECTOR GALOIS FIELD MULTIPLY SUM
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @m4:     Element size control
 */
0523 .macro  VGFM    vr1, vr2, vr3, m4
0524     VX_NUM  v1, \vr1
0525     VX_NUM  v2, \vr2
0526     VX_NUM  v3, \vr3
0527     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0528     .word   ((v3&15) << 12)
0529     MRXBOPC \m4, 0xB4, v1, v2, v3
0530 .endm
/* Element-size variants: B/H/F/G select m4 = 0/1/2/3 */
0531 .macro  VGFMB   vr1, vr2, vr3
0532     VGFM    \vr1, \vr2, \vr3, 0
0533 .endm
0534 .macro  VGFMH   vr1, vr2, vr3
0535     VGFM    \vr1, \vr2, \vr3, 1
0536 .endm
0537 .macro  VGFMF   vr1, vr2, vr3
0538     VGFM    \vr1, \vr2, \vr3, 2
0539 .endm
0540 .macro  VGFMG   vr1, vr2, vr3
0541     VGFM    \vr1, \vr2, \vr3, 3
0542 .endm
0543 
0544 /* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @vr4:    Accumulator vector register
 * @m5:     Element size control
 */
0545 .macro  VGFMA   vr1, vr2, vr3, vr4, m5
0546     VX_NUM  v1, \vr1
0547     VX_NUM  v2, \vr2
0548     VX_NUM  v3, \vr3
0549     VX_NUM  v4, \vr4
0550     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0551     .word   ((v3&15) << 12) | (\m5 << 8)
0552     MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
0553 .endm
/* Element-size variants: B/H/F/G select m5 = 0/1/2/3 */
0554 .macro  VGFMAB  vr1, vr2, vr3, vr4
0555     VGFMA   \vr1, \vr2, \vr3, \vr4, 0
0556 .endm
0557 .macro  VGFMAH  vr1, vr2, vr3, vr4
0558     VGFMA   \vr1, \vr2, \vr3, \vr4, 1
0559 .endm
0560 .macro  VGFMAF  vr1, vr2, vr3, vr4
0561     VGFMA   \vr1, \vr2, \vr3, \vr4, 2
0562 .endm
0563 .macro  VGFMAG  vr1, vr2, vr3, vr4
0564     VGFMA   \vr1, \vr2, \vr3, \vr4, 3
0565 .endm
0566 
0567 /* VECTOR SHIFT RIGHT LOGICAL BY BYTE
 *
 * @vr1:    Destination vector register
 * @vr2:    Source vector register
 * @vr3:    Vector register supplying the shift amount
 */
0568 .macro  VSRLB   vr1, vr2, vr3
0569     VX_NUM  v1, \vr1
0570     VX_NUM  v2, \vr2
0571     VX_NUM  v3, \vr3
0572     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0573     .word   ((v3&15) << 12)
0574     MRXBOPC 0, 0x7D, v1, v2, v3
0575 .endm
0576 
0577 /* VECTOR REPLICATE IMMEDIATE
 *
 * @vr1:    Destination vector register
 * @imm2:   Immediate value replicated into every element
 * @m3:     Element size control
 */
0578 .macro  VREPI   vr1, imm2, m3
0579     VX_NUM  v1, \vr1
0580     .word   0xE700 | ((v1&15) << 4)
0581     .word   \imm2
0582     MRXBOPC \m3, 0x45, v1
0583 .endm
/* Element-size variants: B/H/F select m3 = 0/1/2 */
0584 .macro  VREPIB  vr1, imm2
0585     VREPI   \vr1, \imm2, 0
0586 .endm
0587 .macro  VREPIH  vr1, imm2
0588     VREPI   \vr1, \imm2, 1
0589 .endm
0590 .macro  VREPIF  vr1, imm2
0591     VREPI   \vr1, \imm2, 2
0592 .endm
/*
 * VREPIG - replicate immediate into doubleword elements (m3 = 3).
 *
 * Fix: this previously expanded VREP, which expects (vr1, vr3, imm2,
 * m4) — the arguments would be misinterpreted and \m4 left empty,
 * breaking assembly.  Delegate to VREPI like VREPIB/VREPIH/VREPIF.
 */
0593 .macro  VREPIG  vr1, imm2
0594     VREPI   \vr1, \imm2, 3
0595 .endm
0596 
0597 /* VECTOR ADD
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @m4:     Element size control
 */
0598 .macro  VA  vr1, vr2, vr3, m4
0599     VX_NUM  v1, \vr1
0600     VX_NUM  v2, \vr2
0601     VX_NUM  v3, \vr3
0602     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0603     .word   ((v3&15) << 12)
0604     MRXBOPC \m4, 0xF3, v1, v2, v3
0605 .endm
/* Element-size variants: B/H/F/G/Q select m4 = 0/1/2/3/4 */
0606 .macro  VAB vr1, vr2, vr3
0607     VA  \vr1, \vr2, \vr3, 0
0608 .endm
0609 .macro  VAH vr1, vr2, vr3
0610     VA  \vr1, \vr2, \vr3, 1
0611 .endm
0612 .macro  VAF vr1, vr2, vr3
0613     VA  \vr1, \vr2, \vr3, 2
0614 .endm
0615 .macro  VAG vr1, vr2, vr3
0616     VA  \vr1, \vr2, \vr3, 3
0617 .endm
0618 .macro  VAQ vr1, vr2, vr3
0619     VA  \vr1, \vr2, \vr3, 4
0620 .endm
0621 
0622 /* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
 *
 * @vr1:    Destination vector register
 * @vr2:    Source vector register
 * @vr3:    Vector register supplying per-element shift amounts
 * @m4:     Element size control
 */
0623 .macro  VESRAV  vr1, vr2, vr3, m4
0624     VX_NUM  v1, \vr1
0625     VX_NUM  v2, \vr2
0626     VX_NUM  v3, \vr3
0627     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0628     .word   ((v3&15) << 12)
0629     MRXBOPC \m4, 0x7A, v1, v2, v3
0630 .endm
0631
/* Element-size variants: B/H/F/G select m4 = 0/1/2/3 */
0632 .macro  VESRAVB vr1, vr2, vr3
0633     VESRAV  \vr1, \vr2, \vr3, 0
0634 .endm
0635 .macro  VESRAVH vr1, vr2, vr3
0636     VESRAV  \vr1, \vr2, \vr3, 1
0637 .endm
0638 .macro  VESRAVF vr1, vr2, vr3
0639     VESRAV  \vr1, \vr2, \vr3, 2
0640 .endm
0641 .macro  VESRAVG vr1, vr2, vr3
0642     VESRAV  \vr1, \vr2, \vr3, 3
0643 .endm
0644 
0645 /* VECTOR ELEMENT ROTATE LEFT LOGICAL
 *
 * @vr1:    Destination vector register
 * @vr3:    Source vector register
 * @disp:   Displacement supplying the rotate amount
 * @base:   Base register (defaults to %r0)
 * @m4:     Element size control
 */
0646 .macro  VERLL   vr1, vr3, disp, base="%r0", m4
0647     VX_NUM  v1, \vr1
0648     VX_NUM  v3, \vr3
0649     GR_NUM  b2, \base
0650     .word   0xE700 | ((v1&15) << 4) | (v3&15)
0651     .word   (b2 << 12) | (\disp)
0652     MRXBOPC \m4, 0x33, v1, v3
0653 .endm
/* Element-size variants: B/H/F/G select m4 = 0/1/2/3 */
0654 .macro  VERLLB  vr1, vr3, disp, base="%r0"
0655     VERLL   \vr1, \vr3, \disp, \base, 0
0656 .endm
0657 .macro  VERLLH  vr1, vr3, disp, base="%r0"
0658     VERLL   \vr1, \vr3, \disp, \base, 1
0659 .endm
0660 .macro  VERLLF  vr1, vr3, disp, base="%r0"
0661     VERLL   \vr1, \vr3, \disp, \base, 2
0662 .endm
0663 .macro  VERLLG  vr1, vr3, disp, base="%r0"
0664     VERLL   \vr1, \vr3, \disp, \base, 3
0665 .endm
0666 
0667 /* VECTOR SHIFT LEFT DOUBLE BY BYTE
 *
 * @vr1:    Destination vector register
 * @vr2:    First source vector register
 * @vr3:    Second source vector register
 * @imm4:   Byte shift amount (immediate)
 */
0668 .macro  VSLDB   vr1, vr2, vr3, imm4
0669     VX_NUM  v1, \vr1
0670     VX_NUM  v2, \vr2
0671     VX_NUM  v3, \vr3
0672     .word   0xE700 | ((v1&15) << 4) | (v2&15)
0673     .word   ((v3&15) << 12) | (\imm4)
0674     MRXBOPC 0, 0x77, v1, v2, v3
0675 .endm
0676 
0677 #endif  /* __ASSEMBLY__ */
0678 #endif  /* __ASM_S390_VX_INSN_H */