// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/mmx.c
 *
 * MMX implementation of RAID-6 syndrome functions
 */

#ifdef CONFIG_X86_32

#include <linux/raid/pq.h>
#include "x86.h"

/* Shared with raid6/sse1.c */
const struct raid6_mmx_constants {
	u64 x1d;
} raid6_mmx_constants = {
	0x1d1d1d1d1d1d1d1dULL,	/* low byte of x^8+x^4+x^3+x^2+1, in all 8 bytes */
};

static int raid6_have_mmx(void)
{
	/* Not really "boot_cpu" but the best we can do */
	return boot_cpu_has(X86_FEATURE_MMX);
}
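
/*
 * RAID-6 computes two syndromes over the data disks D_0 .. D_z0:
 *
 *	P = D_0 ^ D_1 ^ ... ^ D_z0
 *	Q = D_0 ^ 2*D_1 ^ ... ^ 2^z0 * D_z0	(multiplication in GF(2^8))
 *
 * The loops below evaluate Q by Horner's rule, walking from the highest
 * data disk down to disk 0, so the only field operation they need is
 * multiply-by-2.  As a scalar sketch (gf2_mul2 is an illustrative name,
 * not something this file defines), that step is:
 *
 *	u8 gf2_mul2(u8 b)
 *	{
 *		return (b << 1) ^ ((b & 0x80) ? 0x1d : 0);
 *	}
 *
 * i.e. shift left and, when a bit carries out of the top, reduce by the
 * generator polynomial.  The MMX code does this on eight bytes at once:
 * pcmpgtb against an all-zero register turns each byte with its top bit
 * set into 0xff, paddb doubles every byte, and pand/pxor fold 0x1d into
 * exactly the bytes that overflowed.
 */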

/*
 * Plain MMX implementation
 */
static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	/* MMX registers alias the x87 FPU state, so guard its use */
	kernel_fpu_begin();

	asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
	asm volatile("pxor %mm5,%mm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 8 ) {
		asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("movq %mm2,%mm4");	/* Q[0] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %mm4,%mm5"); /* 0xff where Q byte has top bit set */
			asm volatile("paddb %mm4,%mm4");   /* Q <<= 1, bytewise */
			asm volatile("pand %mm0,%mm5");    /* 0x1d in the overflowed bytes */
			asm volatile("pxor %mm5,%mm4");    /* reduce by the polynomial */
			asm volatile("pxor %mm5,%mm5");    /* re-zero the temp */
			asm volatile("pxor %mm6,%mm2");    /* P ^= D[z] */
			asm volatile("pxor %mm6,%mm4");    /* Q ^= D[z] */
		}
		asm volatile("movq %%mm2,%0" : "=m" (p[d]));
		asm volatile("pxor %mm2,%mm2");
		asm volatile("movq %%mm4,%0" : "=m" (q[d]));
		asm volatile("pxor %mm4,%mm4");
	}

	kernel_fpu_end();
}

const struct raid6_calls raid6_mmxx1 = {
	raid6_mmx1_gen_syndrome,
	NULL,			/* XOR not yet implemented */
	raid6_have_mmx,
	"mmxx1",
	0
};
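
/*
 * The initializer above is positional.  Written with designated
 * initializers (a sketch; the field names follow struct raid6_calls as
 * declared in <linux/raid/pq.h>), it would read:
 *
 *	const struct raid6_calls raid6_mmxx1 = {
 *		.gen_syndrome	= raid6_mmx1_gen_syndrome,
 *		.xor_syndrome	= NULL,
 *		.valid		= raid6_have_mmx,
 *		.name		= "mmxx1",
 *		.prefer		= 0,
 *	};
 *
 * The raid6 core calls ->valid() on each registered algorithm and
 * benchmarks the usable ones to pick the fastest gen_syndrome.
 */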

/*
 * Unrolled-by-2 MMX implementation
 */
static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
	asm volatile("pxor %mm5,%mm5"); /* Zero temp */
	asm volatile("pxor %mm7,%mm7"); /* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d]));   /* P[0] */
		asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
		asm volatile("movq %mm2,%mm4"); /* Q[0] */
		asm volatile("movq %mm3,%mm6"); /* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("pcmpgtb %mm4,%mm5"); /* masks of top-bit bytes */
			asm volatile("pcmpgtb %mm6,%mm7");
			asm volatile("paddb %mm4,%mm4");   /* Q <<= 1, bytewise */
			asm volatile("paddb %mm6,%mm6");
			asm volatile("pand %mm0,%mm5");    /* 0x1d in the overflowed bytes */
			asm volatile("pand %mm0,%mm7");
			asm volatile("pxor %mm5,%mm4");    /* reduce by the polynomial */
			asm volatile("pxor %mm7,%mm6");
			asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));   /* reuse the */
			asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); /* temps for D[z] */
			asm volatile("pxor %mm5,%mm2");    /* P ^= D[z] */
			asm volatile("pxor %mm7,%mm3");
			asm volatile("pxor %mm5,%mm4");    /* Q ^= D[z] */
			asm volatile("pxor %mm7,%mm6");
			asm volatile("pxor %mm5,%mm5");    /* re-zero the temps */
			asm volatile("pxor %mm7,%mm7");
		}
		asm volatile("movq %%mm2,%0" : "=m" (p[d]));
		asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
		asm volatile("movq %%mm4,%0" : "=m" (q[d]));
		asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
	}

	kernel_fpu_end();
}

const struct raid6_calls raid6_mmxx2 = {
	raid6_mmx2_gen_syndrome,
	NULL,			/* XOR not yet implemented */
	raid6_have_mmx,
	"mmxx2",
	0
};
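
/*
 * Caller's-eye sketch (illustrative, not part of this file): ptrs[] holds
 * the disks-2 data blocks followed by the P and Q destinations, all of
 * the same size.  NDISKS stands in for the caller's disk count:
 *
 *	void *ptrs[NDISKS];
 *	 ... point ptrs[0 .. NDISKS-3] at the data blocks,
 *	     ptrs[NDISKS-2] at the P block, ptrs[NDISKS-1] at Q ...
 *	raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, ptrs);
 *
 * where raid6_call is the algorithm the raid6 core selected at boot.
 */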

#endif /* CONFIG_X86_32 */