As the other answer says, the inline asm here is messy and misused. Rewriting it with intrinsics should be a good move, and it lets you compile with or without -mavx (or -march=haswell, -march=znver1, etc.) so the compiler can drop a pile of register-copy instructions. It also lets the compiler optimize (vector) register allocation and decide when to do loads/stores, which is something compilers are good at.
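As a small illustration of that point (my example, not taken from the original code): the same intrinsic compiles to either the destructive two-operand SSE form or the three-operand VEX form, and with AVX enabled the compiler can often skip the extra movaps copy it would otherwise need to keep an input alive.

#include <x86intrin.h>

/* Hypothetical helper just to show the codegen difference.  Built without
   -mavx, gcc typically needs an extra movaps somewhere because pxor/aesenc
   overwrite their first operand; built with -mavx the three-operand
   vpxor/vaesenc forms usually avoid that copy.                            */
__m128i xor_then_aesenc(__m128i a, __m128i b)
{
    __m128i t = _mm_xor_si128(a, b);   /* a must stay live ...            */
    return _mm_aesenc_si128(t, a);     /* ... because it is reused here   */
}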
Well, I couldn't use the test data you supplied. It calls several routines that aren't provided here, and I was too lazy to go hunting for them. Still, I cobbled together some test data of my own, and my E256() returns the same values as yours. That doesn't mean it's 100% correct (do your own testing), but given all the xor/aesenc going on, if something were wrong I'd expect it to show up.
Doing the conversion to intrinsics wasn't particularly hard. Mostly you just need to find the equivalent _mm_ function for a given asm instruction. That, plus tracking down all the places where you typed x12 when you meant x13 (annoying).
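For reference, here is a rough cheat sheet of the asm-to-intrinsic mapping this translation relies on (a sketch in my own words; the helper function and variable names are mine, not part of the code below):

#include <x86intrin.h>

/* Illustrative only: each line pairs an Intel-syntax instruction from the
   original asm with the _mm_ call used in its place.                      */
static __m128i demo_mapping(__m128i x12, __m128i x2, __m128i x3, const void *p)
{
    const __m128i rev = _mm_set_epi32(0x03020100, 0x0f0e0d0c,
                                      0x0b0a0908, 0x07060504);
    __m128i x8 = _mm_load_si128((const __m128i *)p); /* movaps xmm8, [p]          */
    x12 = _mm_shuffle_epi8(x12, rev);                /* pshufb xmm12, [..REVERSE] */
    x12 = _mm_aesenc_si128(x12, x2);                 /* aesenc xmm12, xmm2        */
    x12 = _mm_xor_si128(x12, x3);                    /* pxor   xmm12, xmm3        */
    x12 = _mm_srli_si128(x12, 4);                    /* psrldq xmm12, 4  (bytes)  */
    x12 = _mm_slli_si128(x12, 12);                   /* pslldq xmm12, 12 (bytes)  */
    x3  = _mm_shuffle_epi32(x3, 135);                /* pshufd xmm3, xmm3, 135    */
    return _mm_xor_si128(x12, _mm_xor_si128(x8, x3));
}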
Note that while this code uses variables named x0-x15, that is only to make the translation easier to follow. There is no connection between these C variable names and the registers gcc actually uses when it compiles the code. Also, gcc applies a lot of its SSE knowledge to reorder instructions, so the output (especially at -O3) looks very different from the original asm. If you think you can diff the two to check correctness (like I did), expect to be frustrated.
This code contains both the original routines (prefixed with "old") and the new ones, and main() calls both to check that they produce the same output. I made no effort to tweak the intrinsics to optimize anything; once it worked, I stopped. It is all plain C now, so any further improvements are left to you.
That said, gcc is able to optimize intrinsics (something it cannot do with asm). That means that if you rebuild this code with -mavx2, the generated code is quite different.
Some stats:
- The original (fully unrolled) code for E256() takes 287 instructions.
- Built with intrinsics but without -mavx2, it takes 251.
- Built with intrinsics and -mavx2, it takes 196.
I haven't done any timing, but I like to think that dropping roughly 100 instructions from the assembly output helps. On the other hand, gcc's SSE optimization sometimes turns out poorly, so don't assume anything.
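One practical build note (my addition, not from the original answer): the intrinsics need AES-NI and SSSE3 enabled at compile time, so a guard like the one below fails fast if the flags are missing; the file name in the comment is just a placeholder.

/* The intrinsics used here (_mm_aesenc_si128, _mm_shuffle_epi8) require
   AES-NI and SSSE3.  Typical build lines might look like:
       gcc -O3 -maes -mssse3 shavite.c      (legacy SSE encoding)
       gcc -O3 -maes -mavx2  shavite.c      (VEX encoding, fewer copies)   */
#if !defined(__AES__) || !defined(__SSSE3__)
#  error "compile with -maes -mssse3 (or -maes -mavx2 / -march=native)"
#endif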
Hope this helps.
#include <wmmintrin.h>
#include <x86intrin.h>
#include <stdio.h>
#include <stdlib.h> /* malloc() used in main() */
#define tos(a) #a
#define tostr(a) tos(a)
#define rev_reg_0321(j){ asm ("pshufb xmm" tostr(j)", [oldSHAVITE_REVERSE]"); }
#define replace_aes(i, j){ asm ("aesenc xmm" tostr(i)", xmm" tostr(j)""); }
__attribute__ ((aligned (16))) unsigned int oldSHAVITE_MESS[16];
__attribute__ ((aligned (16))) unsigned char oldSHAVITE_PTXT[8*4];
__attribute__ ((aligned (16))) unsigned int oldSHAVITE_CNTS[4] = {0,0,0,0};
__attribute__ ((aligned (16))) unsigned int oldSHAVITE_REVERSE[4] = {0x07060504, 0x0b0a0908, 0x0f0e0d0c, 0x03020100 };
__attribute__ ((aligned (16))) unsigned int oldSHAVITE256_XOR2[4] = {0x0, 0xFFFFFFFF, 0x0, 0x0};
__attribute__ ((aligned (16))) unsigned int oldSHAVITE256_XOR3[4] = {0x0, 0x0, 0xFFFFFFFF, 0x0};
__attribute__ ((aligned (16))) unsigned int oldSHAVITE256_XOR4[4] = {0x0, 0x0, 0x0, 0xFFFFFFFF};
#define oldmixing() do {\
asm("movaps xmm11, xmm15");\
asm("movaps xmm10, xmm14");\
asm("movaps xmm9, xmm13");\
asm("movaps xmm8, xmm12");\
\
asm("movaps xmm6, xmm11");\
asm("psrldq xmm6, 4");\
asm("pxor xmm8, xmm6");\
asm("movaps xmm6, xmm8");\
asm("pslldq xmm6, 12");\
asm("pxor xmm8, xmm6");\
\
asm("movaps xmm7, xmm8");\
asm("psrldq xmm7, 4");\
asm("pxor xmm9, xmm7");\
asm("movaps xmm7, xmm9");\
asm("pslldq xmm7, 12");\
asm("pxor xmm9, xmm7");\
\
asm("movaps xmm6, xmm9");\
asm("psrldq xmm6, 4");\
asm("pxor xmm10, xmm6");\
asm("movaps xmm6, xmm10");\
asm("pslldq xmm6, 12");\
asm("pxor xmm10, xmm6");\
\
asm("movaps xmm7, xmm10");\
asm("psrldq xmm7, 4");\
asm("pxor xmm11, xmm7");\
asm("movaps xmm7, xmm11");\
asm("pslldq xmm7, 12");\
asm("pxor xmm11, xmm7");\
} while(0);
void oldE256()
{
asm (".intel_syntax noprefix");
asm ("movaps xmm0, [oldSHAVITE_PTXT]");
asm ("movaps xmm1, [oldSHAVITE_PTXT+16]");
asm ("movaps xmm3, [oldSHAVITE_CNTS]");
asm ("movaps xmm4, [oldSHAVITE256_XOR2]");
asm ("pxor xmm2, xmm2");
asm ("movaps xmm8, [oldSHAVITE_MESS]");
asm ("movaps xmm9, [oldSHAVITE_MESS+16]");
asm ("movaps xmm10, [oldSHAVITE_MESS+32]");
asm ("movaps xmm11, [oldSHAVITE_MESS+48]");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm3");
asm ("pxor xmm12, xmm4");
asm ("movaps xmm4, [oldSHAVITE256_XOR3]");
asm ("pxor xmm12, xmm11");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
asm ("movaps xmm6, xmm8");
asm ("pxor xmm8, xmm1");
replace_aes(8, 9);
replace_aes(8, 10);
replace_aes(8, 2);
asm ("pxor xmm0, xmm8");
asm ("movaps xmm8, xmm6");
asm ("movaps xmm6, xmm11");
asm ("pxor xmm11, xmm0");
replace_aes(11, 12);
replace_aes(11, 13);
replace_aes(11, 2);
asm ("pxor xmm1, xmm11");
asm ("movaps xmm11, xmm6");
oldmixing();
asm ("movaps xmm6, xmm14");
asm ("pxor xmm14, xmm1");
replace_aes(14, 15);
replace_aes(14, 8);
replace_aes(14, 2);
asm ("pxor xmm0, xmm14");
asm ("movaps xmm14, xmm6");
asm ("pshufd xmm3, xmm3,135");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm11");
asm ("pxor xmm14, xmm3");
asm ("pxor xmm14, xmm4");
asm ("movaps xmm4, [oldSHAVITE256_XOR4]");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
asm ("movaps xmm6, xmm9");
asm ("pxor xmm9, xmm0");
replace_aes(9, 10);
replace_aes(9, 11);
replace_aes(9, 2);
asm ("pxor xmm1, xmm9");
asm ("movaps xmm9, xmm6");
oldmixing();
asm ("movaps xmm6, xmm12");
asm ("pxor xmm12, xmm1");
replace_aes(12, 13);
replace_aes(12, 14);
replace_aes(12, 2);
asm ("pxor xmm0, xmm12");
asm ("movaps xmm12, xmm6");
asm ("movaps xmm6, xmm15");
asm ("pxor xmm15, xmm0");
replace_aes(15, 8);
replace_aes(15, 9);
replace_aes(15, 2);
asm ("pxor xmm1, xmm15");
asm ("movaps xmm15, xmm6");
asm ("pshufd xmm3, xmm3, 147");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm11");
asm ("pxor xmm13, xmm3");
asm ("pxor xmm13, xmm4");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
asm ("movaps xmm6, xmm10");
asm ("pxor xmm10, xmm1");
replace_aes(10, 11);
replace_aes(10, 12);
replace_aes(10, 2);
asm ("pxor xmm0, xmm10");
asm ("movaps xmm10, xmm6");
oldmixing();
asm ("movaps xmm6, xmm13");
asm ("pxor xmm13, xmm0");
replace_aes(13, 14);
replace_aes(13, 15);
replace_aes(13, 2);
asm ("pxor xmm1, xmm13");
asm ("movaps xmm13, xmm6");
asm ("pshufd xmm3, xmm3, 135");
asm ("movaps xmm12, xmm8");
asm ("movaps xmm13, xmm9");
asm ("movaps xmm14, xmm10");
asm ("movaps xmm15, xmm11");
rev_reg_0321(12);
rev_reg_0321(13);
rev_reg_0321(14);
rev_reg_0321(15);
replace_aes(12, 2);
replace_aes(13, 2);
replace_aes(14, 2);
replace_aes(15, 2);
asm ("pxor xmm12, xmm11");
asm ("pxor xmm15, xmm3");
asm ("pxor xmm15, xmm4");
asm ("pxor xmm13, xmm12");
asm ("pxor xmm14, xmm13");
asm ("pxor xmm15, xmm14");
asm ("movaps xmm6, xmm8");
asm ("pxor xmm8, xmm1");
replace_aes(8, 9);
replace_aes(8, 10);
replace_aes(8, 2);
asm ("pxor xmm0, xmm8");
asm ("movaps xmm8, xmm6");
asm ("movaps xmm6, xmm11");
asm ("pxor xmm11, xmm0");
replace_aes(11, 12);
replace_aes(11, 13);
replace_aes(11, 2);
asm ("pxor xmm1, xmm11");
asm ("movaps xmm11, xmm6");
oldmixing();
asm ("movaps xmm6, xmm14");
asm ("pxor xmm14, xmm1");
replace_aes(14, 15);
replace_aes(14, 8);
replace_aes(14, 2);
asm ("pxor xmm0, xmm14");
asm ("movaps xmm14, xmm6");
asm ("movaps xmm6, xmm9");
asm ("pxor xmm9, xmm0");
replace_aes(9, 10);
replace_aes(9, 11);
replace_aes(9, 2);
asm ("pxor xmm1, xmm9");
asm ("movaps xmm9, xmm6");
asm ("pxor xmm0, [oldSHAVITE_PTXT]");
asm ("pxor xmm1, [oldSHAVITE_PTXT+16]");
asm ("movaps [oldSHAVITE_PTXT], xmm0");
asm ("movaps [oldSHAVITE_PTXT+16], xmm1");
asm (".att_syntax noprefix");
return;
}
void oldCompress256(const unsigned char *message_block, unsigned char *chaining_value, unsigned long long counter,
const unsigned char salt[32])
{
int i, j;
for (i=0;i<8*4;i++)
oldSHAVITE_PTXT[i]=chaining_value[i];
for (i=0;i<16;i++)
oldSHAVITE_MESS[i] = *((unsigned int*)(message_block+4*i));
oldSHAVITE_CNTS[0] = (unsigned int)(counter & 0xFFFFFFFFULL);
oldSHAVITE_CNTS[1] = (unsigned int)(counter>>32);
oldE256();
for (i=0; i<4*8; i++)
chaining_value[i]=oldSHAVITE_PTXT[i];
return;
}
__attribute__ ((aligned (16))) unsigned int SHAVITE_MESS[16];
__attribute__ ((aligned (16))) unsigned char SHAVITE_PTXT[8*4];
__attribute__ ((aligned (16))) unsigned int SHAVITE_CNTS[4] = {0,0,0,0};
__attribute__ ((aligned (16))) unsigned int SHAVITE_REVERSE[4] = {0x07060504, 0x0b0a0908, 0x0f0e0d0c, 0x03020100 };
__attribute__ ((aligned (16))) unsigned int SHAVITE256_XOR2[4] = {0x0, 0xFFFFFFFF, 0x0, 0x0};
__attribute__ ((aligned (16))) unsigned int SHAVITE256_XOR3[4] = {0x0, 0x0, 0xFFFFFFFF, 0x0};
__attribute__ ((aligned (16))) unsigned int SHAVITE256_XOR4[4] = {0x0, 0x0, 0x0, 0xFFFFFFFF};
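/* mixing(): intrinsics equivalent of oldmixing() above - copies x12..x15
   into x8..x11, then xors each with a 4-byte right-shifted copy of its
   neighbour and a 12-byte left-shifted copy of itself. */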
#define mixing() do {\
x11 = x15; \
x10 = x14; \
x9 = x13;\
x8 = x12;\
\
x6 = x11;\
x6 = _mm_srli_si128(x6, 4);\
x8 = _mm_xor_si128(x8, x6);\
x6 = x8;\
x6 = _mm_slli_si128(x6, 12);\
x8 = _mm_xor_si128(x8, x6);\
\
x7 = x8;\
x7 = _mm_srli_si128(x7, 4);\
x9 = _mm_xor_si128(x9, x7);\
x7 = x9;\
x7 = _mm_slli_si128(x7, 12);\
x9 = _mm_xor_si128(x9, x7);\
\
x6 = x9;\
x6 = _mm_srli_si128(x6, 4);\
x10 = _mm_xor_si128(x10, x6);\
x6 = x10;\
x6 = _mm_slli_si128(x6, 12);\
x10 = _mm_xor_si128(x10, x6);\
\
x7 = x10;\
x7 = _mm_srli_si128(x7, 4);\
x11 = _mm_xor_si128(x11, x7);\
x7 = x11;\
x7 = _mm_slli_si128(x7, 12);\
x11 = _mm_xor_si128(x11, x7);\
} while(0);
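/* E256(): straight intrinsics translation of oldE256(); it uses the same
   global-variable layout so main() can compare the two implementations. */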
void E256()
{
__m128i x0;
__m128i x1;
__m128i x2;
__m128i x3;
__m128i x4;
__m128i x5;
__m128i x6;
__m128i x7;
__m128i x8;
__m128i x9;
__m128i x10;
__m128i x11;
__m128i x12;
__m128i x13;
__m128i x14;
__m128i x15;
const __m128i ptxt1 = _mm_loadu_si128((const __m128i*)SHAVITE_PTXT);
const __m128i ptxt2 = _mm_loadu_si128((const __m128i*)(SHAVITE_PTXT+16));
x0 = ptxt1;
x1 = ptxt2;
x3 = _mm_loadu_si128((__m128i*)SHAVITE_CNTS);
x4 = _mm_loadu_si128((__m128i*)SHAVITE256_XOR2);
x2 = _mm_setzero_si128();
x8 = _mm_loadu_si128((__m128i*)SHAVITE_MESS);
x9 = _mm_loadu_si128((__m128i*)(SHAVITE_MESS+4));
x10 = _mm_loadu_si128((__m128i*)(SHAVITE_MESS+8));
x11 = _mm_loadu_si128((__m128i*)(SHAVITE_MESS+12));
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
const __m128i xtemp = _mm_loadu_si128((__m128i*)SHAVITE_REVERSE);
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x3);
x12 = _mm_xor_si128(x12, x4);
x4 = _mm_loadu_si128((__m128i*)SHAVITE256_XOR3);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
mixing();
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x14 = _mm_xor_si128(x14, x3);
x14 = _mm_xor_si128(x14, x4);
x4 = _mm_loadu_si128((__m128i*)SHAVITE256_XOR4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
mixing();
x6 = x12;
x12 = _mm_xor_si128(x12, x1);
x12 = _mm_aesenc_si128(x12, x13);
x12 = _mm_aesenc_si128(x12, x14);
x12 = _mm_aesenc_si128(x12, x2);
x0 = _mm_xor_si128(x0, x12);
x12 = x6;
x6 = x15;
x15 = _mm_xor_si128(x15, x0);
x15 = _mm_aesenc_si128(x15, x8);
x15 = _mm_aesenc_si128(x15, x9);
x15 = _mm_aesenc_si128(x15, x2);
x1 = _mm_xor_si128(x1, x15);
x15 = x6;
x3 = _mm_shuffle_epi32(x3, 147);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x3);
x13 = _mm_xor_si128(x13, x4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x10;
x10 = _mm_xor_si128(x10, x1);
x10 = _mm_aesenc_si128(x10, x11);
x10 = _mm_aesenc_si128(x10, x12);
x10 = _mm_aesenc_si128(x10, x2);
x0 = _mm_xor_si128(x0, x10);
x10 = x6;
mixing();
x6 = x13;
x13 = _mm_xor_si128(x13, x0);
x13 = _mm_aesenc_si128(x13, x14);
x13 = _mm_aesenc_si128(x13, x15);
x13 = _mm_aesenc_si128(x13, x2);
x1 = _mm_xor_si128(x1, x13);
x13 = x6;
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, xtemp);
x13 = _mm_shuffle_epi8(x13, xtemp);
x14 = _mm_shuffle_epi8(x14, xtemp);
x15 = _mm_shuffle_epi8(x15, xtemp);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x15 = _mm_xor_si128(x15, x3);
x15 = _mm_xor_si128(x15, x4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
mixing();
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
x0 = _mm_xor_si128(x0, ptxt1);
x1 = _mm_xor_si128(x1, ptxt2);
_mm_storeu_si128((__m128i *)SHAVITE_PTXT, x0);
_mm_storeu_si128((__m128i *)(SHAVITE_PTXT + 16), x1);
return;
}
void Compress256(const unsigned char *message_block, unsigned char *chaining_value, unsigned long long counter,
const unsigned char salt[32])
{
int i, j;
for (i=0;i<8*4;i++)
SHAVITE_PTXT[i]=chaining_value[i];
for (i=0;i<16;i++)
SHAVITE_MESS[i] = *((unsigned int*)(message_block+4*i));
SHAVITE_CNTS[0] = (unsigned int)(counter & 0xFFFFFFFFULL);
SHAVITE_CNTS[1] = (unsigned int)(counter>>32);
E256();
for (i=0; i<4*8; i++)
chaining_value[i]=SHAVITE_PTXT[i];
return;
}
int main(int argc, char *argv[])
{
const int cvlen = 32;
unsigned char *cv = (unsigned char *)malloc(cvlen);
for (int x=0; x < cvlen; x++)
cv[x] = x + argc;
const int mblen = 64;
unsigned char *mb = (unsigned char *)malloc(mblen);
for (int x=0; x < mblen; x++)
mb[x] = x + argc;
unsigned long long counter = 0x1234567812345678ull;
unsigned char s[32] = {0};
oldCompress256(mb, cv, counter, s);
printf("old: ");
for (int x=0; x < cvlen; x++)
printf("%2x ", cv[x]);
printf("\n");
for (int x=0; x < cvlen; x++)
cv[x] = x + argc;
Compress256(mb, cv, counter, s);
printf("new: ");
for (int x=0; x < cvlen; x++)
printf("%2x ", cv[x]);
printf("\n");
}
Edit:
The globals were only being used to pass values between the C code and the asm. Perhaps the asm programmer didn't know how to access parameters? Either way, they are unnecessary (and a source of thread-safety problems). Here is the code without them (plus a few cosmetic changes):
#include <x86intrin.h>
#include <stdio.h>
#include <time.h>
#define mixing() \
x11 = x15;\
x10 = x14;\
x9 = x13;\
x8 = x12;\
\
x6 = x11;\
x6 = _mm_srli_si128(x6, 4);\
x8 = _mm_xor_si128(x8, x6);\
x6 = x8;\
x6 = _mm_slli_si128(x6, 12);\
x8 = _mm_xor_si128(x8, x6);\
\
x7 = x8;\
x7 = _mm_srli_si128(x7, 4);\
x9 = _mm_xor_si128(x9, x7);\
x7 = x9;\
x7 = _mm_slli_si128(x7, 12);\
x9 = _mm_xor_si128(x9, x7);\
\
x6 = x9;\
x6 = _mm_srli_si128(x6, 4);\
x10 = _mm_xor_si128(x10, x6);\
x6 = x10;\
x6 = _mm_slli_si128(x6, 12);\
x10 = _mm_xor_si128(x10, x6);\
\
x7 = x10;\
x7 = _mm_srli_si128(x7, 4);\
x11 = _mm_xor_si128(x11, x7);\
x7 = x11;\
x7 = _mm_slli_si128(x7, 12);\
x11 = _mm_xor_si128(x11, x7);
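/* Compress256(): same computation as above, but the message, chaining value
   and counter are passed as parameters instead of globals (thread-safe),
   and the constants are built with _mm_set_epi32 instead of memory loads. */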
void Compress256(const __m128i *mess, __m128i *chain, unsigned long long counter, const unsigned char salt[32])
{
const __m128i SHAVITE_REVERSE = _mm_set_epi32(0x03020100, 0x0f0e0d0c, 0x0b0a0908, 0x07060504);
const __m128i SHAVITE256_XOR2 = _mm_set_epi32(0x0, 0x0, 0xFFFFFFFF, 0x0);
const __m128i SHAVITE256_XOR3 = _mm_set_epi32(0x0, 0xFFFFFFFF, 0x0, 0x0);
const __m128i SHAVITE256_XOR4 = _mm_set_epi32(0xFFFFFFFF, 0x0, 0x0, 0x0);
const __m128i SHAVITE_CNTS =
_mm_set_epi32(0, 0, (unsigned int)(counter>>32), (unsigned int)(counter & 0xFFFFFFFFULL));
__m128i x0, x1, x2, x3, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
const __m128i ptxt1 = _mm_load_si128(chain);
const __m128i ptxt2 = _mm_load_si128(chain+1);
x0 = ptxt1;
x1 = ptxt2;
x3 = SHAVITE_CNTS;
x2 = _mm_setzero_si128();
x8 = _mm_load_si128(mess);
x9 = _mm_load_si128(mess+1);
x10 = _mm_load_si128(mess+2);
x11 = _mm_load_si128(mess+3);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x3);
x12 = _mm_xor_si128(x12, SHAVITE256_XOR2);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
mixing();
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x14 = _mm_xor_si128(x14, x3);
x14 = _mm_xor_si128(x14, SHAVITE256_XOR3);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
mixing();
x6 = x12;
x12 = _mm_xor_si128(x12, x1);
x12 = _mm_aesenc_si128(x12, x13);
x12 = _mm_aesenc_si128(x12, x14);
x12 = _mm_aesenc_si128(x12, x2);
x0 = _mm_xor_si128(x0, x12);
x12 = x6;
x6 = x15;
x15 = _mm_xor_si128(x15, x0);
x15 = _mm_aesenc_si128(x15, x8);
x15 = _mm_aesenc_si128(x15, x9);
x15 = _mm_aesenc_si128(x15, x2);
x1 = _mm_xor_si128(x1, x15);
x15 = x6;
x3 = _mm_shuffle_epi32(x3, 147);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x13 = _mm_xor_si128(x13, x3);
x13 = _mm_xor_si128(x13, SHAVITE256_XOR4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x10;
x10 = _mm_xor_si128(x10, x1);
x10 = _mm_aesenc_si128(x10, x11);
x10 = _mm_aesenc_si128(x10, x12);
x10 = _mm_aesenc_si128(x10, x2);
x0 = _mm_xor_si128(x0, x10);
x10 = x6;
mixing();
x6 = x13;
x13 = _mm_xor_si128(x13, x0);
x13 = _mm_aesenc_si128(x13, x14);
x13 = _mm_aesenc_si128(x13, x15);
x13 = _mm_aesenc_si128(x13, x2);
x1 = _mm_xor_si128(x1, x13);
x13 = x6;
x3 = _mm_shuffle_epi32(x3, 135);
x12 = x8;
x13 = x9;
x14 = x10;
x15 = x11;
x12 = _mm_shuffle_epi8(x12, SHAVITE_REVERSE);
x13 = _mm_shuffle_epi8(x13, SHAVITE_REVERSE);
x14 = _mm_shuffle_epi8(x14, SHAVITE_REVERSE);
x15 = _mm_shuffle_epi8(x15, SHAVITE_REVERSE);
x12 = _mm_aesenc_si128(x12, x2);
x13 = _mm_aesenc_si128(x13, x2);
x14 = _mm_aesenc_si128(x14, x2);
x15 = _mm_aesenc_si128(x15, x2);
x12 = _mm_xor_si128(x12, x11);
x15 = _mm_xor_si128(x15, x3);
x15 = _mm_xor_si128(x15, SHAVITE256_XOR4);
x13 = _mm_xor_si128(x13, x12);
x14 = _mm_xor_si128(x14, x13);
x15 = _mm_xor_si128(x15, x14);
x6 = x8;
x8 = _mm_xor_si128(x8, x1);
x8 = _mm_aesenc_si128(x8, x9);
x8 = _mm_aesenc_si128(x8, x10);
x8 = _mm_aesenc_si128(x8, x2);
x0 = _mm_xor_si128(x0, x8);
x8 = x6;
x6 = x11;
x11 = _mm_xor_si128(x11, x0);
x11 = _mm_aesenc_si128(x11, x12);
x11 = _mm_aesenc_si128(x11, x13);
x11 = _mm_aesenc_si128(x11, x2);
x1 = _mm_xor_si128(x1, x11);
x11 = x6;
mixing();
x6 = x14;
x14 = _mm_xor_si128(x14, x1);
x14 = _mm_aesenc_si128(x14, x15);
x14 = _mm_aesenc_si128(x14, x8);
x14 = _mm_aesenc_si128(x14, x2);
x0 = _mm_xor_si128(x0, x14);
x14 = x6;
x6 = x9;
x9 = _mm_xor_si128(x9, x0);
x9 = _mm_aesenc_si128(x9, x10);
x9 = _mm_aesenc_si128(x9, x11);
x9 = _mm_aesenc_si128(x9, x2);
x1 = _mm_xor_si128(x1, x9);
x9 = x6;
x0 = _mm_xor_si128(x0, ptxt1);
x1 = _mm_xor_si128(x1, ptxt2);
_mm_store_si128(chain, x0);
_mm_store_si128(chain + 1, x1);
}
int main(int argc, char *argv[])
{
__m128i chain[2], mess[4];
unsigned char *p;
p = (unsigned char *)mess;
for (int x=0; x < 64; x++)
p[x] = x + argc;
p = (unsigned char *)chain;
for (int x=0; x < 32; x++)
p[x] = x + argc;
unsigned long long counter = 0x1234567812345678ull + argc;
unsigned char s[32] = {0};
Compress256(mess, chain, counter, s);
for (int x=0; x < 32; x++)
printf("%02x ", p[x]);
printf("\n");
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
unsigned char res = 0;
for (int x=0; x < 400000; x++)
{
Compress256(mess, chain, counter, s);
res ^= *p;
}
clock_gettime(CLOCK_MONOTONIC, &end);
unsigned long long delta_us = (end.tv_sec - start.tv_sec) * 1000000ull + (end.tv_nsec - start.tv_nsec) / 1000ull;
printf("%x: %llu\n", res, delta_us);
}