Occurrences of the identifier RB2 under /linux/arch/x86/crypto/ (one entry per
file: the line number, then the matching source line; truncated listings end
with [all …]):
sm4-aesni-avx-asm_64.S
      37  #define RB2 %xmm14    (macro)
     271  vpshufb RTMP2, RB2, RB2;
     355  vpshufb RTMP2, RB2, RB2;
     385  vmovdqa RB0, RB2;
     391  vmovdqu (6 * 16)(%rdx), RB2;
     407  vmovdqu RB2, (6 * 16)(%rsi);
     459  vpshufb RBSWAP, RTMP0, RB2;
     476  vpxor (6 * 16)(%rdx), RB2, RB2;
     525  vpxor (5 * 16)(%rdx), RB2, RB2;
     564  vmovdqu 5 * 16(%rdx), RB2;
          [all …]
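In each file the first hit is the definition: kernel .S sources are fed through
the C preprocessor, so a plain #define binds a role-based name such as RB2 to a
concrete SIMD register, and every later hit is an ordinary use of that alias.
A minimal sketch of the pattern (the label below is illustrative, not the
kernel's code; only the #define style and the vpshufb byte-swap idiom are
taken from the listing above):

  /* Illustrative only: the alias values match sm4-aesni-avx-asm_64.S,
   * but this label and sequence are a made-up example. */
  #define RTMP2	%xmm13
  #define RB2	%xmm14

  .text
  bswap_rb2_example:
  	/* vpshufb reorders the bytes of RB2 according to the byte-index
  	 * mask in RTMP2: the endianness swap seen at lines 271 and 355. */
  	vpshufb	RTMP2, RB2, RB2
  	ret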
sm4-aesni-avx2-asm_64.S
      38  #define RB2 %ymm14    (macro)
     177  vpshufb RTMP2, RB2, RB2;
     267  vpshufb RTMP2, RB2, RB2;
     328  vpshufb RTMP3, RTMP0, RB2;
     376  vpxor (6 * 32)(%rdx), RB2, RB2;
     385  vmovdqu RB2, (6 * 32)(%rsi);
     415  vmovdqu (6 * 32)(%rdx), RB2;
     428  vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
     439  vmovdqu RB2, (6 * 32)(%rsi);
     486  vpxor (6 * 32)(%rdx), RB2, RB2;
          [all …]
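The avx2 variant is the same code widened: RB2 moves from %xmm14 to %ymm14, so
each register carries two 128-bit SM4 blocks and every memory stride doubles
from 16 to 32 bytes, which is why the offsets above read (6 * 32) instead of
(6 * 16). A sketch of the correspondence (comments are mine, not the source's):

  #define RB2	%ymm14

  	/* AVX2: one 32-byte load brings in two SM4 blocks at once. */
  	vmovdqu	(6 * 32)(%rdx), RB2
  	/* The AVX version's equivalent: vmovdqu (6 * 16)(%rdx), RB2 */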
cast6-avx-x86_64-asm_64.S
      43  #define RB2 %xmm5    (macro)
     266  inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
     290  outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
     314  inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
     337  outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
     354  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     358  store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     377  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     381  store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     402  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
          [all …]
twofish-avx-x86_64-asm_64.S
      43  #define RB2 %xmm5    (macro)
     249  inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
     268  outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
     290  inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
     308  outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
     323  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     327  store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
     343  load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
     347  store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     366  load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
          [all …]
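Both cast6 and twofish process eight 16-byte blocks per call, split across two
four-register groups (RA1..RD1 and RA2..RD2), and use load_8way/store_8way to
move those groups between memory and registers. Assuming those helpers are the
usual cpp-macro wrappers over unaligned moves, they look along these lines
(the _sketch suffix marks this as my reconstruction, not the kernel's macro):

  #define load_8way_sketch(src, x0, x1, x2, x3, x4, x5, x6, x7) \
  	vmovdqu (0 * 16)(src), x0; \
  	vmovdqu (1 * 16)(src), x1; \
  	vmovdqu (2 * 16)(src), x2; \
  	vmovdqu (3 * 16)(src), x3; \
  	vmovdqu (4 * 16)(src), x4; \
  	vmovdqu (5 * 16)(src), x5; \
  	vmovdqu (6 * 16)(src), x6; \
  	vmovdqu (7 * 16)(src), x7;

store_8way would mirror this with the operands reversed. Passing the register
names as macro arguments is what lets the twofish hits above reorder the
groups (e.g. store_8way(%r11, RC1, RD1, RA1, RB1, ...)) without extra moves.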
serpent-avx-x86_64-asm_64.S
      38  #define RB2 %xmm7    (macro)
     565  read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     602  write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     619  read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     656  write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
     669  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     673  store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     687  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     691  store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
     705  load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
          [all …]
serpent-avx2-asm_64.S
      33  #define RB2 %ymm5    (macro)
     565  read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     602  write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     619  read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     656  write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
     671  load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     675  store_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     693  load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
     697  store_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
     715  load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
          [all …]
serpent-sse2-x86_64-asm_64.S
      29  #define RB2 %xmm6    (macro)
     634  read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     676  write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     682  xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     698  read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
     736  write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
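The sse2 variant predates AVX's three-operand forms, so its helpers take an
explicit block pointer (%rax above) and work destructively with two-operand
instructions. The xor_blocks hit at line 682 suggests a step that xors blocks
from memory into the register set; a sketch of how such a helper could look in
that two-operand style (the _sketch name and the scratch-register convention
are my assumptions, not the file's actual macro):

  #define xor_blocks_sketch(in, x0, x1, x2, x3, t0) \
  	movdqu (0 * 16)(in), t0; \
  	pxor t0, x0; \
  	movdqu (1 * 16)(in), t0; \
  	pxor t0, x1; \
  	movdqu (2 * 16)(in), t0; \
  	pxor t0, x2; \
  	movdqu (3 * 16)(in), t0; \
  	pxor t0, x3;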