/sysdeps/x86_64/

dl-trampoline.S
    56  #define VMOVA vmovdqa64
    62  #undef VMOVA
    66  #define VMOVA vmovdqa
    72  #undef VMOVA
    77  #define VMOVA movaps
    84  #undef VMOVA
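Read together, the define/undef pairs show the trampoline body being instantiated once per vector width, with VMOVA bound to the widest aligned move each configuration uses: vmovdqa64 (EVEX, %zmm), vmovdqa (VEX, %ymm), movaps (SSE, %xmm). A minimal sketch of that include-the-body-per-width idiom, assuming VEC() and VECTOR_SIZE are rebound alongside VMOVA; everything other than those three names is illustrative, not taken from the file:

/* Sketch: one trampoline body per vector width, produced by re-including
   dl-trampoline.h under different macro bindings.  */
#define VECTOR_SIZE	64
#define VMOVA		vmovdqa64	/* EVEX aligned move, %zmm registers */
#define VEC(i)		zmm##i
#include "dl-trampoline.h"
#undef VEC
#undef VMOVA
#undef VECTOR_SIZE

#define VECTOR_SIZE	32
#define VMOVA		vmovdqa		/* VEX aligned move, %ymm registers */
#define VEC(i)		ymm##i
#include "dl-trampoline.h"
#undef VEC
#undef VMOVA
#undef VECTOR_SIZE

#define VECTOR_SIZE	16
#define VMOVA		movaps		/* SSE aligned move, %xmm registers */
#define VEC(i)		xmm##i
#include "dl-trampoline.h"
#undef VEC
#undef VMOVA
#undef VECTOR_SIZE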
dl-trampoline.h
    233  VMOVA %VEC(0), (LR_VECTOR_OFFSET)(%rsp)
    234  VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
    235  VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
    236  VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
    237  VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
    238  VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
    286  2: VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
    436  VMOVA %VEC(0), LRV_VECTOR0_OFFSET(%rcx)
    437  VMOVA %VEC(1), LRV_VECTOR1_OFFSET(%rcx)
    466  VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)
    [all …]
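Lines 233-238 spill the vector argument registers into the La_x86_64_regs area that _dl_runtime_profile hands to the LD_AUDIT hooks, line 286 reloads them after the hook returns, and the LRV_VECTOR* lines do the same for the return value in La_x86_64_retval. Under the three bindings from dl-trampoline.S, the spill at line 233 expands to one of the following; each is an aligned store, so the frame holding these slots has to be aligned to the vector size in use (a sketch of the expansions, not text quoted from the file):

        vmovdqa64 %zmm0, (LR_VECTOR_OFFSET)(%rsp)   /* VECTOR_SIZE == 64 */
        vmovdqa   %ymm0, (LR_VECTOR_OFFSET)(%rsp)   /* VECTOR_SIZE == 32 */
        movaps    %xmm0, (LR_VECTOR_OFFSET)(%rsp)   /* VECTOR_SIZE == 16 */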
memset.S
    29  #define VMOVA movaps

memmove.S
    27  #define VMOVA movaps

/sysdeps/x86_64/multiarch/

strchr-evex.S
    28  # define VMOVA vmovdqa64
    193  VMOVA (VEC_SIZE)(%rdi), %YMM1
    205  VMOVA (VEC_SIZE * 2)(%rdi), %YMM1
    213  VMOVA (VEC_SIZE * 3)(%rdi), %YMM1
    223  VMOVA (VEC_SIZE * 4)(%rdi), %YMM1
    239  VMOVA (VEC_SIZE * 4)(%rdi), %YMM1
    240  VMOVA (VEC_SIZE * 5)(%rdi), %YMM2
    241  VMOVA (VEC_SIZE * 6)(%rdi), %YMM3
    242  VMOVA (VEC_SIZE * 7)(%rdi), %YMM4
    344  VMOVA (%rdi), %YMM1
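Lines 239-242 are the 4x-unrolled aligned loads of the main loop; 193-223 and 344 are single-vector checks used once %rdi has been aligned to VEC_SIZE. Each load is followed by a test for "byte equals the search character or byte is NUL". The file fuses that test with a min/compare trick; the sketch below uses a plainer two-compare equivalent so the role of the aligned load stays visible. %YMMMATCH (broadcast character), %YMMZERO (all zeros), and the label are assumptions made for illustration, not names from the file:

        VMOVA    (%rdi), %YMM1
        vpcmpeqb %YMMMATCH, %YMM1, %k1   /* bytes equal to c */
        vpcmpeqb %YMMZERO, %YMM1, %k2    /* bytes equal to NUL */
        kord     %k1, %k2, %k1
        kmovd    %k1, %eax
        testl    %eax, %eax
        jnz      L(found_in_vec)         /* illustrative label */
        addq     $VEC_SIZE, %rdi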
memmove-avx-unaligned-erms.S
    6  # define VMOVA vmovdqa

strrchr-evex.S
    28  # define VMOVA vmovdqa64
    105  VMOVA (%rdi), %YMM1
    138  VMOVA (%rdi), %YMM1
    150  VMOVA (%rdi), %YMM1
    162  VMOVA (%rdi), %YMM1
    174  VMOVA (%rdi), %YMM1

strcpy-evex.S
    31  # define VMOVA vmovdqa64
    120  VMOVA (%rsi, %rcx), %YMM2
    122  VMOVA VEC_SIZE(%rsi, %rcx), %YMM2
    227  VMOVA (%rsi), %YMM4
    228  VMOVA VEC_SIZE(%rsi), %YMM5
    248  VMOVA (%rsi), %YMM4
    250  VMOVA VEC_SIZE(%rsi), %YMM5
    843  VMOVA %YMMZERO, (%rdi)
    854  VMOVA %YMMZERO, (%rdi)
    859  VMOVA %YMMZERO, (%rdi)
    [all …]

memmove-avx-unaligned-erms-rtm.S
    6  # define VMOVA vmovdqa

memmove-vec-unaligned-erms.S
    540  VMOVA %VEC(1), (%rdi)
    541  VMOVA %VEC(2), VEC_SIZE(%rdi)
    542  VMOVA %VEC(3), (VEC_SIZE * 2)(%rdi)
    543  VMOVA %VEC(4), (VEC_SIZE * 3)(%rdi)
    592  VMOVA %VEC(1), (VEC_SIZE * 3)(%rcx)
    593  VMOVA %VEC(2), (VEC_SIZE * 2)(%rcx)
    594  VMOVA %VEC(3), (VEC_SIZE * 1)(%rcx)
    830  VMOVA %VEC(0), (%rdi)
    831  VMOVA %VEC(1), VEC_SIZE(%rdi)
    904  VMOVA %VEC(0), (%rdi)
    [all …]
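The stores at 540-543 form the body of the forward large-copy loop: four vectors are read from the (possibly unaligned) source with VMOVU and written back with VMOVA once the destination pointer has been rounded to a VEC_SIZE boundary; 592-594 store in the reverse direction, presumably the backward-copy counterpart. A sketch of the forward loop under those assumptions, with the bookkeeping simplified (%rdx as the destination end pointer and the label are assumed; the tail is handled outside the loop):

L(loop_4x_vec_forward):                  /* illustrative label */
        VMOVU   (%rsi), %VEC(1)
        VMOVU   VEC_SIZE(%rsi), %VEC(2)
        VMOVU   (VEC_SIZE * 2)(%rsi), %VEC(3)
        VMOVU   (VEC_SIZE * 3)(%rsi), %VEC(4)
        subq    $-(VEC_SIZE * 4), %rsi
        VMOVA   %VEC(1), (%rdi)
        VMOVA   %VEC(2), VEC_SIZE(%rdi)
        VMOVA   %VEC(3), (VEC_SIZE * 2)(%rdi)
        VMOVA   %VEC(4), (VEC_SIZE * 3)(%rdi)
        subq    $-(VEC_SIZE * 4), %rdi
        cmpq    %rdi, %rdx               /* %rdx = destination end, assumed */
        ja      L(loop_4x_vec_forward)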
memset-avx2-unaligned-erms.S
    11  # define VMOVA vmovdqa

memset-avx512-unaligned-erms.S
    14  # define VMOVA vmovdqa64

memset-evex-unaligned-erms.S
    14  # define VMOVA vmovdqa64

memmove-avx512-unaligned-erms.S
    26  # define VMOVA vmovdqa64

memmove-evex-unaligned-erms.S
    26  # define VMOVA vmovdqa64

memset-vec-unaligned-erms.S
    322  VMOVA %VEC(0), LOOP_4X_OFFSET(%LOOP_REG)
    323  VMOVA %VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
    324  VMOVA %VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
    325  VMOVA %VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
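Lines 322-325 are the 4-vector store body of the memset main loop: %VEC(0) holds the fill byte broadcast across the whole register, and every store lands on a VEC_SIZE-aligned address. A sketch of the surrounding loop; the advance, the end register %END_REG, and the label are assumptions made for illustration:

L(loop_4x):                              /* illustrative label */
        VMOVA   %VEC(0), LOOP_4X_OFFSET(%LOOP_REG)
        VMOVA   %VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
        VMOVA   %VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
        VMOVA   %VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
        subq    $-(VEC_SIZE * 4), %LOOP_REG
        cmpq    %LOOP_REG, %END_REG      /* %END_REG assumed: last full chunk */
        ja      L(loop_4x)
        /* Trailing, possibly overlapping stores finish the buffer.  */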
strlen-evex.S
    27  # define VMOVA vmovdqa64
    243  VMOVA (VEC_SIZE * 4)(%rdi), %YMM1
    254  VMOVA (VEC_SIZE * 6)(%rdi), %YMM3
    292  VMOVA (VEC_SIZE * 4)(%rdi), %YMM1

strcat-evex.S
    28  # define VMOVA vmovdqa64
    196  VMOVA (%rax), %YMM0
    197  VMOVA (VEC_SIZE * 2)(%rax), %YMM1

strcmp-evex.S
    39  # define VMOVA vmovdqa64
    361  VMOVA (%rax), %YMM0
    362  VMOVA VEC_SIZE(%rax), %YMM2
    363  VMOVA (VEC_SIZE * 2)(%rax), %YMM4
    364  VMOVA (VEC_SIZE * 3)(%rax), %YMM6

memrchr-evex.S
    23  # define VMOVA vmovdqa64