/sysdeps/aarch64/

strchrnul.S
    34: #define tmp1 x1    (macro)
    68: fmov tmp1, dend
    69: lsr tmp1, tmp1, tmp2    /* Mask padding bits. */
    70: cbz tmp1, L(loop)
    72: rbit tmp1, tmp1
    73: clz tmp1, tmp1
    83: fmov tmp1, dend
    84: cbz tmp1, L(loop)
    88: fmov tmp1, dend
    90: rbit tmp1, tmp1
    [all …]
strchr.S
    34: #define tmp1 x1    (macro)
    75: fmov tmp1, dend
    76: lsr tmp1, tmp1, tmp3
    79: rbit tmp1, tmp1
    80: clz tmp1, tmp1
    83: tst tmp1, 2
    94: fmov tmp1, dend
    101: fmov tmp1, dend
    107: rbit tmp1, tmp1
    109: clz tmp1, tmp1
    [all …]
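The strchr/strchrnul matches above show the usual AdvSIMD tail: the vector compare result is narrowed into a 64-bit mask with four bits per byte, moved into tmp1, and the first hit is located with rbit + clz. A minimal C sketch of that last step, assuming such a nibble-per-byte mask (the helper name is illustrative, not glibc's):

    #include <stdint.h>

    /* rbit + clz on AArch64 is a count-trailing-zeros; with a 4-bits-per-byte
       match mask, the index of the first matching byte is ctz(mask) / 4.
       Compilers typically lower __builtin_ctzll to exactly rbit + clz here.  */
    static inline unsigned first_match_index (uint64_t mask)
    {
      return __builtin_ctzll (mask) >> 2;   /* mask must be nonzero */
    }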
strncmp.S
    43: #define tmp1 x8    (macro)
    74: eor tmp1, src1, src2
    76: tst tmp1, #7
    90: sub tmp1, data1, zeroones
    122: add tmp1, limit, 8
    149: sub tmp1, tmp3, zeroones
    151: bic has_nul, tmp1, tmp2
    246: ldp tmp1, tmp2, [src2], #16
    254: LS_FW data2, tmp1, offset
    255: LS_BK tmp1, tmp2, neg_offset
    [all …]
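The sub/bic pattern with zeroones at lines 90, 149 and 151 is the classic word-at-a-time NUL detector. A hedged C sketch of the same test (names are mine, not glibc's):

    #include <stdbool.h>
    #include <stdint.h>

    /* (x - 0x0101..) sets the top bit of every byte that was zero; AND-ing
       with ~(x | 0x7f7f..) keeps a top bit only where the original byte's
       top bit was clear, so the result is nonzero exactly when x contains
       a zero byte.  */
    static inline bool has_zero_byte (uint64_t x)
    {
      const uint64_t zeroones = 0x0101010101010101ULL;
      const uint64_t sevenf   = 0x7f7f7f7f7f7f7f7fULL;
      return ((x - zeroones) & ~(x | sevenf)) != 0;
    }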
memcpy.S
    52: #define tmp1 x14    (macro)
    118: lsr tmp1, count, 1
    121: ldrb B_lw, [src, tmp1]
    123: strb B_lw, [dstin, tmp1]
    168: and tmp1, dstin, 15
    170: sub src, src, tmp1
    235: sub tmp1, dstin, src
    236: cbz tmp1, L(copy0)
    237: cmp tmp1, count
    243: and tmp1, dstend, 15
    [all …]
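Lines 235-237 compute dstin - src and compare it against count, the standard memmove overlap test that decides whether a forward copy is safe. A sketch of the same check in C (helper name is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* A forward (low-to-high) copy is safe unless dst starts inside the
       source range, i.e. unless (dst - src) as an unsigned value < count.  */
    static inline bool forward_copy_is_safe (const void *dst, const void *src,
                                             size_t count)
    {
      return (uintptr_t) dst - (uintptr_t) src >= count;
    }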
memset.S
    108: mrs tmp1, dczid_el0
    163: add tmp1, zva_len, 64    /* Max alignment bytes written. */
    164: cmp count, tmp1
    168: add tmp1, dst, zva_len
    170: subs count, tmp1, dst    /* Actual alignment bytes to write. */
    171: bic tmp1, tmp1, tmp2    /* Aligned dc zva start address. */
    177: 2: mov dst, tmp1
    178: sub count, dstend, tmp1    /* Remaining bytes to write. */
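The mrs at line 108 reads DCZID_EL0 to size the DC ZVA zeroing block, and the add/bic pair at lines 168 and 171 rounds dst up to the first such block, assuming tmp2 holds zva_len - 1 (not shown among the matches). A rough C sketch of that arithmetic, assuming the architectural DCZID_EL0 layout with the BS field in bits 3:0 and ignoring the DZP bit (names are illustrative):

    #include <stdint.h>

    /* Bytes covered by one DC ZVA: 4 << DCZID_EL0.BS.  */
    static inline uint64_t zva_block_bytes (uint64_t dczid_el0)
    {
      return 4u << (dczid_el0 & 0xf);          /* BS = 4 -> 64-byte blocks */
    }

    /* First block-aligned address strictly above dst (mirrors add + bic).  */
    static inline uint64_t zva_align_up (uint64_t dst, uint64_t block)
    {
      return (dst + block) & ~(block - 1);
    }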
/sysdeps/arm/armv7/

strcmp.S
    126: bic tmp1, tmp1, #7
    279: prepare_mask tmp1, tmp1
    289: lsls tmp1, tmp1, #31
    297: subs tmp1, tmp1, data2
    304: subs tmp1, tmp1, data2
    311: subs tmp1, tmp1, data2
    320: subs tmp1, tmp1, data2
    327: subs tmp1, tmp1, data2
    368: eor tmp1, tmp1, data1
    410: eor tmp1, tmp1, data1
    [all …]
/sysdeps/aarch64/multiarch/

memchr_nosimd.S
    42: #define tmp1 x2    (macro)
    96: lsl tmp1, srcin, 3
    99: lsr tmp3, tmp2, tmp1
    101: lsl tmp3, tmp2, tmp1
    118: orn tmp1, data1, tmp3
    161: sub tmp1, data1, zeroones
    167: bic has_chr1, tmp1, tmp3
    171: ccmp tmp1, 0, 0, ne
    200: sub tmp1, data1, zeroones
    202: bic has_chr1, tmp1, tmp3
    [all …]
memcpy_falkor.S
    32: #define tmp1 x14    (macro)
    89: sub tmp1, count, 1
    94: tbz tmp1, 6, 1f
    169: and tmp1, src, 15
    172: sub dst, dstin, tmp1
    173: add count, count, tmp1
    231: sub tmp1, dstin, src
    243: and tmp1, src, 15
    245: sub dst, dstin, tmp1
    285: and tmp1, srcend, 15
    [all …]
memcpy_thunderx.S
    56: #define tmp1 x14    (macro)
    88: sub tmp1, dstin, src
    111: sub tmp1, count, 1
    115: tbz tmp1, 5, 1f
    148: lsr tmp1, count, 1
    190: and tmp1, dstin, 15
    193: sub src, src, tmp1
    221: and tmp1, dstin, 15
    224: sub src, src, tmp1
    265: cbz tmp1, 3f
    [all …]
strlen_asimd.S
    36: #define tmp1 x4    (macro)
    91: and tmp1, srcin, MIN_PAGE_SIZE - 1
    92: cmp tmp1, MIN_PAGE_SIZE - 32
    107: sub tmp1, data1, zeroones
    111: bics has_nul1, tmp1, tmp2
    122: clz tmp1, has_nul1
    123: add len, len, tmp1, lsr 3
    134: sub tmp1, data1, zeroones
    138: bics has_nul1, tmp1, tmp2
    149: clz tmp1, has_nul1
    [all …]
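Lines 91-92 implement the usual page-cross guard: a wide 32-byte load may only be issued when it cannot run off the current page. A minimal C sketch of the same test, assuming the 4 KiB MIN_PAGE_SIZE the routine relies on (helper name is mine):

    #include <stdbool.h>
    #include <stdint.h>

    #define MIN_PAGE_SIZE 4096

    /* True when a 32-byte load starting at addr would cross into the next
       page, i.e. when the page offset exceeds MIN_PAGE_SIZE - 32.  */
    static inline bool load32_crosses_page (uintptr_t addr)
    {
      return (addr & (MIN_PAGE_SIZE - 1)) > MIN_PAGE_SIZE - 32;
    }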
memcpy_advsimd.S
    41: #define tmp1 x14    (macro)
    108: lsr tmp1, count, 1
    111: ldrb B_lw, [src, tmp1]
    113: strb B_lw, [dstin, tmp1]
    150: and tmp1, src, 15
    152: sub dst, dstin, tmp1
    206: sub tmp1, dstin, src
    207: cbz tmp1, L(move0)
    208: cmp tmp1, count
    215: and tmp1, srcend, 15
    [all …]
memcpy_thunderx2.S
    54: #define tmp1 x14    (macro)
    107: sub tmp1, dstin, src
    109: ccmp tmp1, count, 2, hi
    139: and tmp1, src, 15
    199: lsr tmp1, count, 1
    221: sub dst, dstin, tmp1
    259: sub dst, dstin, tmp1
    262: and tmp1, dst, 15
    328: str G_q, [dst, tmp1]
    414: cbz tmp1, 3f
    [all …]
memset_a64fx.S
    76: 1: lsl tmp1, vector_length, 3
    77: cmp count, tmp1
    97: sub count, count, tmp1
    107: add dst, dst, tmp1
    108: subs count, count, tmp1
    110: add count, count, tmp1
/sysdeps/sparc/sparc64/multiarch/

sub_n-vis3.S
    27: #define tmp1 %g1    (macro)
    40: ldx [s2_ptr + 0x00], tmp1
    48: xnor tmp1, %g0, tmp1
    49: addxccc tmp1, tmp2, tmp1
    50: stx tmp1, [res_ptr - 0x10]
    60: ldx [s2_ptr + 0x00], tmp1
    62: xnor tmp1, %g0, tmp1
    63: addxccc tmp1, tmp2, tmp1
    64: stx tmp1, [res_ptr + 0x00]
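Here xnor with %g0 is a bitwise NOT, so each limb of the subtrahend is complemented and addxccc chains the carry: a - b is computed as a + ~b + carry. A portable C sketch of the same limb loop (types and names are illustrative, not GMP's):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t mp_limb_t;

    /* rp[] = ap[] - bp[] over n limbs; returns the borrow out.
       Mirrors the xnor + addxccc chain, with the carry seeded to 1.  */
    static mp_limb_t sub_n_sketch (mp_limb_t *rp, const mp_limb_t *ap,
                                   const mp_limb_t *bp, size_t n)
    {
      mp_limb_t carry = 1;
      for (size_t i = 0; i < n; i++)
        {
          mp_limb_t a = ap[i], b = ~bp[i];
          mp_limb_t s = a + b + carry;
          carry = (s < a) || (carry && s == a);
          rp[i] = s;
        }
      return 1 - carry;   /* borrow out */
    }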
mul_1-vis3.S
    28: #define tmp1 %g1    (macro)
    41: ldx [s1_ptr + 0x00], tmp1
    43: mulx tmp1, s2_limb, tmp3
    45: umulxhi tmp1, s2_limb, tmp2
    47: mulx tmp4, s2_limb, tmp1
    53: addcc carry, tmp1, tmp1
    56: stx tmp1, [res_ptr - 0x08]
    62: ldx [s1_ptr + 0x00], tmp1
    63: mulx tmp1, s2_limb, tmp3
    64: umulxhi tmp1, s2_limb, tmp2
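mulx and umulxhi deliver the low and high halves of a 64x64 -> 128-bit product, and the carry is rippled from limb to limb. A C sketch of the same loop using unsigned __int128 (a GCC/Clang extension; names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t mp_limb_t;

    /* rp[] = up[] * v over n limbs; returns the final carry limb.  */
    static mp_limb_t mul_1_sketch (mp_limb_t *rp, const mp_limb_t *up,
                                   size_t n, mp_limb_t v)
    {
      mp_limb_t carry = 0;
      for (size_t i = 0; i < n; i++)
        {
          unsigned __int128 p = (unsigned __int128) up[i] * v;
          mp_limb_t lo = (mp_limb_t) p;          /* mulx    */
          mp_limb_t hi = (mp_limb_t) (p >> 64);  /* umulxhi */
          rp[i] = lo + carry;
          carry = hi + (rp[i] < lo);             /* hi <= 2^64 - 2, so no overflow */
        }
      return carry;
    }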
add_n-vis3.S
    27: #define tmp1 %g1    (macro)
    40: ldx [s2_ptr + 0x00], tmp1
    48: addxccc tmp1, tmp2, tmp1
    49: stx tmp1, [res_ptr - 0x10]
    58: ldx [s2_ptr + 0x00], tmp1
    60: addxccc tmp1, tmp2, tmp1
    61: stx tmp1, [res_ptr + 0x00]
submul_1-vis3.S
    28: #define tmp1 %g1    (macro)
    46: ldx [s1_ptr + 0x00], tmp1
    50: mulx tmp1, s2_limb, tmp5
    52: umulxhi tmp1, s2_limb, tmp6
    73: ldx [s1_ptr + 0x00], tmp1
    75: mulx tmp1, s2_limb, tmp5
    76: umulxhi tmp1, s2_limb, tmp6
addmul_1-vis3.S
    28: #define tmp1 %g1    (macro)
    46: ldx [s1_ptr + 0x00], tmp1
    50: mulx tmp1, s2_limb, tmp5
    52: umulxhi tmp1, s2_limb, tmp6
    73: ldx [s1_ptr + 0x00], tmp1
    75: mulx tmp1, s2_limb, tmp5
    76: umulxhi tmp1, s2_limb, tmp6
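addmul_1 differs from mul_1 only in that the product is accumulated into the existing result limbs. A matching C sketch, with the same caveats as the mul_1 sketch above (illustrative names, __int128 extension):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t mp_limb_t;

    /* rp[] += up[] * v over n limbs; returns the final carry limb.
       up[i]*v + rp[i] + carry always fits in 128 bits.  */
    static mp_limb_t addmul_1_sketch (mp_limb_t *rp, const mp_limb_t *up,
                                      size_t n, mp_limb_t v)
    {
      mp_limb_t carry = 0;
      for (size_t i = 0; i < n; i++)
        {
          unsigned __int128 p = (unsigned __int128) up[i] * v + rp[i] + carry;
          rp[i] = (mp_limb_t) p;
          carry = (mp_limb_t) (p >> 64);
        }
      return carry;
    }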
/sysdeps/arm/armv7/multiarch/

memcpy_impl.S
    72: #define tmp1 r3    (macro)
    97: rsb tmp1, tmp1, #((7 * 8) - PC_OFS + INSN_SIZE)
    98: add pc, pc, tmp1
    110: rsb tmp1, tmp1, #((15 * 4) - PC_OFS/2 + INSN_SIZE/2)
    140: rsb tmp1, tmp1, #((\steps << \log2_bytes_per_step) \
    146: 0: add tmp1, pc, tmp1, lsl #(ARM_BX_ALIGN_LOG2 - \log2_bytes_per_step)
    147: bx tmp1
    302: add dst, dst, tmp1
    303: add src, src, tmp1
    326: and tmp1, dst, #7
    [all …]
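The rsb / add pc, pc, tmp1 / bx tmp1 sequences are computed branches into an unrolled copy sequence, so only the needed tail steps execute. A rough C analogue of the idea is a switch that falls through an unrolled byte copy, Duff's-device style (purely illustrative; the assembly scales the jump offset by instruction size rather than using a switch):

    #include <stddef.h>

    /* Copy the 0-7 trailing bytes by entering an unrolled sequence at the
       right point and falling through to the end.  */
    static void copy_tail (unsigned char *dst, const unsigned char *src, size_t n)
    {
      switch (n & 7)
        {
        case 7: dst[6] = src[6]; /* fall through */
        case 6: dst[5] = src[5]; /* fall through */
        case 5: dst[4] = src[4]; /* fall through */
        case 4: dst[3] = src[3]; /* fall through */
        case 3: dst[2] = src[2]; /* fall through */
        case 2: dst[1] = src[1]; /* fall through */
        case 1: dst[0] = src[0]; /* fall through */
        case 0: break;
        }
    }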
/sysdeps/alpha/fpu/

s_rint.c
    32: double tmp1, new_x;    (local, in __rint)
    35: : "=f"(new_x), "=&f"(tmp1)    (in __rint)
s_rintf.c
    36: float tmp1, tmp2, new_x;    (local, in __rintf)
    41: : "=f"(new_x), "=&f"(tmp1), "=&f"(tmp2)    (in __rintf)
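The inline-asm outputs in both Alpha routines are consistent with the classic rounding trick: adding and then subtracting a large power of two (2^52 for double, 2^23 for float) rounds a value to an integer in the current rounding mode, and the asm keeps the compiler from folding the two operations away. A hedged C sketch of that trick for double, not Alpha's actual code (volatile stands in for the asm barrier):

    #include <math.h>

    static double rint_sketch (double x)
    {
      const double two52 = 0x1.0p52;
      if (!(fabs (x) < two52))   /* already integral, or NaN */
        return x;
      /* volatile keeps the compiler from folding (x +/- 2^52) -/+ 2^52 to x.
         Note: the sign of a zero result is not preserved by this sketch.  */
      volatile double t = (x < 0.0) ? x - two52 : x + two52;
      return (x < 0.0) ? t + two52 : t - two52;
    }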
/sysdeps/arm/armv6t2/

strlen.S
    48: #define tmp1 r4    /* Overlaps const_0 */    (macro)
    62: ands tmp1, srcin, #7    /* (8 - bytes) to alignment. */
    127: and tmp2, tmp1, #3
    128: rsb result, tmp1, #0
    130: tst tmp1, #4
/sysdeps/sparc/sparc32/sparcv9/

mul_1.S
    28: #define tmp1 %g1    (macro)
    41: lduw [s1_ptr + 0x00], tmp1
    43: mulx tmp1, s2_limb, tmp3
    60: lduw [s1_ptr + 0x00], tmp1
    61: mulx tmp1, s2_limb, tmp3
addmul_1.S
    30: #define tmp1 %l0    (macro)
    46: lduw [s1_ptr + 0x00], tmp1
    50: mulx tmp1, s2_limb, tmp64_1
    69: lduw [s1_ptr + 0x00], tmp1
    71: mulx tmp1, s2_limb, tmp64_1
submul_1.S
    30: #define tmp1 %l0    (macro)
    46: lduw [s1_ptr + 0x00], tmp1
    50: mulx tmp1, s2_limb, tmp64_1
    69: lduw [s1_ptr + 0x00], tmp1
    71: mulx tmp1, s2_limb, tmp64_1