/sysdeps/alpha/

divq.S:
    109  stq t5, 40(sp)
    123  mov $31, t5
    157  bne t5, $fix_sign_out
    162  ldq t5, 40(sp)
    171  cfi_restore (t5)
    232  cmplt AT, 0, t5
    236  s4addq AT, t5, t5
    241  s8addq AT, t5, t5
    257  and t5, 8, AT
    261  and t5, 4, AT
    [all …]

remq.S:
    110  stq t5, 40(sp)
    124  mov $31, t5
    158  bne t5, $fix_sign_out
    163  ldq t5, 40(sp)
    172  cfi_restore (t5)
    233  cmplt X, 0, t5
    235  cmovne t5, t0, X
    239  s4addq AT, t5, t5
    254  and t5, 4, AT
    259  cmovlbs t5, t4, X
    [all …]
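The divq.S and remq.S hits above are the sign-handling paths of Alpha's software divide (the architecture has no integer divide instruction): the operands' signs are tested with cmplt, the magnitudes are divided, and the result's sign is patched on the way out. A minimal C sketch of that fixup, with the routines' shift-and-subtract loop stood in for by `/`; names are illustrative, not the glibc code:

    /* Sketch only: divide magnitudes, then restore the sign.  */
    static long
    signed_divq (long a, long b)
    {
      unsigned long ua = a < 0 ? -(unsigned long) a : (unsigned long) a;
      unsigned long ub = b < 0 ? -(unsigned long) b : (unsigned long) b;
      unsigned long q = ua / ub;   /* stands in for the real bit loop */
      return ((a < 0) != (b < 0)) ? -(long) q : (long) q;
    }
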
rawmemchr.S:
    37  sll a1, 8, t5      # e0 : replicate the search character
    40  or t5, a1, a1      # e0 :
    42  sll a1, 16, t5     # e0 :
    46  or t5, a1, a1      # .. e1 :
    47  sll a1, 32, t5     # e0 :
    50  or t5, a1, a1      # e0 :

strrchr.S:
    43  sll a1, 8, t5      # e0 : replicate our test character
    45  or t5, a1, a1      # e0 :
    47  sll a1, 16, t5     # e0 :
    49  or t5, a1, a1      # e0 :
    51  sll a1, 32, t5     # e0 :
    54  or t5, a1, a1      # .. e1 : character replication complete
    77  subq t4, 1, t5     # e0 : build a mask of the bytes upto...
    78  or t4, t5, t4      # e1 : ... and including the null

strchr.S:
    43  sll a1, 8, t5      # e0 : replicate the search character
    45  or t5, a1, a1      # e0 :
    47  sll a1, 16, t5     # e0 :
    50  or t5, a1, a1      # .. e1 :
    51  sll a1, 32, t5     # e0 :
    53  or t5, a1, a1      # e0 :
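All three search routines above (rawmemchr.S, strrchr.S, strchr.S) open the same way: the sll/or cascade replicates the search character into every byte of a 64-bit register so that a whole quadword can be compared at once. The same cascade in C, as an illustrative sketch:

    #include <stdint.h>

    /* Spread one byte across all eight bytes of a quadword,
       mirroring the sll/or pairs in the entries above.  */
    static inline uint64_t
    replicate_byte (unsigned char c)
    {
      uint64_t v = c;       /* 00000000000000ch */
      v |= v << 8;          /* 000000000000chch */
      v |= v << 16;         /* 00000000chchchch */
      v |= v << 32;         /* chchchchchchchch */
      return v;
    }
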
ldiv.S:
    106  mov $31, t5
    131  cmovlbs t5, t4, Q
    185  cmplt AT, 0, t5
    194  blbc t5, $fix_sign_in_ret1

_mcount.S:
    63  stq t5, 0x70(sp)
    90  ldq t5, 0x70(sp)
strncmp.S:
    54   and a1, 7, t5      # find s2 misalignment
    131  cmpult t4, t5, t9
    134  mskqh t3, t5, t7   # mask garbage in s2
    214  cmpeq a3, 0, t5    # e0 : eoc in s2[1]
    219  or t4, t5, t4      # e0 : eos or eoc in s2[1].

strcmp.S:
    77  and a1, 7, t5      # .. e1 : find s2 misalignment
    83  cmplt t4, t5, t8   # .. e1 :
    86  mskqh t3, t5, t3   # e0 :
    94  extql t1, t5, t1   # e0 :

stxcpy.S:
    242  and a1, 7, t5     # e0 : find src misalignment
    259  cmplt t4, t5, t8  # e0 :
    263  mskqh t2, t5, t2  # e0 :
    278  and a1, 7, t5     # .. e1 :
    281  srl t8, t5, t8    # e0 : adjust final null return value

stpncpy.S:
    57  addq v0, 1, t5     # e0 :
    62  cmovne t4, t5, v0  # .. e1 : if last written wasnt null, inc v0

stxncpy.S:
    300  and a1, 7, t5     # e0 : find src misalignment
    317  cmplt t4, t5, t8  # e1 :
    322  mskqh t2, t5, t2  # e0 : begin src byte validity mask
    325  or t7, t10, t5    # .. e1 : test for end-of-count too
    327  cmoveq a2, t5, t7 # .. e1 :
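A pattern shared by strncmp.S, strcmp.S, and the stxcpy/stxncpy copy cores: `and a1, 7, t5` extracts the source pointer's misalignment within its quadword, and mskqh then invalidates the garbage bytes that were read before the true start of the string. A hedged C rendering of the idea (the real code keeps everything in registers and uses mskqh/extql rather than an explicit shift, but the effect is the same on little-endian Alpha):

    #include <stdint.h>

    /* Sketch: read the aligned quadword containing *src and discard
       the `mis` garbage bytes below the string's first byte.  Relies
       on the aligned read not faulting, exactly as the assembly does.  */
    static inline uint64_t
    first_quad (const char *src)
    {
      uintptr_t mis = (uintptr_t) src & 7;   /* the `and a1, 7, t5` step */
      const uint64_t *p = (const uint64_t *) ((uintptr_t) src - mis);
      return *p >> (mis * 8);
    }
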
/sysdeps/alpha/alphaev67/

stpncpy.S:
    45   zapnot t0, t8, t5
    51   cmpult zero, t5, t5
    56   addq v0, t5, v0
    66   cmpult zero, t5, t5
    101  addq v0, t5, v0

strrchr.S:
    40   insbl a1, 2, t5    # U : 0000000000ch0000
    45   sll t5, 8, t3      # U : 00000000ch000000
    49   or t5, t3, t3      # E : 00000000chch0000
    56   lda t5, -1         # E : build garbage mask
    59   mskqh t5, a0, t4   # E : Complete garbage mask
    89   subq t4, 1, t5     # E : build a mask of the bytes upto...
    90   or t4, t5, t4      # E : ... and including the null
    105  lda t5, 0x3f($31)  # E :
    106  subq t5, t2, t5    # E : Normalize leading zero count
    108  addq t6, t5, v0    # E : and add to quadword address
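The last three strrchr.S hits show how the ev67 code turns a per-byte match mask into an address: CTLZ counts leading zeros, `subq t5, t2, t5` normalizes that count to the index of the highest set bit (0x3f minus the count), and the result is added to the quadword's address. A sketch of the same step with the GCC builtin; the names are illustrative, not the glibc code:

    /* mask has bit k set iff byte k of the quadword matched;
       it must be nonzero (clz of 0 is undefined).  */
    static inline char *
    last_match (char *quad, unsigned long mask)
    {
      int byte = 63 - __builtin_clzl (mask);  /* index of highest set bit */
      return quad + byte;
    }
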
rawmemchr.S:
    38  insbl a1, 1, t5    # U : 000000000000ch00
    42  or t5, t3, a1      # E : 000000000000chch
    48  inswl a1, 2, t5    # E : 00000000chch0000
    52  or a3, t5, t5      # E : 0000chchchch0000
    57  or t5, a1, a1      # E : chchchchchchchch

strchr.S:
    39  insbl a1, 1, t5    # U : 000000000000ch00
    43  or t5, t3, a1      # E : 000000000000chch
    49  inswl a1, 2, t5    # E : 00000000chch0000
    53  or a3, t5, t5      # E : 0000chchchch0000
    58  or t5, a1, a1      # E : chchchchchchchch
/sysdeps/mips/sys/

regdef.h:
    53  #define t5 $13     (macro)
    57  #define ta1 t5
/sysdeps/ia64/fpu/

libm_support.h:
    580  t32,t1,t2,t3,t4,t5,t6) \                                   (argument)
    582  t1=(y)*(t32); t5=y-t1; t5=t5+t1; t6=y-t5; \
    583  t1=(t3)*(t5); \
    584  t2=(t3)*(t6)+(t4)*(t5); \
    595  t32,t1,t2,t3,t4,t5,t6,t7,t8) \                             (argument)
    596  __LIBM_MULL1_K80(t7,t8,xhi,yhi, t32,t1,t2,t3,t4,t5,t6) \
    611  t32,t1,t2,t3,t4,t5,t6,t7,t8)
    629  __LIBM_MULL_K80(r,x,y, t32,t1,t2,t3,t4,t5,t6,t7,t8)
    657  t32,t1,t2,t3,t4,t5,t6,t7,t8,t9)
    675  __LIBM_DIVL_K80(r,x,y, t32,t1,t2,t3,t4,t5,t6,t7,t8,t9)
    [all …]
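Lines 582-584 are the core of __LIBM_MULL1_K80: multiplying by t32 splits y into a high part t5 and a low part t6 (a Veltkamp split), so the partial products t3*t5 and t3*t6 + t4*t5 can be formed without rounding error. The same trick shown for plain doubles as a sketch (the ia64 macro operates on 80-bit long double and splits at a different bit position; compile without FMA contraction for this to hold):

    /* Dekker-style exact product: on return, hi + lo == a * b exactly.  */
    static void
    two_prod (double a, double b, double *hi, double *lo)
    {
      const double split = 134217729.0;        /* 2^27 + 1 */
      double ta = a * split, tb = b * split;
      double ah = (a - ta) + ta, al = a - ah;  /* Veltkamp split of a */
      double bh = (b - tb) + tb, bl = b - bh;  /* Veltkamp split of b */
      *hi = a * b;
      *lo = (((ah * bh - *hi) + ah * bl) + al * bh) + al * bl;
    }
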
/sysdeps/unix/sysv/linux/alpha/alpha/

regdef.h:
    11  #define t5 $6      (macro)
/sysdeps/alpha/alphaev6/

stxcpy.S:
    261  and a1, 7, t5     # E : find src misalignment
    279  cmplt t4, t5, t8  # E :
    283  mskqh t2, t5, t2  # U :
    296  and a1, 7, t5     # E :
    300  srl t8, t5, t8    # U : adjust final null return value

stxncpy.S:
    336  and a1, 7, t5     # E : find src misalignment
    354  cmplt t4, t5, t8  # E :
    359  mskqh t2, t5, t2  # U : begin src byte validity mask
    362  or t7, t10, t5    # E : test for end-of-count too
    365  cmoveq a2, t5, t7 # E : Latency=2, extra map slot
/sysdeps/sparc/sparc32/

memcpy.S:
    23  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \        (argument)
    33  st %t5, [%dst + offset + 0x14]; \
    37  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \   (argument)
    67  #define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \        (argument)
    70  srl %t0, shir, %t5; \
    73  or %t5, %prev, %t5; \
    85  #define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \   (argument)
    89  srl %t1, shir, %t5; \
    93  or %t5, %t6, %t1; \
    95  srl %t3, shir, %t5; \
    [all …]
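The MOVE_BIGCHUNK/SMOVE_CHUNK macros unroll the copy so that a block of loads into the temporaries t0..t7 is followed by a block of stores, letting the loads be scheduled well ahead of the stores; the srl/or pairs in SMOVE_CHUNK additionally merge two shifted words to handle misaligned sources. A rough C equivalent of the aligned case only (illustrative; the macro takes a byte offset where this sketch takes a word index):

    #include <stddef.h>
    #include <stdint.h>

    /* One 32-byte chunk: eight word loads, then eight word stores.  */
    static void
    move_bigchunk (const uint32_t *src, uint32_t *dst, size_t off)
    {
      uint32_t t0 = src[off + 0], t1 = src[off + 1],
               t2 = src[off + 2], t3 = src[off + 3],
               t4 = src[off + 4], t5 = src[off + 5],
               t6 = src[off + 6], t7 = src[off + 7];
      dst[off + 0] = t0; dst[off + 1] = t1;
      dst[off + 2] = t2; dst[off + 3] = t3;
      dst[off + 4] = t4; dst[off + 5] = t5;
      dst[off + 6] = t6; dst[off + 7] = t7;
    }
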
/sysdeps/ieee754/dbl-64/

e_lgamma_r.c:
    112  t5 = -1.03142241298341437450e-02, /* 0xBF851F9F, 0xBA91EC6A */   (variable)
    267  p3 = t2+w*(t5+w*(t8+w*(t11+w*t14)));                             (in __ieee754_lgamma_r())
/sysdeps/ieee754/flt-32/

e_lgammaf_r.c:
    47   t5 = -1.0314224288e-02, /* 0xbc28fcfe */          (variable)
    202  p3 = t2+w*(t5+w*(t8+w*(t11+w*t14)));              (in __ieee754_lgammaf_r())
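In both lgamma variants, t5 is one coefficient of a degree-14 polynomial whose terms are split by index mod 3 into three Horner chains (t0,t3,t6,...; t1,t4,t7,...; t2,t5,t8,... — the p3 line above is the third chain), each evaluated in w = y^3 so the chains can proceed in parallel. A sketch of the scheme; the recombination is written here in the mathematically plain form, where the glibc code folds it into the surrounding expression:

    /* Evaluate sum t[i] * y^i, i = 0..14, via three stride-3 chains.  */
    static double
    poly_stride3 (const double t[15], double y)
    {
      double w  = y * y * y;
      double p1 = t[0] + w * (t[3] + w * (t[6] + w * (t[9]  + w * t[12])));
      double p2 = t[1] + w * (t[4] + w * (t[7] + w * (t[10] + w * t[13])));
      double p3 = t[2] + w * (t[5] + w * (t[8] + w * (t[11] + w * t[14])));
      return p1 + y * (p2 + y * p3);
    }
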
/sysdeps/mips/

memcpy.S:
    210  # define REG5 t5