Occurrences of expDiff under /optee_os/lib/libutils/isoc/arch/arm/softfloat/source/:
s_addMagsExtF80.c  (all matches in softfloat_addMagsExtF80()):
     58  int_fast32_t expDiff;    (local)
     75  expDiff = expA - expB;
     76  if ( ! expDiff ) {
     98  if ( expDiff < 0 ) {
    107  ++expDiff;
    109  if ( ! expDiff ) goto newlyAligned;
    111  sig64Extra = softfloat_shiftRightJam64Extra( sigA, 0, -expDiff );
    123  --expDiff;
    125  if ( ! expDiff ) goto newlyAligned;
    127  sig64Extra = softfloat_shiftRightJam64Extra( sigB, 0, expDiff );

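The addMags entries in this listing (the extended-precision file above, and the 32/64/128-bit variants further down) all use expDiff the same way: the operand with the smaller exponent is shifted right by the exponent difference with a "jamming" shift before the magnitudes are added. The sketch below illustrates that pattern for 32-bit significands; shift_right_jam32 and add_aligned_sigs are hypothetical names modeled on softfloat_shiftRightJam32, not the actual OP-TEE/SoftFloat code.

```c
#include <stdint.h>

/* Right shift that ORs any shifted-out bits into bit 0 ("jamming"),
   preserving the sticky information needed for correct rounding.
   Requires 1 <= dist. */
static uint32_t shift_right_jam32(uint32_t sig, int dist)
{
    if (dist >= 32)
        return sig != 0;
    return (sig >> dist) | ((uint32_t)(sig << (32 - dist)) != 0);
}

/* Align two magnitudes on the larger exponent, then add them. */
static uint32_t add_aligned_sigs(int expA, uint32_t sigA,
                                 int expB, uint32_t sigB, int *expZ)
{
    int expDiff = expA - expB;

    if (expDiff < 0) {                  /* B has the larger exponent */
        sigA = shift_right_jam32(sigA, -expDiff);
        *expZ = expB;
    } else if (expDiff > 0) {           /* A has the larger exponent */
        sigB = shift_right_jam32(sigB, expDiff);
        *expZ = expA;
    } else {
        *expZ = expA;                   /* already aligned */
    }
    return sigA + sigB;                 /* caller renormalizes and rounds */
}
```

The wider formats differ only in how the shifted-out bits are carried (an extra sticky word for extF80/F128), which is why the matches show softfloat_shiftRightJam64Extra and softfloat_shiftRightJam128Extra instead of a plain shift.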
s_subMagsExtF80.c  (all matches in softfloat_subMagsExtF80()):
     58  int_fast32_t expDiff;    (local)
     74  expDiff = expA - expB;
     75  if ( 0 < expDiff ) goto expABigger;
     76  if ( expDiff < 0 ) goto expBBigger;
    107  ++expDiff;
    109  if ( ! expDiff ) goto newlyAlignedBBigger;
    111  sig128 = softfloat_shiftRightJam128( sigA, 0, -expDiff );
    130  --expDiff;
    132  if ( ! expDiff ) goto newlyAlignedABigger;
    134  sig128 = softfloat_shiftRightJam128( sigB, 0, expDiff );

f32_rem.c  (all matches in f32_rem()):
     58  int_fast16_t expDiff;    (local)
    103  expDiff = expA - expB;
    104  if ( expDiff < 1 ) {
    105  if ( expDiff < -1 ) return a;
    107  if ( expDiff ) {
    122  expDiff -= 31;
    131  if ( expDiff < 0 ) break;
    133  expDiff -= 29;
    138  q >>= ~expDiff & 31;
    139  rem = (rem<<(expDiff + 30)) - q * (uint32_t) sigB;

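In the remainder routines (f32_rem.c here, and the f64/f128/extF80 variants further down), the reduction loop keeps subtracting 29-31 from expDiff and exits once it goes negative; the final shift by `~expDiff & 31` then relies on a two's-complement identity: for the small negative expDiff values the loop can exit with, `~expDiff & 31` is simply `-expDiff - 1`. A tiny standalone check of that identity (not SoftFloat code, just an illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Two's-complement identity behind the final quotient shift:
       for expDiff in [-32, -1], ~expDiff & 31 == -expDiff - 1. */
    for (int_fast16_t expDiff = -32; expDiff <= -1; ++expDiff)
        assert((~expDiff & 31) == (-expDiff - 1));
    return 0;
}
```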
s_addMagsF128.c  (all matches in softfloat_addMagsF128()):
     57  int_fast32_t expDiff;    (local)
     70  expDiff = expA - expB;
     71  if ( ! expDiff ) {
     89  if ( expDiff < 0 ) {
    100  ++expDiff;
    102  if ( ! expDiff ) goto newlyAligned;
    105  softfloat_shiftRightJam128Extra( sigA.v64, sigA.v0, 0, -expDiff );
    119  --expDiff;
    121  if ( ! expDiff ) goto newlyAligned;
    124  softfloat_shiftRightJam128Extra( sigB.v64, sigB.v0, 0, expDiff );

s_subMagsF128.c  (all matches in softfloat_subMagsF128()):
     58  int_fast32_t expDiff, expZ;    (local)
     70  expDiff = expA - expB;
     71  if ( 0 < expDiff ) goto expABigger;
     72  if ( expDiff < 0 ) goto expBBigger;
    101  ++expDiff;
    102  if ( ! expDiff ) goto newlyAlignedBBigger;
    104  sigA = softfloat_shiftRightJam128( sigA.v64, sigA.v0, -expDiff );
    122  --expDiff;
    123  if ( ! expDiff ) goto newlyAlignedABigger;
    125  sigB = softfloat_shiftRightJam128( sigB.v64, sigB.v0, expDiff );

f64_rem.c  (all matches in f64_rem()):
     58  int_fast16_t expDiff;    (local)
    108  expDiff = expA - expB;
    109  if ( expDiff < 1 ) {
    110  if ( expDiff < -1 ) return a;
    112  if ( expDiff ) {
    127  expDiff -= 30;
    136  if ( expDiff < 0 ) break;
    145  expDiff -= 29;
    150  q = (uint32_t) (q64>>32)>>(~expDiff & 31);
    151  rem = (rem<<(expDiff + 30)) - q * (uint64_t) sigB;

s_mulAddF64.c  (all matches in softfloat_mulAddF64()):
     66  int_fast16_t expDiff;    (local)
    142  expDiff = expZ - expC;
    143  if ( expDiff < 0 ) {
    151  } else if ( expDiff ) {
    159  if ( expDiff <= 0 ) {
    174  if ( expDiff < 0 ) {
    341  expDiff = expZ - expC;
    342  if ( expDiff < 0 ) {
    356  if ( ! expDiff ) {
    373  if ( expDiff <= 0 ) {
    [all …]

f128M_rem.c  (all matches in f128M_rem()):
     65  int32_t expDiff;    (local)
    100  expDiff = expA - expB;
    101  if ( expDiff < 1 ) {
    102  if ( expDiff < -1 ) goto copyA;
    103  if ( expDiff ) {
    117  expDiff -= 30;
    120  if ( expDiff < 0 ) break;
    126  expDiff -= 29;
    131  q = (uint32_t) (q64>>32)>>(~expDiff & 31);
    132  softfloat_remStep128MBy32( remPtr, expDiff + 30, x, q, remPtr );

extF80M_rem.c  (all matches in extF80M_rem()):
     69  int32_t expDiff;    (local)
    116  expDiff = expA - expB;
    117  if ( expDiff < -1 ) goto copyA;
    126  if ( expDiff < 1 ) {
    127  if ( expDiff ) {
    137  expDiff -= 30;
    140  if ( expDiff < 0 ) break;
    146  expDiff -= 29;
    151  q = (uint32_t) (x64>>32)>>(~expDiff & 31);
    152  softfloat_remStep96MBy32( rem, expDiff + 30, x, q, rem );

s_addMagsF32.c  (all matches in softfloat_addMagsF32()):
     51  int_fast16_t expDiff;    (local)
     61  expDiff = expA - expB;
     64  if ( ! expDiff ) {
     77  if ( expDiff < 0 ) {
     85  sigA = softfloat_shiftRightJam32( sigA, -expDiff );
     94  sigB = softfloat_shiftRightJam32( sigB, expDiff );

s_mulAddF32.c  (all matches in softfloat_mulAddF32()):
     66  int_fast16_t expDiff;    (local)
    131  expDiff = expProd - expC;
    133  if ( expDiff <= 0 ) {
    135  sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff );
    141  (uint_fast64_t) sigC<<32, expDiff );
    150  if ( expDiff < 0 ) {
    153  sig64Z = sig64C - softfloat_shiftRightJam64( sigProd, -expDiff );
    154  } else if ( ! expDiff ) {
    164  sig64Z = sigProd - softfloat_shiftRightJam64( sig64C, expDiff );

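The mulAdd entries use expDiff for the fused-multiply-add version of the same alignment: the exponent of the product A*B is compared against the addend's, and whichever side has the smaller exponent is shift-right-jammed before the add. Below is a deliberately simplified model for the 32-bit case; align_product_and_addend and shift_right_jam64 are hypothetical names, and unlike the real s_mulAddF32.c code it always widens the addend to the product's 64-bit fixed point and ignores the opposite-sign and special-value paths.

```c
#include <stdint.h>

/* 64-bit jamming right shift (1 <= dist). */
static uint64_t shift_right_jam64(uint64_t sig, int dist)
{
    if (dist >= 64)
        return sig != 0;
    return (sig >> dist) | ((uint64_t)(sig << (64 - dist)) != 0);
}

/* Line up the 64-bit product significand and the addend significand
   (widened to the product's width) on the larger exponent, then add. */
static uint64_t align_product_and_addend(int expProd, uint64_t sigProd,
                                         int expC, uint32_t sigC,
                                         int *expZ)
{
    int expDiff = expProd - expC;
    uint64_t sig64C = (uint64_t)sigC << 32;   /* addend at product width */

    if (expDiff < 0) {            /* addend dominates: shift product down */
        *expZ = expC;
        sigProd = shift_right_jam64(sigProd, -expDiff);
    } else if (expDiff > 0) {     /* product dominates: shift addend down */
        *expZ = expProd;
        sig64C = shift_right_jam64(sig64C, expDiff);
    } else {
        *expZ = expC;             /* already aligned */
    }
    return sigProd + sig64C;      /* caller renormalizes and rounds */
}
```

The `32 - expDiff` shift visible in the real matches is the shortcut the library takes when the addend dominates: rather than widening sigC, it scales the product down to the addend's 32-bit fixed point, the extra 32 accounting for the product significand being twice as wide.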
s_addMagsF64.c  (all matches in softfloat_addMagsF64()):
     51  int_fast16_t expDiff;    (local)
     61  expDiff = expA - expB;
     64  if ( ! expDiff ) {
     79  if ( expDiff < 0 ) {
     87  sigA = softfloat_shiftRightJam64( sigA, -expDiff );
     96  sigB = softfloat_shiftRightJam64( sigB, expDiff );

s_mulAddF128M.c  (all matches in softfloat_mulAddF128M()):
     76  int32_t expDiff;    (local)
    172  expDiff = expProd - expC;
    173  if ( expDiff <= 0 ) {
    185  if ( expDiff ) {
    222  expDiff -= 128;
    223  if ( 0 <= expDiff ) {
    226  if ( expDiff ) softfloat_shiftRightJam160M( sigX, expDiff, sigX );
    246  shiftCount = expDiff & 31;
    250  expDiff >>= 5;
    253  + expDiff * -wordIncr;
    [all …]

f128_rem.c  (all matches in f128_rem()):
     59  int_fast32_t expDiff;    (local)
    116  expDiff = expA - expB;
    117  if ( expDiff < 1 ) {
    118  if ( expDiff < -1 ) return a;
    119  if ( expDiff ) {
    131  expDiff -= 30;
    134  if ( expDiff < 0 ) break;
    142  expDiff -= 29;
    147  q = (uint32_t) (q64>>32)>>(~expDiff & 31);
    148  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, expDiff + 30 );

extF80_rem.c  (all matches in extF80_rem()):
     60  int_fast32_t expDiff;    (local)
    126  expDiff = expA - expB;
    127  if ( expDiff < -1 ) goto copyA;
    130  if ( expDiff < 1 ) {
    131  if ( expDiff ) {
    145  expDiff -= 30;
    148  if ( expDiff < 0 ) break;
    158  expDiff -= 29;
    163  q = (uint32_t) (q64>>32)>>(~expDiff & 31);
    164  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, expDiff + 30 );

s_subMagsF32.c  (all matches in softfloat_subMagsF32()):
     52  int_fast16_t expDiff;    (local)
     62  expDiff = expA - expB;
     65  if ( 0 < expDiff ) goto expABigger;
     66  if ( expDiff < 0 ) goto expBBigger;
     88  sigA = softfloat_shiftRightJam32( sigA, -expDiff );
    102  sigB = softfloat_shiftRightJam32( sigB, expDiff );

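The subMags matches show the subtraction counterpart of the alignment above: the sign of expDiff decides which operand dominates (the `goto expABigger` / `goto expBBigger` branches), the other significand is shift-right-jammed, and the result takes the dominant operand's sign. A structural sketch for the 32-bit case, with hypothetical names and with cancellation, normalization and rounding left out:

```c
#include <stdint.h>

/* Jamming right shift, as in the earlier sketches (1 <= dist). */
static uint32_t shift_right_jam32(uint32_t sig, int dist)
{
    if (dist >= 32)
        return sig != 0;
    return (sig >> dist) | ((uint32_t)(sig << (32 - dist)) != 0);
}

/* Subtract magnitudes after aligning on the larger exponent; the result
   keeps the sign of whichever operand has the larger magnitude. */
static uint32_t sub_aligned_sigs(int expA, uint32_t sigA, int signA,
                                 int expB, uint32_t sigB,
                                 int *expZ, int *signZ)
{
    int expDiff = expA - expB;

    if (expDiff > 0) {                        /* A dominates */
        *expZ = expA;
        *signZ = signA;
        return sigA - shift_right_jam32(sigB, expDiff);
    }
    if (expDiff < 0) {                        /* B dominates: sign flips */
        *expZ = expB;
        *signZ = !signA;
        return sigB - shift_right_jam32(sigA, -expDiff);
    }
    /* Equal exponents: compare the significands to pick the sign. */
    *expZ = expA;
    if (sigB <= sigA) {
        *signZ = signA;
        return sigA - sigB;
    }
    *signZ = !signA;
    return sigB - sigA;
}
```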
s_subMagsF64.c  (all matches in softfloat_subMagsF64()):
     52  int_fast16_t expDiff;    (local)
     62  expDiff = expA - expB;
     65  if ( 0 < expDiff ) goto expABigger;
     66  if ( expDiff < 0 ) goto expBBigger;
     88  sigA = softfloat_shiftRightJam64( sigA, -expDiff );
    102  sigB = softfloat_shiftRightJam64( sigB, expDiff );

s_mulAddF128.c  (all matches in softfloat_mulAddF128()):
     72  int_fast32_t shiftCount, expDiff;    (local)
    162  expDiff = expZ - expC;
    163  if ( expDiff < 0 ) {
    165  if ( (signZ == signC) || (expDiff < -1) ) {
    166  shiftCount -= expDiff;
    188  if ( ! expDiff ) {
    205  if ( expDiff <= 0 ) {
    219  if ( expDiff < 0 ) {
    221  if ( expDiff < -1 ) {
    241  } else if ( ! expDiff ) {
    [all …]

s_addExtF80M.c  (all matches in softfloat_addExtF80M()):
     64  int32_t expDiff;    (local)
    111  expDiff = expA - expB;
    112  if ( expDiff ) {
    118  softfloat_shiftRightJam96M( extSigX, expDiff, extSigX );

s_addF128M.c  (all matches in softfloat_addF128M()):
     61  int32_t expDiff;    (local)
    127  expDiff = expA - expB;
    128  if ( expDiff ) {
    136  softfloat_shiftRightJam160M( extSigZ, expDiff, extSigZ );