/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/

s_mulAddF128.c
     98  (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))  in softfloat_mulAddF128()
    112  uiZ.v64 = 0;  in softfloat_mulAddF128()
    116  uiZ.v64 = uiC64;  in softfloat_mulAddF128()
    141  softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z );  in softfloat_mulAddF128()
    206  sigZ = softfloat_add128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 );  in softfloat_mulAddF128()
    223  softfloat_sub128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 );  in softfloat_mulAddF128()
    242  sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, sigC.v64, sigC.v0 );  in softfloat_mulAddF128()
    273  if ( sigZ.v64 ) {  in softfloat_mulAddF128()
    280  if ( ! sigZ.v64 ) {  in softfloat_mulAddF128()
    340  uiZ.v64 = uiC64;  in softfloat_mulAddF128()
    [all …]

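Every reference in this listing manipulates the library's 128-bit significand type: a pair of 64-bit words, with v64 holding the upper half and v0 the lower half. As a rough, self-contained sketch of what the two-word add and subtract helpers compute (the field order in the struct and the helper names here are assumptions for illustration, not the library's own header):

#include <stdint.h>

/* Assumed layout: v64 = upper 64 bits, v0 = lower 64 bits of a 128-bit value. */
struct uint128 { uint64_t v0, v64; };

/* 128-bit addition: add the low words, then carry into the high words. */
static struct uint128 add128(uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0)
{
    struct uint128 z;

    z.v0  = a0 + b0;
    z.v64 = a64 + b64 + (z.v0 < a0);    /* carry out of the low word */
    return z;
}

/* 128-bit subtraction: subtract the low words, then borrow from the high words. */
static struct uint128 sub128(uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0)
{
    struct uint128 z;

    z.v0  = a0 - b0;
    z.v64 = a64 - b64 - (a0 < b0);      /* borrow into the high word */
    return z;
}

The comparison helpers seen below in f128_div.c (softfloat_lt128, softfloat_le128) follow the same two-word idea: compare the high words first, then the low words when the high words are equal.
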
f128_rem.c
     70  uiA64 = uA.ui.v64;  in f128_rem()
     77  uiB64 = uB.ui.v64;  in f128_rem()
     87  (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))  in f128_rem()
    121  sigB = softfloat_add128( sigB.v64, sigB.v0, sigB.v64, sigB.v0 );  in f128_rem()
    124  q = softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 );  in f128_rem()
    126  rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 );  in f128_rem()
    138  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in f128_rem()
    150  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in f128_rem()
    152  altRem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 );  in f128_rem()
    161  rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 );  in f128_rem()
    [all …]

f128_div.c
     74  uiA64 = uA.ui.v64;  in f128_div()
     81  uiB64 = uB.ui.v64;  in f128_div()
    126  if ( softfloat_lt128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ) ) {  in f128_div()
    128  rem = softfloat_add128( sigA.v64, sigA.v0, sigA.v64, sigA.v0 );  in f128_div()
    139  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in f128_div()
    142  rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 );  in f128_div()
    151  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in f128_div()
    154  rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 );  in f128_div()
    155  } else if ( softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 ) ) {  in f128_div()
    157  rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 );  in f128_div()
    [all …]

f128_sqrt.c
     67  uiA64 = uA.ui.v64;  in f128_sqrt()
     71  sigA.v64 = fracF128UI64( uiA64 );  in f128_sqrt()
     76  if ( sigA.v64 | sigA.v0 ) {  in f128_sqrt()
     92  if ( ! (sigA.v64 | sigA.v0) ) return a;  in f128_sqrt()
    105  sig32A = sigA.v64>>17;  in f128_sqrt()
    125  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in f128_sqrt()
    138  rem = softfloat_sub128( y.v64, y.v0, term.v64, term.v0 );  in f128_sqrt()
    151  term.v64, term.v0  in f128_sqrt()
    163  term = softfloat_add128( term.v64, term.v0, 0, y.v64 );  in f128_sqrt()
    165  term = softfloat_sub128( term.v64, term.v0, rem.v64, rem.v0 );  in f128_sqrt()
    [all …]

s_subMagsF128.c
     63  sigA.v64 = fracF128UI64( uiA64 );  in softfloat_subMagsF128()
     66  sigB.v64 = fracF128UI64( uiB64 );  in softfloat_subMagsF128()
     74  if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;  in softfloat_subMagsF128()
     76  uiZ.v64 = defaultNaNF128UI64;  in softfloat_subMagsF128()
     82  if ( sigB.v64 < sigA.v64 ) goto aBigger;  in softfloat_subMagsF128()
     83  if ( sigA.v64 < sigB.v64 ) goto bBigger;  in softfloat_subMagsF128()
     86  uiZ.v64 =  in softfloat_subMagsF128()
    107  sigB.v64 |= UINT64_C( 0x0010000000000000 );  in softfloat_subMagsF128()
    110  sigZ = softfloat_sub128( sigB.v64, sigB.v0, sigA.v64, sigA.v0 );  in softfloat_subMagsF128()
    115  uiZ.v64 = uiA64;  in softfloat_subMagsF128()
    [all …]

f128_roundToInt.c
     59  uiA64 = uA.ui.v64;  in f128_roundToInt()
     78  uiZ.v64 = uiA64;  in f128_roundToInt()
     84  ++uiZ.v64;  in f128_roundToInt()
     89  uiZ.v64 &= ~1;  in f128_roundToInt()
    123  if ( uiZ.v64 ) uiZ.v64 = packToF128UI64( 1, 0x3FFF, 0 );  in f128_roundToInt()
    126  if ( ! uiZ.v64 ) uiZ.v64 = packToF128UI64( 0, 0x3FFF, 0 );  in f128_roundToInt()
    133  uiZ.v64 = uiA64;  in f128_roundToInt()
    138  uiZ.v64 += lastBitMask>>1;  in f128_roundToInt()
    140  uiZ.v64 += lastBitMask>>1;  in f128_roundToInt()
    148  uiZ.v64 = (uiZ.v64 | (uiA0 != 0)) + roundBitsMask;  in f128_roundToInt()
    [all …]

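The lastBitMask / roundBitsMask lines above are the usual add-half-then-mask pattern for rounding a fixed-point value to the nearest multiple of a power of two. A minimal sketch on a single 64-bit word, ties to even (the real function also folds the low word uiA0 into a sticky bit and handles the other rounding modes):

#include <stdint.h>

/* Round x to the nearest multiple of 2^k, ties to even.
 * Assumes 1 <= k < 64 and that x + 2^(k-1) does not overflow.              */
static uint64_t roundToMultipleNearEven(uint64_t x, unsigned k)
{
    uint64_t lastBitMask   = (uint64_t)1 << k;  /* value of the last kept bit    */
    uint64_t roundBitsMask = lastBitMask - 1;   /* bits that will be discarded   */
    uint64_t z = x + (lastBitMask >> 1);        /* add half of the last kept bit */

    if (!(z & roundBitsMask))                   /* discarded bits were exactly half... */
        z &= ~lastBitMask;                      /* ...so round to the even multiple    */
    return z & ~roundBitsMask;                  /* clear the discarded bits            */
}
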
s_addMagsF128.c
     65  sigA.v64 = fracF128UI64( uiA64 );  in softfloat_addMagsF128()
     68  sigB.v64 = fracF128UI64( uiB64 );  in softfloat_addMagsF128()
     73  if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;  in softfloat_addMagsF128()
     74  uiZ.v64 = uiA64;  in softfloat_addMagsF128()
     78  sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 );  in softfloat_addMagsF128()
     80  uiZ.v64 = packToF128UI64( signZ, 0, sigZ.v64 );  in softfloat_addMagsF128()
     85  sigZ.v64 |= UINT64_C( 0x0002000000000000 );  in softfloat_addMagsF128()
    111  uiZ.v64 = uiA64;  in softfloat_addMagsF128()
    131  sigA.v64 | UINT64_C( 0x0001000000000000 ),  in softfloat_addMagsF128()
    133  sigB.v64,  in softfloat_addMagsF128()
    [all …]

f128_mul.c
     71  uiA64 = uA.ui.v64;  in f128_mul()
     75  sigA.v64 = fracF128UI64( uiA64 );  in f128_mul()
     78  uiB64 = uB.ui.v64;  in f128_mul()
     82  sigB.v64 = fracF128UI64( uiB64 );  in f128_mul()
     89  (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))  in f128_mul()
     93  magBits = expB | sigB.v64 | sigB.v0;  in f128_mul()
     98  magBits = expA | sigA.v64 | sigA.v0;  in f128_mul()
    120  softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z );  in f128_mul()
    125  sigA.v64, sigA.v0  in f128_mul()
    147  uiZ.v64 = defaultNaNF128UI64;  in f128_mul()
    [all …]

extF80_rem.c
    140  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );  in extF80_rem()
    152  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_rem()
    156  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );  in extF80_rem()
    166  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_rem()
    170  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );  in extF80_rem()
    181  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );  in extF80_rem()
    184  meanRem = softfloat_add128( rem.v64, rem.v0, altRem.v64, altRem.v0 );  in extF80_rem()
    186  (meanRem.v64 & UINT64_C( 0x8000000000000000 ))  in extF80_rem()
    192  if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {  in extF80_rem()
    198  signRem, expB + 32, rem.v64, rem.v0, 80 );  in extF80_rem()
    [all …]

extF80_div.c
    137  q64 = (uint_fast64_t) (uint32_t) (rem.v64>>2) * recip32;  in extF80_div()
    141  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );  in extF80_div()
    143  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_div()
    144  if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {  in extF80_div()
    155  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_div()
    157  if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {  in extF80_div()
    159  rem = softfloat_add128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_div()
    160  } else if ( softfloat_le128( term.v64, term.v0, rem.v64, rem.v0 ) ) {  in extF80_div()
    162  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_div()
    164  if ( rem.v64 | rem.v0 ) q |= 1;  in extF80_div()
    [all …]

extF80_sqrt.c
     78  uiZ64 = uiZ.v64;  in extF80_sqrt()
    116  rem.v64 -= (uint_fast64_t) sig32Z * sig32Z;  in extF80_sqrt()
    119  q = ((uint_fast64_t) (uint32_t) (rem.v64>>2) * recipSqrt32)>>32;  in extF80_sqrt()
    123  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );  in extF80_sqrt()
    124  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_sqrt()
    127  q = (((uint_fast64_t) (uint32_t) (rem.v64>>2) * recipSqrt32)>>32) + 2;  in extF80_sqrt()
    138  term = softfloat_add128( term.v64, term.v0, 0, x64 );  in extF80_sqrt()
    139  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 28 );  in extF80_sqrt()
    140  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );  in extF80_sqrt()
    141  if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {  in extF80_sqrt()
    [all …]

s_mulAddF64.c
    123  if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) {  in softfloat_mulAddF64()
    127  sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );  in softfloat_mulAddF64()
    132  sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0);  in softfloat_mulAddF64()
    146  sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff );  in softfloat_mulAddF64()
    164  sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );  in softfloat_mulAddF64()
    165  sigZ = sig128Z.v64 | (sig128Z.v0 != 0);  in softfloat_mulAddF64()
    178  sig128Z.v64 = sig128Z.v64 - sigC;  in softfloat_mulAddF64()
    187  sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );  in softfloat_mulAddF64()
    191  if ( ! sig128Z.v64 ) {  in softfloat_mulAddF64()
    193  sig128Z.v64 = sig128Z.v0;  in softfloat_mulAddF64()
    [all …]

s_mul128To256M.c
     56  z64 = p64.v0 + p0.v64;  in softfloat_mul128To256M()
     57  z128 = p64.v64 + (z64 < p64.v0);  in softfloat_mul128To256M()
     60  z192 = p128.v64 + (z128 < p128.v0);  in softfloat_mul128To256M()
     64  p64.v64 += (z64 < p64.v0);  in softfloat_mul128To256M()
     65  z128 += p64.v64;  in softfloat_mul128To256M()
     67  zPtr[indexWord( 4, 3 )] = z192 + (z128 < p64.v64);  in softfloat_mul128To256M()

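These lines are the carry propagation that sums the four 64x64 partial products of a 128x128 -> 256-bit multiply. A rough sketch of the same accumulation, with the 64x64 -> 128 step done via the compiler's unsigned __int128 purely for brevity and the result written low-to-high into z[0..3] (the library instead goes through softfloat_mul64To128() and indexWord()):

#include <stdint.h>

/* 128x128 -> 256-bit multiply: four partial products, carries detected
 * with the (sum < addend) comparisons seen in the excerpt above.
 * z[0] is the least significant 64-bit word, z[3] the most significant.    */
static void mul128To256(uint64_t a64, uint64_t a0,
                        uint64_t b64, uint64_t b0, uint64_t z[4])
{
    unsigned __int128 p0   = (unsigned __int128)a0  * b0;   /* weight 2^0   */
    unsigned __int128 pA   = (unsigned __int128)a64 * b0;   /* weight 2^64  */
    unsigned __int128 pB   = (unsigned __int128)a0  * b64;  /* weight 2^64  */
    unsigned __int128 p128 = (unsigned __int128)a64 * b64;  /* weight 2^128 */
    uint64_t z64, z128, z192, lo, hi;

    z[0] = (uint64_t)p0;

    lo = (uint64_t)pA;
    hi = (uint64_t)(pA >> 64);
    z64  = lo + (uint64_t)(p0 >> 64);
    z128 = hi + (z64 < lo);                          /* carry out of word 1 */
    z128 += (uint64_t)p128;
    z192  = (uint64_t)(p128 >> 64) + (z128 < (uint64_t)p128);

    lo = (uint64_t)pB;
    hi = (uint64_t)(pB >> 64);
    z64 += lo;
    hi  += (z64 < lo);                               /* carry out of word 1, again */
    z[1]  = z64;
    z128 += hi;
    z[2]  = z128;
    z[3]  = z192 + (z128 < hi);                      /* final carry into word 3 */
}
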
f64_to_f128.c
     68  uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 );  in f64_to_f128()
     75  uiZ.v64 = packToF128UI64( sign, 0, 0 );  in f64_to_f128()
     84  uiZ.v64 = packToF128UI64( sign, exp + 0x3C00, sig128.v64 );  in f64_to_f128()

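For a finite, normal double the last line is a pure re-encoding: the exponent bias changes from 0x3FF to 0x3FFF (hence the + 0x3C00) and the 52 fraction bits are left-aligned in binary128's 112-bit fraction field. A hedged sketch of just that normal-number path (zero, subnormal, infinity and NaN need the separate handling the other references show; the helper name is illustrative):

#include <stdint.h>

/* Normal-number path only: widen IEEE binary64 bits to binary128 bits.
 * The 52 fraction bits move up by 112 - 52 = 60 places: the top 48 of them
 * land in the high result word, the remaining 4 at the top of the low word. */
static void f64BitsToF128Bits(uint64_t uiA, uint64_t *zHi, uint64_t *zLo)
{
    uint64_t sign = uiA >> 63;
    uint64_t exp  = (uiA >> 52) & 0x7FF;                   /* biased, 11 bits */
    uint64_t frac = uiA & UINT64_C(0x000FFFFFFFFFFFFF);    /* 52 bits         */

    *zHi = sign << 63 | (exp + 0x3C00) << 48 | frac >> 4;
    *zLo = frac << 60;
}
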
f128_to_extF80.c
     61  uiA64 = uA.ui.v64;  in f128_to_extF80()
     71  uiZ64 = uiZ.v64;  in f128_to_extF80()
     87  sig64 = normExpSig.sig.v64;  in f128_to_extF80()
     93  return softfloat_roundPackToExtF80( sign, exp, sig128.v64, sig128.v0, 80 );  in f128_to_extF80()

ui64_to_f128.c
     56  zSig.v64 = a<<(shiftCount - 64);  in ui64_to_f128()
     61  uiZ64 = packToF128UI64( 0, 0x406E - shiftCount, zSig.v64 );  in ui64_to_f128()
     64  uZ.ui.v64 = uiZ64;  in ui64_to_f128()

extF80_mul.c
    120  if ( sig128Z.v64 < UINT64_C( 0x8000000000000000 ) ) {  in extF80_mul()
    124  sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );  in extF80_mul()
    128  signZ, expZ, sig128Z.v64, sig128Z.v0, extF80_roundingPrecision );  in extF80_mul()
    133  uiZ64 = uiZ.v64;  in extF80_mul()

s_mul64To128.c
     57  z.v64 = (uint_fast64_t) a32 * b32;  in softfloat_mul64To128()
     58  z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32;  in softfloat_mul64To128()
     61  z.v64 += (z.v0 < mid);  in softfloat_mul64To128()

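These three lines come from the schoolbook 64x64 -> 128 multiply: split each operand into 32-bit halves, form four partial products, and fold the two middle ones in with explicit carry handling. A self-contained sketch of that scheme, using the same assumed v64/v0 high/low convention as above:

#include <stdint.h>

struct uint128 { uint64_t v0, v64; };   /* v64 = high half, v0 = low half (assumed) */

static struct uint128 mul64To128(uint64_t a, uint64_t b)
{
    uint32_t a32 = a >> 32, a0 = (uint32_t)a;
    uint32_t b32 = b >> 32, b0 = (uint32_t)b;
    struct uint128 z;
    uint64_t mid1, mid;

    z.v0  = (uint64_t)a0 * b0;                     /* low  x low                    */
    mid1  = (uint64_t)a32 * b0;                    /* high x low                    */
    mid   = mid1 + (uint64_t)a0 * b32;             /* + low x high, may wrap        */
    z.v64 = (uint64_t)a32 * b32;                   /* high x high                   */
    z.v64 += ((uint64_t)(mid < mid1) << 32)        /* a wrap of mid is worth 2^96   */
           | (mid >> 32);                          /* upper half of mid -> high word */
    mid <<= 32;                                    /* lower half of mid -> low word  */
    z.v0 += mid;
    z.v64 += (z.v0 < mid);                         /* carry out of the low word     */
    return z;
}
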
f128_mulAdd.c
     53  uiA64 = uA.ui.v64;  in f128_mulAdd()
     56  uiB64 = uB.ui.v64;  in f128_mulAdd()
     59  uiC64 = uC.ui.v64;  in f128_mulAdd()

i64_to_f128.c
     60  zSig.v64 = absA<<(shiftCount - 64);  in i64_to_f128()
     65  uiZ64 = packToF128UI64( sign, 0x406E - shiftCount, zSig.v64 );  in i64_to_f128()
     68  uZ.ui.v64 = uiZ64;  in i64_to_f128()

s_normRoundPackToF128.c
     63  sig64 = sig128.v64;  in softfloat_normRoundPackToF128()
     67  uZ.ui.v64 = packToF128UI64( sign, sig64 | sig0 ? exp : 0, sig64 );  in softfloat_normRoundPackToF128()
     76  sig64 = sig128Extra.v.v64;  in softfloat_normRoundPackToF128()

f32_to_f128.c
     67  uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 );  in f32_to_f128()
     74  uiZ.v64 = packToF128UI64( sign, 0, 0 );  in f32_to_f128()
     82  uiZ.v64 = packToF128UI64( sign, exp + 0x3F80, (uint_fast64_t) sig<<25 );  in f32_to_f128()

s_subMagsExtF80.c
    112  sigA = sig128.v64;  in softfloat_subMagsExtF80()
    135  sigB = sig128.v64;  in softfloat_subMagsExtF80()
    146  signZ, expZ, sig128.v64, sig128.v0, extF80_roundingPrecision );  in softfloat_subMagsExtF80()
    151  uiZ64 = uiZ.v64;  in softfloat_subMagsExtF80()

/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/8086-SSE/

s_propagateNaNF128UI.c
     72  uiZ.v64 = uiA64;  in softfloat_propagateNaNF128UI()
     75  uiZ.v64 = uiB64;  in softfloat_propagateNaNF128UI()
     78  uiZ.v64 |= UINT64_C( 0x0000800000000000 );  in softfloat_propagateNaNF128UI()

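The OR with 0x0000800000000000 in the last line sets bit 47 of the high word, the most significant bit of binary128's 112-bit fraction: under the usual convention that bit is clear for a signaling NaN and set for a quiet one, so the propagated NaN comes out quiet. A sketch of the matching classification and quieting helpers (encoding assumed from the IEEE layout, names illustrative rather than the library's own):

#include <stdint.h>
#include <stdbool.h>

/* Signaling NaN: exponent all ones (0x7FFF), nonzero fraction,
 * and the top fraction bit (bit 47 of the high word) clear.                */
static bool isSigNaNF128(uint64_t uiA64, uint64_t uiA0)
{
    return (uiA64 & UINT64_C(0x7FFF800000000000)) == UINT64_C(0x7FFF000000000000)
        && ((uiA64 & UINT64_C(0x00007FFFFFFFFFFF)) | uiA0) != 0;
}

/* Quiet a NaN by setting that same bit in the high word. */
static uint64_t quietNaNF128HighWord(uint64_t uiNaN64)
{
    return uiNaN64 | UINT64_C(0x0000800000000000);
}
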
/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/8086/

s_commonNaNToExtF80UI.c
     52  uiZ.v64 = (uint_fast16_t) aPtr->sign<<15 | 0x7FFF;  in softfloat_commonNaNToExtF80UI()
     53  uiZ.v0 = UINT64_C( 0xC000000000000000 ) | aPtr->v64>>1;  in softfloat_commonNaNToExtF80UI()