sigZ matches under /optee_os/lib/libutils/isoc/arch/arm/softfloat/source/:
s_mulAddF128.c (in softfloat_mulAddF128()):
    170  sigZ.v64, sigZ.v0, shiftCount );
    181  sigZ = softfloat_shortShiftRight128( sigZ.v64, sigZ.v0, 1 );
    206  sigZ = softfloat_add128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 );
    227  sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, 0, 1 );
    242  sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, sigC.v64, sigC.v0 );
    277  sigZ.v64 = sigZ.v0;
    282  sigZ.v64 = sigZ.v0;
    287  sigZ.v64 = sigZ.v0;
    306  sigZ:
    310  sigZ = softfloat_shortShiftRight128( sigZ.v64, sigZ.v0, shiftCount );
    [all …]
s_addExtF80M.c (in softfloat_addExtF80M()):
    60   uint64_t sigZ, sigB;
    106  sigZ = aSPtr->signif;
    123  sigZ -= sigB;
    126  --sigZ;
    132  sigZ = sigZ<<1 | sigZExtra>>31;
    139  sigZ += sigB;
    140  if ( sigZ & UINT64_C( 0x8000000000000000 ) ) goto sigZ;
    151  sigZ = sigB - sigZ;
    163  sigZ += sigB;
    168  sigZ = UINT64_C( 0x8000000000000000 ) | sigZ>>1;
    [all …]
s_mulAddF64.c (in softfloat_mulAddF64()):
    265  uint64_t sigZ;
    322  sigZ =
    357  sigZ =
    377  sigZ =
    383  sigZ = softfloat_shortShiftRightJam64( sigZ, 1 );
    391  sigZ = sigC - sigZ;
    395  sigZ = (sigZ - 1) | 1;
    439  sigZ =
    444  sigZ =
    452  sigZ =
    [all …]
extF80_roundToInt.c (in extF80_roundToInt()):
    53   uint_fast64_t sigZ;
    71   sigZ = 0;
    85   sigZ = uiZ.v0;
    90   sigZ = sigA;
    111  sigZ = 0;
    123  sigZ = sigA;
    125  sigZ += lastBitMask>>1;
    128  if ( ! (sigZ & roundBitsMask) ) sigZ &= ~lastBitMask;
    134  sigZ &= ~roundBitsMask;
    135  if ( ! sigZ ) {
    [all …]
extF80M_roundToInt.c (in extF80M_roundToInt()):
    93   sigZ = 0;
    116  sigZ = 0;
    133  sigZ = sigA;
    143  sigZ = sigA;
    145  sigZ += lastBitMask>>1;
    147  sigZ += lastBitMask>>1;
    148  if ( ! (sigZ & roundBitsMask) ) sigZ &= ~lastBitMask;
    151  sigZ += roundBitsMask;
    154  sigZ &= ~roundBitsMask;
    155  if ( ! sigZ ) {
    [all …]
f32_div.c (in f32_div()):
    62   uint_fast32_t sigZ;
    64   uint_fast32_t sigZ;
    127  sigZ = sig64A / sigB;
    128  if ( ! (sigZ & 0x3F) ) sigZ |= ((uint_fast64_t) sigB * sigZ != sig64A);
    140  sigZ += 2;
    141  if ( (sigZ & 0x3F) < 2 ) {
    142  sigZ &= ~3;
    144  rem = ((uint_fast64_t) sigA<<31) - (uint_fast64_t) sigZ * sigB;
    149  sigZ -= 4;
    151  if ( rem ) sigZ |= 1;
    [all …]
f32_sqrt.c (in f32_sqrt()):
    54   uint_fast32_t sigZ, shiftedSigZ;
    94   sigZ =
    97   if ( expA ) sigZ >>= 1;
    98   sigZ += 2;
    101  if ( (sigZ & 0x3F) < 2 ) {
    102  shiftedSigZ = sigZ>>2;
    104  sigZ = shiftedSigZ<<2;
    106  sigZ |= 1;
    108  if ( negRem ) --sigZ;
    111  return softfloat_roundPackToF32( 0, expZ, sigZ );
s_addMagsF128.c (in softfloat_addMagsF128()):
    58   struct uint128 uiZ, sigZ;
    78   sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 );
    80   uiZ.v64 = packToF128UI64( signZ, 0, sigZ.v64 );
    81   uiZ.v0 = sigZ.v0;
    85   sigZ.v64 |= UINT64_C( 0x0002000000000000 );
    129  sigZ =
    137  if ( sigZ.v64 < UINT64_C( 0x0002000000000000 ) ) goto roundAndPack;
    142  sigZ.v64, sigZ.v0, sigZExtra, 1 );
    143  sigZ = sig128Extra.v;
    147  softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra );
f64_sqrt.c (in f64_sqrt()):
    57   uint_fast64_t sigZ, shiftedSigZ;
    110  sigZ = ((uint_fast64_t) sig32Z<<32 | 1<<5) + ((uint_fast64_t) q<<3);
    113  if ( (sigZ & 0x1FF) < 1<<5 ) {
    114  sigZ &= ~(uint_fast64_t) 0x3F;
    115  shiftedSigZ = sigZ>>6;
    118  --sigZ;
    120  if ( rem ) sigZ |= 1;
    123  return softfloat_roundPackToF64( 0, expZ, sigZ );
s_addMagsExtF80.c (in softfloat_addMagsExtF80()):
    60   uint_fast64_t uiZ0, sigZ, sigZExtra;
    85   sigZ = sigA + sigB;
    88   normExpSig = softfloat_normSubnormalExtF80Sig( sigZ );
    90   sigZ = normExpSig.sig;
    132  sigZ = sigA + sigB;
    133  if ( sigZ & UINT64_C( 0x8000000000000000 ) ) goto roundAndPack;
    137  sig64Extra = softfloat_shortShiftRightJam64Extra( sigZ, sigZExtra, 1 );
    138  sigZ = sig64Extra.v | UINT64_C( 0x8000000000000000 );
    144  signZ, expZ, sigZ, sigZExtra, extF80_roundingPrecision );
s_addMagsF32.c (in softfloat_addMagsF32()):
    54   uint_fast32_t sigZ;
    75   sigZ = 0x40000000 + sigA + sigB;
    96   sigZ = 0x20000000 + sigA + sigB;
    97   if ( sigZ < 0x40000000 ) {
    99   sigZ <<= 1;
    102  return softfloat_roundPackToF32( signZ, expZ, sigZ );
f64_mul.c (in f64_mul()):
    66   uint_fast64_t sigZ, uiZ;
    115  sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
    118  sigZ =
    120  if ( sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] ) sigZ |= 1;
    122  if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
    124  sigZ <<= 1;
    126  return softfloat_roundPackToF64( signZ, expZ, sigZ );
s_mulAddF32.c (in softfloat_mulAddF32()):
    65   uint_fast32_t sigZ;
    123  sigZ = softfloat_shortShiftRightJam64( sigProd, 31 );
    135  sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff );
    142  sigZ = softfloat_shortShiftRightJam64( sig64Z, 32 );
    144  if ( sigZ < 0x40000000 ) {
    146  sigZ <<= 1;
    170  sigZ = softfloat_shortShiftRightJam64( sig64Z, -shiftCount );
    172  sigZ = (uint_fast32_t) sig64Z<<shiftCount;
    176  return softfloat_roundPackToF32( signZ, expZ, sigZ );
s_addMagsF64.c (in softfloat_addMagsF64()):
    54   uint_fast64_t sigZ;
    77   sigZ = UINT64_C( 0x4000000000000000 ) + sigA + sigB;
    98   sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB;
    99   if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
    101  sigZ <<= 1;
    104  return softfloat_roundPackToF64( signZ, expZ, sigZ );
f128_mul.c (in f128_mul()):
    63   struct uint128 sigZ;
    122  sigZ =
    127  if ( UINT64_C( 0x0002000000000000 ) <= sigZ.v64 ) {
    131  sigZ.v64, sigZ.v0, sigZExtra, 1 );
    132  sigZ = sig128Extra.v;
    136  softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra );
extF80_sqrt.c (in extF80_sqrt()):
    60   uint_fast64_t q, sigZ, x64;
    120  sigZ = ((uint_fast64_t) sig32Z<<32) + (q<<3);
    121  x64 = ((uint_fast64_t) sig32Z<<32) + sigZ;
    128  x64 = sigZ;
    129  sigZ = (sigZ<<1) + (q>>25);
    142  if ( ! sigZExtra ) --sigZ;
    150  0, expZ, sigZ, sigZExtra, extF80_roundingPrecision );
f64_div.c (in f64_div()):
    63   uint_fast64_t sigZ;
    131  sigZ = ((uint_fast64_t) sig32Z<<32) + ((uint_fast64_t) q<<4);
    134  if ( (sigZ & 0x1FF) < 4<<4 ) {
    136  sigZ &= ~(uint_fast64_t) 0x7F;
    142  sigZ -= 1<<7;
    144  if ( rem ) sigZ |= 1;
    147  return softfloat_roundPackToF64( signZ, expZ, sigZ );
f128_sqrt.c (in f128_sqrt()):
    61   struct uint128 sigZ;
    148  sigZ =
    158  y = softfloat_shortShiftLeft128( sigZ.v64, sigZ.v0, 6 );
    177  sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, 0, 1 );
    183  return softfloat_roundPackToF128( 0, expZ, sigZ.v64, sigZ.v0, sigZExtra );
ui64_to_extF80M.c (in ui64_to_extF80M()):
    58   uint64_t sigZ;
    63   sigZ = 0;
    67   sigZ = a<<shiftCount;
    70   zSPtr->signif = sigZ;
ui32_to_extF80M.c (in ui32_to_extF80M()):
    58   uint64_t sigZ;
    63   sigZ = 0;
    67   sigZ = (uint64_t) (a<<shiftCount)<<32;
    70   zSPtr->signif = sigZ;
i64_to_extF80M.c (in i64_to_extF80M()):
    58   uint64_t sigZ;
    65   sigZ = 0;
    71   sigZ = absA<<shiftCount;
    74   zSPtr->signif = sigZ;
i32_to_extF80M.c (in i32_to_extF80M()):
    58   uint64_t sigZ;
    65   sigZ = 0;
    71   sigZ = (uint64_t) (absA<<shiftCount)<<32;
    74   zSPtr->signif = sigZ;
f32_mul.c (in f32_mul()):
    61   uint_fast32_t sigZ, uiZ;
    100  sigZ = softfloat_shortShiftRightJam64( (uint_fast64_t) sigA * sigB, 32 );
    101  if ( sigZ < 0x40000000 ) {
    103  sigZ <<= 1;
    105  return softfloat_roundPackToF32( signZ, expZ, sigZ );
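Taken together, the f32_mul.c matches above show the recurring sigZ pattern in this library: the 64-bit product of the operand significands is collapsed into a result significand with a "jamming" right shift (discarded bits are ORed into the low bit so later rounding still sees them), renormalized if it fell below 0x40000000, and handed to softfloat_roundPackToF32(). The C sketch below is only an illustration of that flow under stated assumptions: shift_right_jam64() and mul_sig_path() are hypothetical stand-ins (not the real softfloat_shortShiftRightJam64() or f32_mul()), the exponent decrement on the normalization path is assumed rather than visible in the matches, and the operand pre-shifting done in the real source is omitted.

    #include <stdint.h>

    /* Hypothetical stand-in for softfloat_shortShiftRightJam64(): shift right
     * and OR any discarded non-zero bits into the least significant bit. */
    static uint64_t shift_right_jam64(uint64_t a, unsigned dist)
    {
        return (a >> dist) | ((a & ((UINT64_C(1) << dist) - 1)) != 0);
    }

    /* Illustrative significand path of a binary32 multiply. sigA and sigB are
     * pre-shifted operand significands with the leading (hidden) bit set, as
     * suggested by the 0x40000000 bound in the matches; expZ is the
     * precomputed result exponent. */
    static uint32_t mul_sig_path(uint32_t sigA, uint32_t sigB, int_fast16_t *expZ)
    {
        uint32_t sigZ = (uint32_t) shift_right_jam64((uint64_t) sigA * sigB, 32);
        if (sigZ < 0x40000000) {
            --*expZ;          /* assumed: renormalize when the product lost a bit */
            sigZ <<= 1;
        }
        return sigZ;          /* would then feed softfloat_roundPackToF32() */
    }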
extF80_div.c (in extF80_div()):
    64   uint_fast64_t sigZ;
    134  sigZ = 0;
    148  sigZ = (sigZ<<29) + q;
    168  sigZ = (sigZ<<6) + (q>>23);
    172  signZ, expZ, sigZ, sigZExtra, extF80_roundingPrecision );
s_subMagsF32.c (in softfloat_subMagsF32()):
    55   uint_fast32_t sigZ;
    93   sigZ = sigB - sigA;
    106  sigZ = sigA - sigB;
    108  return softfloat_normRoundPackToF32( signZ, expZ - 1, sigZ );