Lines Matching refs:d1
943 mov.b 1+EXC_CMDREG(%a6),%d1
944 andi.w &0x007f,%d1 # extract extension
955 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
956 jsr (tbl_unsupp.l,%pc,%d1.l*1)
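
Lines 943-956 are the unsupported-data-type dispatch: the low byte of the command register is masked down to its 7-bit extension field, which indexes a PC-relative table of 32-bit entries that are then jsr'd through (table base plus fetched entry). A minimal C sketch of that table dispatch; the handler name and table contents are invented, only the 0x7f mask and the tbl_unsupp name come from the listing.

    #include <stdint.h>
    #include <stdio.h>

    typedef void (*unsupp_handler_t)(void);

    static void emul_stub(void) { puts("emulate unsupported op"); } /* invented handler */

    /* The 68k table holds 32-bit entries fetched and then jsr'd through
       relative to the table base; a plain function-pointer table carries
       the same dispatch idea in C. */
    static unsupp_handler_t tbl_unsupp[0x80];

    static void dispatch_unsupp(uint8_t cmdreg_lo)
    {
        unsigned ext = cmdreg_lo & 0x7f;   /* andi.w &0x007f,%d1 : extension field */
        if (tbl_unsupp[ext])
            tbl_unsupp[ext]();             /* jsr through the selected entry */
    }

    int main(void)
    {
        tbl_unsupp[0] = emul_stub;         /* register one invented handler */
        dispatch_unsupp(0);
        return 0;
    }
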
983 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1340 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1407 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1572 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1679 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1711 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1764 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1792 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1846 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
1851 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
1852 jsr (tbl_unsupp.l,%pc,%d1.l*1)
1892 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1991 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2123 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2318 mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
2319 lsr.l %d0,%d1 # shift it
2320 bset &31,%d1 # set j-bit
2321 mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
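
Lines 2318-2321 rebuild the high mantissa longword of a DENORM source: shift it right by the count in %d0, then set bit 31, the explicit integer (j) bit, before storing it back. A one-function C sketch, assuming the shift count stays below 32:

    #include <stdint.h>

    /* lsr.l %d0,%d1 ; bset &31,%d1 : shift the DENORM's high mantissa
       right by the count in d0 and set the explicit integer (j) bit.
       Assumes the shift count is below 32. */
    static uint32_t denorm_fix_hi(uint32_t hi_man, unsigned count)
    {
        hi_man >>= count;
        return hi_man | 0x80000000u;
    }
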
2339 mov.w &0x3c01,%d1 # pass denorm threshold
2359 clr.l %d1
2466 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2594 mov.b 1+EXC_CMDREG(%a6),%d1
2595 andi.w &0x007f,%d1 # extract extension
2603 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
2604 jsr (tbl_unsupp.l,%pc,%d1.l*1)
2955 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2975 bfextu %d0{&19:&3},%d1
2977 cmpi.b %d1,&0x7 # move all regs?
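
bfextu %d0{&19:&3},%d1 extracts a 3-bit field whose offset is counted from the most significant bit of %d0; the other bfextu lines in this listing ({&1:&7}, {&0:&10}, {&16:&6}) follow the same convention. A small C helper that mirrors this register-operand extraction (the helper name is mine):

    #include <stdint.h>

    /* Register-operand bfextu: 'offset' counts down from bit 31, as the
       68020+ bitfield instructions do; valid for offset + width <= 32. */
    static uint32_t bfextu32(uint32_t reg, unsigned offset, unsigned width)
    {
        return (reg >> (32u - offset - width)) & ((1u << width) - 1u);
    }

    /* bfextu %d0{&19:&3},%d1  ==  bfextu32(d0, 19, 3), i.e. bits 12..10. */
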
2993 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3044 movc %pcr,%d1
3045 btst &0x1,%d1
3059 movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
3167 mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
3168 andi.w &0x7fff,%d1
3169 cmpi.w %d1,&0x7fff
3174 mov.l FP_SRC_HI(%a6),%d1
3175 andi.l &0x7fffffff,%d1
3182 mov.l &0x7fffffff,%d1
3185 addq.l &0x1,%d1
3187 mov.l %d1,L_SCR1(%a6)
3191 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
3309 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3337 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3361 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
3775 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3799 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3863 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3870 bfextu %d0{&0:&10},%d1 # is it an fmovecr?
3871 cmpi.w %d1,&0x03c8
3874 bfextu %d0{&16:&6},%d1 # is it an fmovecr?
3875 cmpi.b %d1,&0x17
4075 bfextu %d0{&16:&6},%d1 # extract upper 6 of cmdreg
4076 cmpi.b %d1,&0x17 # is op an fmovecr?
4085 mov.b 1+EXC_CMDREG(%a6),%d1
4086 andi.w &0x003f,%d1 # extract extension bits
4087 lsl.w &0x3,%d1 # shift left 3 bits
4088 or.b STAG(%a6),%d1 # insert src optag bits
4093 mov.w (tbl_trans.w,%pc,%d1.w*2),%d1
4094 jsr (tbl_trans.w,%pc,%d1.w*1) # emulate
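
Lines 4085-4094 build the transcendental dispatch index by shifting the six command-extension bits left three places and OR-ing in the 3-bit source operand tag, i.e. (extension << 3) | STAG; the result then picks a 16-bit displacement out of tbl_trans. A C sketch of just the index packing (the helper name and the fsin example value are assumptions on my part):

    #include <stdint.h>

    /* Pack the 6 command-extension bits and the 3-bit source operand tag
       into the tbl_trans index: (ext << 3) | stag. */
    static unsigned trans_index(uint8_t cmdreg_lo, uint8_t stag)
    {
        return ((unsigned)(cmdreg_lo & 0x3f) << 3) | (stag & 0x7u);
    }

    /* Example (assuming fsin's extension field is 0x0e):
       trans_index(0x0e, 0) == 0x70, the slot for fsin on a NORM source. */
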
4107 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4245 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4364 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4412 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
5200 ror.l &1,%d1
5201 and.l &0x80000000,%d1
5203 eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
5261 and.l &0x80000000,%d1
5265 or.l &0x3F800000,%d1 # D1 IS SGN IN SINGLE
5266 mov.l %d1,POSNEG1(%a6)
5301 cmp.l %d1,&0x3FFF8000
5305 mov.l ADJN(%a6),%d1
5306 cmp.l %d1,&0
5315 mov.b &FMOV_OP,%d1 # last inst is MOVE
5348 mov.l (%a0),%d1
5349 mov.w 4(%a0),%d1
5350 and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
5352 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5357 cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5373 mov.l INT(%a6),%d1
5374 asl.l &4,%d1
5375 add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
5383 mov.l INT(%a6),%d1
5384 ror.l &1,%d1
5385 cmp.l %d1,&0 # D1 < 0 IFF N IS ODD
5400 mov.l %d1,%d2
5403 eor.l %d1,%d2
5413 ror.l &1,%d1
5414 and.l &0x80000000,%d1
5416 eor.l %d1,POSNEG1(%a6)
5426 eor.l %d1,SPRIME(%a6)
5476 ror.l &1,%d1
5477 and.l &0x80000000,%d1
5482 eor.l %d1,RPRIME(%a6)
5483 eor.l %d1,SPRIME(%a6)
5487 or.l &0x3F800000,%d1
5488 mov.l %d1,POSNEG1(%a6)
5538 cmp.l %d1,&0x3FFF8000
5551 mov.b &FMOV_OP,%d1 # last inst is MOVE
5580 cmp.l %d1,&0x7ffeffff # is arg dangerously large?
5610 mov.w INARG(%a6),%d1
5611 mov.l %d1,%a1 # save a copy of D1
5612 and.l &0x00007FFF,%d1
5613 sub.l &0x00003FFF,%d1 # d1 = K
5614 cmp.l %d1,&28
5617 sub.l &27,%d1 # d1 = L := K-27
5621 clr.l %d1 # d1 = L := 0
5632 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
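
Lines 5610-5632 belong to the large-argument reduction: the biased extended exponent is isolated, the 0x3FFF bias removed to get K, and L is chosen so that 2/pi can be scaled down by 2**(-L) before the multiply. A worked C sketch of that bookkeeping; the K >= 28 branch direction is inferred, since the branch instruction itself never references %d1 and so is absent from this listing:

    #include <stdint.h>

    /* Strip the sign from the packed {sign,exponent} word, remove the
       0x3FFF bias to get K, then pick L = K - 27 for K >= 28 and L = 0
       otherwise (the scale applied to 2/pi as 2**(-L)). */
    static void reduce_exponents(uint16_t sgn_exp, int32_t *k_out, int32_t *l_out)
    {
        int32_t k = (int32_t)(sgn_exp & 0x7fff) - 0x3fff;
        *k_out = k;
        *l_out = (k >= 28) ? (k - 27) : 0;
    }
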
6087 mov.l %d1,%d2 # d2 = L
6094 add.l &0x00003FDD,%d1
6095 mov.w %d1,FP_SCR1_EX(%a6)
6099 mov.b ENDFLAG(%a6),%d1
6124 cmp.b %d1,&0
6137 mov.l INT(%a6),%d1
6138 ror.l &1,%d1
6402 mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
6403 and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
6452 cmp.l %d1,&0x3FFF8000
6462 cmp.l %d1,&0x3FD78000
6505 mov.b &FMOV_OP,%d1 # last inst is MOVE
6513 cmp.l %d1,&0x40638000
6643 cmp.l %d1,&0x3FD78000
6673 mov.l (%a0),%d1
6674 and.l &0x80000000,%d1 # SIGN BIT OF X
6675 or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
6676 mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
6684 mov.b &FMOV_OP,%d1 # last inst is MOVE
6733 mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
6734 mov.w 4(%a0),%d1
6735 and.l &0x7FFFFFFF,%d1
6736 cmp.l %d1,&0x3FFF8000
9181 mov.l (%a0),%d1
9182 or.l &0x00800001,%d1
9183 fadd.s %d1,%fp0
9305 mov.l (%a0),%d1
9306 or.l &0x00800001,%d1
9307 fadd.s %d1,%fp0
9311 # smovcr(): returns the ROM constant at the offset specified in d1 #
9316 # d1 = ROM offset #
9453 mulu.w &0xc,%d1 # offset points into tables
9460 fmovm.x (%a0,%d1.w),&0x80 # return result in fp0
9470 mov.w 0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
9471 mov.l 0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
9472 mov.l 0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
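
smovcr multiplies the ROM offset by 12 because each fmovecr constant is stored as three longwords: the {sign,exponent} word padded to a longword, then the high and low mantissa longwords, exactly the three loads at 9470-9472. A C sketch of that 12-byte indexing; the table name and all contents beyond the pi entry are illustrative:

    #include <stdint.h>

    /* One fmovecr constant = 12 bytes: {sign,exponent} padded to a
       longword, then high and low mantissa longwords. */
    typedef struct { uint32_t ex, hi, lo; } x96_const_t;

    static const x96_const_t fmovecr_rom[] = {
        { 0x40000000u, 0xc90fdaa2u, 0x2168c235u },  /* offset 0x00: pi */
        /* ... remaining ROM constants ... */
    };

    static x96_const_t fetch_rom_constant(unsigned offset)
    {
        /* mulu.w &0xc,%d1 : offset * 12 bytes selects one entry */
        return fmovecr_rom[offset];
    }
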
9473 mov.l %d0,%d1
9583 mov.w DST_EX(%a1),%d1 # get dst exponent
9585 andi.l &0x00007fff,%d1 # strip sign from dst exp
9633 mov.l &0x80000000,%d1 # load normalized mantissa
9638 lsr.l %d0,%d1 # no; bit stays in upper lw
9640 mov.l %d1,-(%sp) # insert new high mantissa
9645 lsr.l %d0,%d1 # make low mantissa longword
9646 mov.l %d1,-(%sp) # insert new low mantissa
10071 mov.b &FMOV_OP,%d1 # last inst is MOVE
10179 mov.l %d0,%d1 # make copy of rnd prec,mode
10180 andi.b &0xc0,%d1 # extended precision?
10194 smi.b %d1 # set d1 accordingly
10278 mov.b %d0,%d1 # fetch rnd mode/prec
10279 andi.b &0xc0,%d1 # extract rnd prec
10306 mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
10307 andi.l &0x7ff,%d1 # dbl mantissa set?
11383 cmpi.b %d1,&FMOV_OP
11385 cmpi.b %d1,&FADD_OP
11630 fmov.l %fpsr,%d1 # save status
11633 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11638 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
11639 mov.l %d1,%d2 # make a copy
11640 andi.l &0x7fff,%d1 # strip sign
11642 sub.l %d0,%d1 # subtract scale factor
11643 or.w %d2,%d1 # concat old sign,new exp
11644 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
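
The five-instruction run at 11638-11644, repeated many times below, rescales a stored exponent: copy the packed {sign,exponent} word, keep only the sign in the copy (that andi.w &0x8000,%d2 touches only %d2 and therefore never shows up in this d1 listing), strip the working value to 15 exponent bits, subtract the scale factor held in %d0, and OR sign and new exponent back together. A C sketch of the splice:

    #include <stdint.h>

    /* Split the packed {sign,exponent} word, undo the scale factor that
       was applied before the emulated operation, and splice sign and new
       exponent back together. */
    static uint16_t rescale_exponent(uint16_t sgn_exp, int32_t scale)
    {
        uint16_t sign = sgn_exp & 0x8000u;                    /* keep old sign              */
        int32_t  exp  = (int32_t)(sgn_exp & 0x7fff) - scale;  /* strip sign, subtract scale */
        return (uint16_t)(sign | ((uint32_t)exp & 0x7fffu));  /* concat sign,new exp        */
    }
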
11670 fmov.l %fpsr,%d1 # save status
11673 or.l %d1,USER_FPSR(%a6) # save INEX2,N
11679 mov.b FPCR_ENABLE(%a6),%d1
11680 andi.b &0x13,%d1 # is OVFL or INEX enabled?
11686 sne %d1 # set sign param accordingly
11700 mov.l L_SCR3(%a6),%d1
11701 andi.b &0xc0,%d1 # test the rnd prec
11995 mov.b DST_EX(%a1),%d1
11996 eor.b %d0,%d1
12012 mov.b DST_EX(%a1),%d1
12013 eor.b %d0,%d1
12280 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12281 mov.l %d1,%d2 # make a copy
12282 andi.l &0x7fff,%d1 # strip sign
12284 sub.l %d0,%d1 # subtract scale factor
12285 sub.l &0x6000,%d1 # subtract bias
12286 andi.w &0x7fff,%d1
12287 or.w %d2,%d1
12288 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12302 fmov.l %fpsr,%d1 # save status
12305 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13060 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
13061 mov.l %d1,%d2 # make a copy
13062 andi.l &0x7fff,%d1 # strip sign
13064 sub.l %d0,%d1 # subtract scale factor
13065 subi.l &0x6000,%d1 # subtract bias
13066 andi.w &0x7fff,%d1
13067 or.w %d2,%d1 # concat sign,exp
13068 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
13082 fmov.l %fpsr,%d1 # save status
13085 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13139 mov.b STAG(%a6),%d1
13157 cmpi.b %d1,&ZERO # weed out ZERO
13159 cmpi.b %d1,&INF # weed out INF
13161 cmpi.b %d1,&SNAN # weed out SNAN
13163 cmpi.b %d1,&QNAN # weed out QNAN
13337 mov.b STAG(%a6),%d1
13357 cmpi.b %d1,&ZERO # weed out ZERO
13359 cmpi.b %d1,&INF # weed out INF
13361 cmpi.b %d1,&DENORM # weed out DENORM
13363 cmpi.b %d1,&SNAN # weed out SNAN
13462 mov.b STAG(%a6),%d1
13479 mov.w SRC_EX(%a0),%d1
13480 bclr &15,%d1 # force absolute value
13481 mov.w %d1,FP_SCR0_EX(%a6) # insert exponent
13517 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
13518 andi.w &0x8000,%d1 # keep old sign
13520 or.w %d1,%d0 # concat old sign, new exponent
13556 fmov.l %fpsr,%d1 # save FPSR
13559 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13564 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
13565 mov.l %d1,%d2 # make a copy
13566 andi.l &0x7fff,%d1 # strip sign
13567 sub.l %d0,%d1 # subtract scale factor
13569 or.w %d1,%d2 # concat old sign,new exp
13600 mov.b FPCR_ENABLE(%a6),%d1
13601 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
13606 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
13619 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
13622 mov.l %d1,%d2 # make a copy
13623 andi.l &0x7fff,%d1 # strip sign
13625 sub.l %d0,%d1 # subtract scale factor
13626 addi.l &0x6000,%d1 # add new bias
13627 andi.w &0x7fff,%d1
13628 or.w %d2,%d1 # concat new sign,new exp
13629 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
13644 fmov.l %fpsr,%d1 # save FPSR
13646 or.l %d1,USER_FPSR(%a6) # save INEX2,N
13651 mov.b FPCR_ENABLE(%a6),%d1
13652 andi.b &0x13,%d1 # is OVFL or INEX enabled?
13661 sne %d1 # set sign param accordingly
13715 cmpi.b %d1,&DENORM # weed out DENORM
13717 cmpi.b %d1,&SNAN # weed out SNAN
13719 cmpi.b %d1,&QNAN # weed out QNAN
13724 cmpi.b %d1,&INF # weed out INF
13758 clr.w %d1
13759 mov.b DTAG(%a6),%d1
13760 lsl.b &0x3,%d1
13761 or.b STAG(%a6),%d1
13782 mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1
13783 jmp (tbl_fcmp_op.b,%pc,%d1.w*1)
13962 clr.w %d1
13963 mov.b DTAG(%a6),%d1
13964 lsl.b &0x3,%d1
13965 or.b STAG(%a6),%d1
14001 fmov.l %fpsr,%d1 # save status
14004 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14009 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
14010 mov.l %d1,%d2 # make a copy
14011 andi.l &0x7fff,%d1 # strip sign
14013 sub.l %d0,%d1 # subtract scale factor
14014 or.w %d2,%d1 # concat old sign,new exp
14015 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14028 fmov.l %fpsr,%d1 # save status
14031 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14038 mov.b FPCR_ENABLE(%a6),%d1
14039 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14044 sne %d1 # set sign param accordingly
14056 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14057 mov.l %d1,%d2 # make a copy
14058 andi.l &0x7fff,%d1 # strip sign
14059 sub.l %d0,%d1 # subtract scale factor
14060 subi.l &0x6000,%d1 # subtract bias
14061 andi.w &0x7fff,%d1
14063 or.w %d2,%d1 # concat old sign,new exp
14064 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14077 fmov.l %fpsr,%d1 # save status
14080 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14303 clr.w %d1
14304 mov.b DTAG(%a6),%d1
14305 lsl.b &0x3,%d1
14306 or.b STAG(%a6),%d1 # combine src tags
14330 mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
14331 lsr.b &0x6,%d1
14348 fmov.l %fpsr,%d1 # save FPSR
14351 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14356 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
14357 mov.l %d1,%d2 # make a copy
14358 andi.l &0x7fff,%d1 # strip sign
14360 sub.l %d0,%d1 # subtract scale factor
14361 or.w %d2,%d1 # concat old sign,new exp
14362 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14375 fmov.l %fpsr,%d1
14378 or.l %d1,USER_FPSR(%a6) # save INEX,N
14381 mov.w (%sp),%d1 # fetch new exponent
14383 andi.l &0x7fff,%d1 # strip sign
14384 sub.l %d0,%d1 # subtract scale factor
14385 cmp.l %d1,&0x7fff # did divide overflow?
14391 mov.b FPCR_ENABLE(%a6),%d1
14392 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14397 sne %d1 # set sign param accordingly
14409 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14410 mov.l %d1,%d2 # make a copy
14411 andi.l &0x7fff,%d1 # strip sign
14413 sub.l %d0,%d1 # subtract scale factor
14414 subi.l &0x6000,%d1 # subtract new bias
14415 andi.w &0x7fff,%d1 # clear ms bit
14416 or.w %d2,%d1 # concat old sign,new exp
14417 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14432 fmov.l %fpsr,%d1 # save status
14435 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14437 mov.b FPCR_ENABLE(%a6),%d1
14438 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14445 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14466 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14467 mov.l %d1,%d2 # make a copy
14468 andi.l &0x7fff,%d1 # strip sign
14470 sub.l %d0,%d1 # subtract scale factor
14471 addi.l &0x6000,%d1 # add bias
14472 andi.w &0x7fff,%d1 # clear top bit
14473 or.w %d2,%d1 # concat old sign, new exp
14474 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14490 fmov.l %fpsr,%d1 # save status
14493 or.l %d1,USER_FPSR(%a6) # save INEX2,N
14509 clr.l %d1 # clear scratch register
14510 ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
14512 fmov.l %d1,%fpcr # set FPCR
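
The clr.l / ori.b &rz_mode*0x10 / fmov.l %d1,%fpcr sequence forces round-to-zero, while the nearby andi.b &0xc0 and andi.b &0x30 masks keep only the rounding precision or only the rounding mode. That works because the FPCR mode-control byte holds precision in bits 7-6 and mode in bits 5-4. A C sketch of composing that byte (the enum names are mine; the field encodings are the standard 68k ones):

    #include <stdint.h>

    enum rnd_mode { RN_MODE = 0, RZ_MODE = 1, RM_MODE = 2, RP_MODE = 3 };
    enum rnd_prec { EXT_PREC = 0, SGL_PREC = 1, DBL_PREC = 2 };

    /* FPCR mode-control byte: bits 7-6 = rounding precision,
       bits 5-4 = rounding mode. */
    static uint8_t fpcr_mode_byte(enum rnd_prec prec, enum rnd_mode mode)
    {
        return (uint8_t)(((unsigned)prec << 6) | ((unsigned)mode << 4));
    }

    /* clr.l %d1 ; ori.b &rz_mode*0x10,%d1 corresponds to
       fpcr_mode_byte(EXT_PREC, RZ_MODE) == 0x10. */
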
14529 mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
14530 jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1)
14650 clr.w %d1
14651 mov.b DTAG(%a6),%d1
14652 lsl.b &0x3,%d1
14653 or.b STAG(%a6),%d1 # combine src tags
14672 fmov.l %fpsr,%d1 # fetch INEX2,N,Z
14674 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
14682 mov.w 2+L_SCR3(%a6),%d1
14683 lsr.b &0x6,%d1
14689 cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
14692 cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
14697 mov.w (%sp),%d1
14698 andi.w &0x8000,%d1 # keep sign
14699 or.w %d2,%d1 # concat sign,new exp
14700 mov.w %d1,(%sp) # insert new exponent
14724 mov.b FPCR_ENABLE(%a6),%d1
14725 andi.b &0x13,%d1 # is OVFL or INEX enabled?
14731 sne %d1 # set sign param accordingly
14740 mov.b L_SCR3(%a6),%d1
14741 andi.b &0xc0,%d1 # is precision extended?
14745 mov.w (%sp),%d1
14746 andi.w &0x8000,%d1 # keep sign
14749 or.w %d2,%d1 # concat sign,new exp
14750 mov.w %d1,(%sp) # insert new exponent
14758 mov.l L_SCR3(%a6),%d1
14759 andi.b &0x30,%d1 # keep rnd mode
14760 fmov.l %d1,%fpcr # set FPCR
14783 fmov.l %fpsr,%d1 # save status
14785 or.l %d1,USER_FPSR(%a6) # save INEX,N
14787 mov.b FPCR_ENABLE(%a6),%d1
14788 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14795 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14805 mov.l L_SCR3(%a6),%d1
14806 andi.b &0xc0,%d1 # is precision extended?
14819 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14820 mov.l %d1,%d2 # make a copy
14821 andi.l &0x7fff,%d1 # strip sign
14823 sub.l %d0,%d1 # subtract scale factor
14824 addi.l &0x6000,%d1 # add new bias
14825 andi.w &0x7fff,%d1 # clear top bit
14826 or.w %d2,%d1 # concat sign,new exp
14827 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14832 mov.l L_SCR3(%a6),%d1
14833 andi.b &0x30,%d1 # use only rnd mode
14834 fmov.l %d1,%fpcr # set FPCR
14844 mov.l L_SCR3(%a6),%d1
14845 andi.b &0xc0,%d1
14848 mov.l 0x4(%sp),%d1 # extract hi(man)
14849 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
14870 mov.l L_SCR3(%a6),%d1
14871 andi.b &0xc0,%d1 # keep rnd prec
14872 ori.b &rz_mode*0x10,%d1 # insert rnd mode
14873 fmov.l %d1,%fpcr # set FPCR
14893 mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1
14894 jmp (tbl_fadd_op.b,%pc,%d1.w*1)
14962 mov.b DST_EX(%a1),%d1
14963 eor.b %d0,%d1
14980 mov.b 3+L_SCR3(%a6),%d1
14981 andi.b &0x30,%d1 # extract rnd mode
14982 cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
15023 mov.b DST_EX(%a1),%d1
15024 eor.b %d1,%d0
15103 clr.w %d1
15104 mov.b DTAG(%a6),%d1
15105 lsl.b &0x3,%d1
15106 or.b STAG(%a6),%d1 # combine src tags
15125 fmov.l %fpsr,%d1 # fetch INEX2, N, Z
15127 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
15135 mov.w 2+L_SCR3(%a6),%d1
15136 lsr.b &0x6,%d1
15142 cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
15145 cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
15150 mov.w (%sp),%d1
15151 andi.w &0x8000,%d1 # keep sign
15152 or.w %d2,%d1 # concat sign,new exp
15153 mov.w %d1,(%sp) # insert new exponent
15177 mov.b FPCR_ENABLE(%a6),%d1
15178 andi.b &0x13,%d1 # is OVFL or INEX enabled?
15184 sne %d1 # set sign param accordingly
15193 mov.b L_SCR3(%a6),%d1
15194 andi.b &0xc0,%d1 # is precision extended?
15198 mov.w (%sp),%d1 # fetch {sgn,exp}
15199 andi.w &0x8000,%d1 # keep sign
15202 or.w %d2,%d1 # concat sign,exp
15203 mov.w %d1,(%sp) # insert new exponent
15211 mov.l L_SCR3(%a6),%d1
15212 andi.b &0x30,%d1 # clear rnd prec
15213 fmov.l %d1,%fpcr # set FPCR
15236 fmov.l %fpsr,%d1 # save status
15238 or.l %d1,USER_FPSR(%a6)
15240 mov.b FPCR_ENABLE(%a6),%d1
15241 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15248 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
15258 mov.l L_SCR3(%a6),%d1
15259 andi.b &0xc0,%d1 # is precision extended?
15272 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
15273 mov.l %d1,%d2 # make a copy
15274 andi.l &0x7fff,%d1 # strip sign
15276 sub.l %d0,%d1 # subtract scale factor
15277 addi.l &0x6000,%d1 # add new bias
15278 andi.w &0x7fff,%d1 # clear top bit
15279 or.w %d2,%d1 # concat sgn,exp
15280 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
15285 mov.l L_SCR3(%a6),%d1
15286 andi.b &0x30,%d1 # clear rnd prec
15287 fmov.l %d1,%fpcr # set FPCR
15297 mov.l L_SCR3(%a6),%d1
15298 andi.b &0xc0,%d1 # fetch rnd prec
15301 mov.l 0x4(%sp),%d1
15302 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
15323 mov.l L_SCR3(%a6),%d1
15324 andi.b &0xc0,%d1 # keep rnd prec
15325 ori.b &rz_mode*0x10,%d1 # insert rnd mode
15326 fmov.l %d1,%fpcr # set FPCR
15346 mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
15347 jmp (tbl_fsub_op.b,%pc,%d1.w*1)
15415 mov.b DST_EX(%a1),%d1
15416 eor.b %d1,%d0
15432 mov.b 3+L_SCR3(%a6),%d1
15433 andi.b &0x30,%d1 # extract rnd mode
15434 cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
15475 mov.b DST_EX(%a1),%d1
15476 eor.b %d1,%d0
15674 fmov.l %fpsr,%d1 # save status
15677 or.l %d1,USER_FPSR(%a6) # save INEX2,N
15680 mov.b FPCR_ENABLE(%a6),%d1
15681 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15688 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
15701 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
15704 mov.l %d1,%d2 # make a copy
15705 andi.l &0x7fff,%d1 # strip sign
15707 sub.l %d0,%d1 # subtract scale factor
15708 addi.l &0x6000,%d1 # add new bias
15709 andi.w &0x7fff,%d1
15710 or.w %d2,%d1 # concat new sign,new exp
15711 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
15726 fmov.l %fpsr,%d1 # save FPSR
15728 or.l %d1,USER_FPSR(%a6) # save INEX2,N
15733 mov.b FPCR_ENABLE(%a6),%d1
15734 andi.b &0x13,%d1 # is OVFL or INEX enabled?
15743 sne %d1 # set sign param accordingly
15800 cmpi.b %d1,&DENORM # weed out DENORM
15802 cmpi.b %d1,&ZERO # weed out ZERO
15804 cmpi.b %d1,&INF # weed out INF
15806 cmpi.b %d1,&SNAN # weed out SNAN
15873 mov.w DST_EX(%a1),%d1
15875 mov.w %d1,FP_SCR1_EX(%a6)
15878 andi.w &0x7fff,%d1
15880 mov.w %d1,2+L_SCR1(%a6) # store dst exponent
15882 cmp.w %d0,%d1 # is src exp >= dst exp?
15907 mov.w FP_SCR0_EX(%a6),%d1
15908 and.w &0x8000,%d1
15909 or.w %d1,%d0 # concat {sgn,new exp}
15943 mov.w FP_SCR1_EX(%a6),%d1
15944 andi.w &0x8000,%d1
15945 or.w %d1,%d0 # concat {sgn,new exp}
15985 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
16000 sub.l %d1,%d0 # scale = BIAS + (-exp)
16008 mov.l %d0,%d1 # prepare for op_norm call
16043 andi.l &0x7fff,%d1 # extract operand's exponent
16047 btst &0x0,%d1 # is exp even or odd?
16061 sub.l %d1,%d0 # scale = BIAS + (-exp)
16112 mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp}
16127 sub.l %d1,%d0 # scale = BIAS + (-exp)
16134 mov.l %d0,%d1 # prepare for op_norm call
16284 clr.l %d1 # clear scratch reg
16285 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
16286 ror.l &0x8,%d1 # rotate to top byte
16287 fmov.l %d1,%fpsr # insert into FPSR
16289 mov.w (tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table
16290 jmp (tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
16848 mov.b 1+EXC_OPWORD(%a6), %d1 # fetch lo opword
16849 andi.w &0x7, %d1 # extract count register
18064 mov.b 1+EXC_OPWORD(%a6),%d1 # fetch lo opword
18065 mov.l %d1,%d0 # make a copy
18066 andi.b &0x38,%d1 # extract src mode
18106 tst.l %d1 # did dstore fail?
18109 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18110 andi.w &0x7,%d1 # pass index in d1
18276 tst.b %d1 # should FP0 be moved?
18284 lsl.b &0x1,%d1 # should FP1 be moved?
18292 lsl.b &0x1,%d1 # should FP2 be moved?
18299 lsl.b &0x1,%d1 # should FP3 be moved?
18306 lsl.b &0x1,%d1 # should FP4 be moved?
18313 lsl.b &0x1,%d1 # should FP5 be moved?
18320 lsl.b &0x1,%d1 # should FP6 be moved?
18327 lsl.b &0x1,%d1 # should FP7 be moved?
18343 tst.l %d1 # did dstore err?
18357 mov.l %d1,-(%sp) # save bit string for later
18364 tst.l %d1 # did dfetch fail?
18367 mov.l (%sp)+,%d1 # load bit string
18371 tst.b %d1 # should FP0 be moved?
18379 lsl.b &0x1,%d1 # should FP1 be moved?
18387 lsl.b &0x1,%d1 # should FP2 be moved?
18393 lsl.b &0x1,%d1 # should FP3 be moved?
18399 lsl.b &0x1,%d1 # should FP4 be moved?
18405 lsl.b &0x1,%d1 # should FP5 be moved?
18411 lsl.b &0x1,%d1 # should FP6 be moved?
18417 lsl.b &0x1,%d1 # should FP7 be moved?
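
The two runs at 18276-18327 and 18371-18417 walk an fmovm register-select byte most-significant bit first: test the top bit of %d1 to decide whether FP0 takes part, shift left one, test again for FP1, and so on through FP7, as the comments indicate. A C sketch of the same walk; move_fp_reg is a placeholder for the actual load or store:

    #include <stdint.h>
    #include <stdio.h>

    static void move_fp_reg(int n)       /* placeholder for the real transfer */
    {
        printf("move fp%d\n", n);
    }

    /* Walk the register-select byte MSB first: bit 7 selects FP0,
       bit 6 selects FP1, ..., bit 0 selects FP7. */
    static void walk_fmovm_mask(uint8_t mask)
    {
        for (int n = 0; n < 8; n++) {
            if (mask & 0x80)             /* tst.b %d1 : top bit set? */
                move_fp_reg(n);
            mask = (uint8_t)(mask << 1); /* lsl.b &0x1,%d1 */
        }
    }
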
18529 mov.w %d0,%d1 # make a copy
18532 andi.l &0x7,%d1 # extract reg field
18652 mov.l %d0,%d1
18653 add.l %a0,%d1 # Increment
18654 mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
18660 mov.l %d0,%d1
18661 add.l %a0,%d1 # Increment
18662 mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
18668 mov.l %d0,%d1
18669 add.l %a0,%d1 # Increment
18670 mov.l %d1,%a2 # Save incr value
18676 mov.l %d0,%d1
18677 add.l %a0,%d1 # Increment
18678 mov.l %d1,%a3 # Save incr value
18684 mov.l %d0,%d1
18685 add.l %a0,%d1 # Increment
18686 mov.l %d1,%a4 # Save incr value
18692 mov.l %d0,%d1
18693 add.l %a0,%d1 # Increment
18694 mov.l %d1,%a5 # Save incr value
18700 mov.l %d0,%d1
18701 add.l %a0,%d1 # Increment
18702 mov.l %d1,(%a6) # Save incr value
18710 mov.l %d0,%d1
18711 add.l %a0,%d1 # Increment
18712 mov.l %d1,EXC_A7(%a6) # Save incr value
18785 tst.l %d1 # did ifetch fail?
18798 tst.l %d1 # did ifetch fail?
18811 tst.l %d1 # did ifetch fail?
18824 tst.l %d1 # did ifetch fail?
18837 tst.l %d1 # did ifetch fail?
18850 tst.l %d1 # did ifetch fail?
18863 tst.l %d1 # did ifetch fail?
18876 tst.l %d1 # did ifetch fail?
18891 addq.l &0x8,%d1
18899 tst.l %d1 # did ifetch fail?
18909 mov.l %d0,%d1
18910 rol.w &0x4,%d1
18911 andi.w &0xf,%d1 # extract index regno
18923 mov.l %d2,%d1
18924 rol.w &0x7,%d1
18925 andi.l &0x3,%d1 # extract scale value
18927 lsl.l %d1,%d0 # shift index by scale
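
Lines 18909-18927 decode the index part of an indexed-addressing extension word: rotate left four and mask with 0xf to get the D/A bit plus index register number from bits 15-12, rotate left seven and mask with 3 to get the scale field from bits 10-9, then shift the index value left by that scale. A C sketch of the field extraction (struct and names are mine):

    #include <stdint.h>

    struct ext_index {
        unsigned regno;   /* bits 15-12: D/A bit plus index register number */
        unsigned scale;   /* bits 10-9 : scale code 0..3 (x1,x2,x4,x8)      */
    };

    static struct ext_index decode_ext_index(uint16_t ext_word)
    {
        struct ext_index ix;
        ix.regno = (ext_word >> 12) & 0xfu;  /* rol.w &0x4,%d1 ; andi.w &0xf,%d1 */
        ix.scale = (ext_word >> 9)  & 0x3u;  /* rol.w &0x7,%d1 ; andi.l &0x3,%d1 */
        return ix;
    }

    /* The scaled index is then (index_value << ix.scale),
       matching lsl.l %d1,%d0 above. */
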
18944 tst.l %d1 # did ifetch fail?
18958 tst.l %d1 # did ifetch fail?
18972 tst.l %d1 # did ifetch fail?