Annotation of sys/arch/m68k/060sp/fpsp.s, Revision 1.1.1.1
1.1 nbrk 1: #
2: # $OpenBSD: fpsp.s,v 1.9 2007/04/10 17:47:54 miod Exp $
3: # $NetBSD: fpsp.s,v 1.2 1996/05/15 19:48:03 is Exp $
4: #
5:
6: #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7: # MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
8: # M68000 Hi-Performance Microprocessor Division
9: # M68060 Software Package Production Release
10: #
11: # M68060 Software Package Copyright (C) 1993, 1994, 1995, 1996 Motorola Inc.
12: # All rights reserved.
13: #
14: # THE SOFTWARE is provided on an "AS IS" basis and without warranty.
15: # To the maximum extent permitted by applicable law,
16: # MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
17: # INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS
18: # FOR A PARTICULAR PURPOSE and any warranty against infringement with
19: # regard to the SOFTWARE (INCLUDING ANY MODIFIED VERSIONS THEREOF)
20: # and any accompanying written materials.
21: #
22: # To the maximum extent permitted by applicable law,
23: # IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
24: # (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
25: # BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
26: # ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
27: #
28: # Motorola assumes no responsibility for the maintenance and support
29: # of the SOFTWARE.
30: #
31: # You are hereby granted a copyright license to use, modify, and distribute the
32: # SOFTWARE so long as this entire notice is retained without alteration
33: # in any modified and/or redistributed versions, and that such modified
34: # versions are clearly identified as such.
35: # No licenses are granted by implication, estoppel or otherwise under any
36: # patents or trademarks of Motorola, Inc.
37: #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
38:
39: #
40: # freal.s:
41: # This file is appended to the top of the 060FPSP package
42: # and contains the entry points into the package. The user, in
43: # effect, branches to one of the branch table entries located
44: # after _060FPSP_TABLE.
45: # Also, subroutine stubs exist in this file (_fpsp_done for
46: # example) that are referenced by the FPSP package itself in order
47: # to call a given routine. The stub routine actually performs the
48: # callout. The FPSP code does a "bsr" to the stub routine. This
49: # extra layer of hierarchy adds a slight performance penalty but
50: # it makes the FPSP code easier to read and more maintainable.
51: #
52:
# The operating system that links this package stores a table of "callout"
# pointers in the 0x80 bytes immediately preceding _060FPSP_TABLE. The
# constants below are the byte offsets of each pointer within that area;
# the stub routines further down fetch them as
# (_060FPSP_TABLE-0x80+_off_*,%pc).
#
# Exception-exit and package-exit callouts:
53: set _off_bsun, 0x00 # branch/set-on-unordered handler
54: set _off_snan, 0x04 # signalling NAN handler
55: set _off_operr, 0x08 # operand error handler
56: set _off_ovfl, 0x0c # overflow handler
57: set _off_unfl, 0x10 # underflow handler
58: set _off_dz, 0x14 # divide-by-zero handler
59: set _off_inex, 0x18 # inexact handler
60: set _off_fline, 0x1c # F-line handler
61: set _off_fpu_dis, 0x20 # FPU-disabled handler
62: set _off_trap, 0x24 # trap handler
63: set _off_trace, 0x28 # trace handler
64: set _off_access, 0x2c # access error handler
65: set _off_done, 0x30 # normal exit from the package
66:
# Memory-access callouts (instruction/data read/write, by access size):
67: set _off_imr, 0x40 # instruction memory read
68: set _off_dmr, 0x44 # data memory read
69: set _off_dmw, 0x48 # data memory write
70: set _off_irw, 0x4c # instruction read, word
71: set _off_irl, 0x50 # instruction read, longword
72: set _off_drb, 0x54 # data read, byte
73: set _off_drw, 0x58 # data read, word
74: set _off_drl, 0x5c # data read, longword
75: set _off_dwb, 0x60 # data write, byte
76: set _off_dww, 0x64 # data write, word
77: set _off_dwl, 0x68 # data write, longword
78:
79: _060FPSP_TABLE:
80:
81: ###############################################################
82:
83: # Here's the table of ENTRY POINTS for those linking the package.
# Each entry occupies a fixed 8-byte slot: a 6-byte "bra.l" followed by a
# 2-byte pad word, so the OS can branch into the package at fixed
# 8-byte intervals from _060FPSP_TABLE.
84: bra.l _fpsp_snan
85: short 0x0000
86: bra.l _fpsp_operr
87: short 0x0000
88: bra.l _fpsp_ovfl
89: short 0x0000
90: bra.l _fpsp_unfl
91: short 0x0000
92: bra.l _fpsp_dz
93: short 0x0000
94: bra.l _fpsp_inex
95: short 0x0000
96: bra.l _fpsp_fline
97: short 0x0000
98: bra.l _fpsp_unsupp
99: short 0x0000
100: bra.l _fpsp_effadd
101: short 0x0000
102:
# 9 entries * 8 bytes = 72 bytes; pad the entry table out to
# 128 (0x80) bytes.
103: space 56
104:
104:
105: ###############################################################
# Callout stubs. The FPSP emulation code "bsr"s to these stubs, and each
# stub dispatches to the corresponding OS-supplied routine. Every stub
# follows the same register-transparent pattern:
#   1) save d0 on the stack;
#   2) load the OS-supplied longword at _060FPSP_TABLE-0x80+_off_* into d0
#      (an offset relative to _060FPSP_TABLE-0x80);
#   3) pea the resulting target address (_060FPSP_TABLE-0x80 + offset);
#   4) reload the saved d0 from 4(%sp) (it sits above the pushed address);
#   5) "rtd &0x4" pops the pushed target into the PC and then discards the
#      saved-d0 slot, so control reaches the callout with all registers
#      exactly as the caller left them.
106: global _fpsp_done
107: _fpsp_done:
108: mov.l %d0,-(%sp)
109: mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
110: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
111: mov.l 0x4(%sp),%d0
112: rtd &0x4
113:
114: global _real_ovfl
115: _real_ovfl:
116: mov.l %d0,-(%sp)
117: mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
118: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
119: mov.l 0x4(%sp),%d0
120: rtd &0x4
121:
122: global _real_unfl
123: _real_unfl:
124: mov.l %d0,-(%sp)
125: mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
126: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
127: mov.l 0x4(%sp),%d0
128: rtd &0x4
129:
130: global _real_inex
131: _real_inex:
132: mov.l %d0,-(%sp)
133: mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
134: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
135: mov.l 0x4(%sp),%d0
136: rtd &0x4
137:
138: global _real_bsun
139: _real_bsun:
140: mov.l %d0,-(%sp)
141: mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
142: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
143: mov.l 0x4(%sp),%d0
144: rtd &0x4
145:
146: global _real_operr
147: _real_operr:
148: mov.l %d0,-(%sp)
149: mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
150: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
151: mov.l 0x4(%sp),%d0
152: rtd &0x4
153:
154: global _real_snan
155: _real_snan:
156: mov.l %d0,-(%sp)
157: mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
158: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
159: mov.l 0x4(%sp),%d0
160: rtd &0x4
161:
162: global _real_dz
163: _real_dz:
164: mov.l %d0,-(%sp)
165: mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
166: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
167: mov.l 0x4(%sp),%d0
168: rtd &0x4
169:
170: global _real_fline
171: _real_fline:
172: mov.l %d0,-(%sp)
173: mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
174: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
175: mov.l 0x4(%sp),%d0
176: rtd &0x4
177:
178: global _real_fpu_disabled
179: _real_fpu_disabled:
180: mov.l %d0,-(%sp)
181: mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
182: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
183: mov.l 0x4(%sp),%d0
184: rtd &0x4
185:
186: global _real_trap
187: _real_trap:
188: mov.l %d0,-(%sp)
189: mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
190: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
191: mov.l 0x4(%sp),%d0
192: rtd &0x4
193:
194: global _real_trace
195: _real_trace:
196: mov.l %d0,-(%sp)
197: mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
198: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
199: mov.l 0x4(%sp),%d0
200: rtd &0x4
201:
202: global _real_access
203: _real_access:
204: mov.l %d0,-(%sp)
205: mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
206: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
207: mov.l 0x4(%sp),%d0
208: rtd &0x4
209:
210: #######################################
# Memory-access callout stubs — same dispatch pattern as above.
211:
212: global _imem_read
213: _imem_read:
214: mov.l %d0,-(%sp)
215: mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
216: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
217: mov.l 0x4(%sp),%d0
218: rtd &0x4
219:
220: global _dmem_read
221: _dmem_read:
222: mov.l %d0,-(%sp)
223: mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
224: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
225: mov.l 0x4(%sp),%d0
226: rtd &0x4
227:
228: global _dmem_write
229: _dmem_write:
230: mov.l %d0,-(%sp)
231: mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
232: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
233: mov.l 0x4(%sp),%d0
234: rtd &0x4
235:
236: global _imem_read_word
237: _imem_read_word:
238: mov.l %d0,-(%sp)
239: mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
240: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
241: mov.l 0x4(%sp),%d0
242: rtd &0x4
243:
244: global _imem_read_long
245: _imem_read_long:
246: mov.l %d0,-(%sp)
247: mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
248: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
249: mov.l 0x4(%sp),%d0
250: rtd &0x4
251:
252: global _dmem_read_byte
253: _dmem_read_byte:
254: mov.l %d0,-(%sp)
255: mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
256: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
257: mov.l 0x4(%sp),%d0
258: rtd &0x4
259:
260: global _dmem_read_word
261: _dmem_read_word:
262: mov.l %d0,-(%sp)
263: mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
264: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
265: mov.l 0x4(%sp),%d0
266: rtd &0x4
267:
268: global _dmem_read_long
269: _dmem_read_long:
270: mov.l %d0,-(%sp)
271: mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
272: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
273: mov.l 0x4(%sp),%d0
274: rtd &0x4
275:
276: global _dmem_write_byte
277: _dmem_write_byte:
278: mov.l %d0,-(%sp)
279: mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
280: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
281: mov.l 0x4(%sp),%d0
282: rtd &0x4
283:
284: global _dmem_write_word
285: _dmem_write_word:
286: mov.l %d0,-(%sp)
287: mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
288: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
289: mov.l 0x4(%sp),%d0
290: rtd &0x4
291:
292: global _dmem_write_long
293: _dmem_write_long:
294: mov.l %d0,-(%sp)
295: mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
296: pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
297: mov.l 0x4(%sp),%d0
298: rtd &0x4
299:
300: #
301: # This file contains a set of define statements for constants
302: # in order to promote readability within the corecode itself.
303: #
304:
# All offsets below are relative to %a6 after the handlers execute
# "link.w %a6,&-LOCAL_SIZE": positive EXC_* offsets reach the processor
# exception stack frame above the link, while LV-based (negative) offsets
# reach locals inside the LOCAL_SIZE byte frame.
305: set LOCAL_SIZE, 192 # stack frame size(bytes)
306: set LV, -LOCAL_SIZE # stack offset
307:
308: set EXC_SR, 0x4 # stack status register
309: set EXC_PC, 0x6 # stack pc
310: set EXC_VOFF, 0xa # stacked vector offset
311: set EXC_EA, 0xc # stacked <ea>
312:
313: set EXC_FP, 0x0 # frame pointer
314:
315: set EXC_AREGS, -68 # offset of all address regs
316: set EXC_DREGS, -100 # offset of all data regs
317: set EXC_FPREGS, -36 # offset of all fp regs
318:
319: set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
320: set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
321: set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
322: set EXC_A5, EXC_AREGS+(5*4)
323: set EXC_A4, EXC_AREGS+(4*4)
324: set EXC_A3, EXC_AREGS+(3*4)
325: set EXC_A2, EXC_AREGS+(2*4)
326: set EXC_A1, EXC_AREGS+(1*4)
327: set EXC_A0, EXC_AREGS+(0*4)
328: set EXC_D7, EXC_DREGS+(7*4)
329: set EXC_D6, EXC_DREGS+(6*4)
330: set EXC_D5, EXC_DREGS+(5*4)
331: set EXC_D4, EXC_DREGS+(4*4)
332: set EXC_D3, EXC_DREGS+(3*4)
333: set EXC_D2, EXC_DREGS+(2*4)
334: set EXC_D1, EXC_DREGS+(1*4)
335: set EXC_D0, EXC_DREGS+(0*4)
336:
# FP registers are saved in 12-byte extended-precision slots.
337: set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
338: set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
339: set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
340:
# Each FP scratch/operand area is a 12-byte extended value:
# word exponent (EX), internal sign word (SGN), and 64-bit mantissa (HI/LO).
341: set FP_SCR1, LV+80 # fp scratch 1
342: set FP_SCR1_EX, FP_SCR1+0
343: set FP_SCR1_SGN, FP_SCR1+2
344: set FP_SCR1_HI, FP_SCR1+4
345: set FP_SCR1_LO, FP_SCR1+8
346:
347: set FP_SCR0, LV+68 # fp scratch 0
348: set FP_SCR0_EX, FP_SCR0+0
349: set FP_SCR0_SGN, FP_SCR0+2
350: set FP_SCR0_HI, FP_SCR0+4
351: set FP_SCR0_LO, FP_SCR0+8
352:
353: set FP_DST, LV+56 # fp destination operand
354: set FP_DST_EX, FP_DST+0
355: set FP_DST_SGN, FP_DST+2
356: set FP_DST_HI, FP_DST+4
357: set FP_DST_LO, FP_DST+8
358:
359: set FP_SRC, LV+44 # fp source operand
360: set FP_SRC_EX, FP_SRC+0
361: set FP_SRC_SGN, FP_SRC+2
362: set FP_SRC_HI, FP_SRC+4
363: set FP_SRC_LO, FP_SRC+8
364:
365: set USER_FPIAR, LV+40 # FP instr address register
366:
367: set USER_FPSR, LV+36 # FP status register
368: set FPSR_CC, USER_FPSR+0 # FPSR condition codes
369: set FPSR_QBYTE, USER_FPSR+1 # FPSR quotient byte
370: set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
371: set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
372:
373: set USER_FPCR, LV+32 # FP control register
374: set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
375: set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
376:
377: set L_SCR3, LV+28 # integer scratch 3
378: set L_SCR2, LV+24 # integer scratch 2
379: set L_SCR1, LV+20 # integer scratch 1
380:
381: set STORE_FLG, LV+19 # flag: operand store (ie. not fcmp/ftst)
382:
# NOTE(review): EXC_TEMP2 aliases L_SCR2 (both LV+24) — presumably
# never live at the same time; confirm before reusing either.
383: set EXC_TEMP2, LV+24 # temporary space
384: set EXC_TEMP, LV+16 # temporary space
385:
386: set DTAG, LV+15 # destination operand type
387: set STAG, LV+14 # source operand type
388:
389: set SPCOND_FLG, LV+10 # flag: special case (see below)
390:
391: set EXC_CC, LV+8 # saved condition codes
392: set EXC_EXTWPTR, LV+4 # saved current PC (active)
# EXC_EXTWORD and EXC_CMDREG deliberately alias the same word (LV+2).
393: set EXC_EXTWORD, LV+2 # saved extension word
394: set EXC_CMDREG, LV+2 # saved extension word
395: set EXC_OPWORD, LV+0 # saved operation word
396:
397: ################################
398:
399: # Helpful macros
400:
# Field offsets within a 12-byte extended-precision value held in memory
# (exponent word, internal sign word, 64-bit mantissa, optional
# guard/round/sticky longword). FTEMP/LOCAL/DST/SRC are interchangeable
# naming sets for the same layout.
401: set FTEMP, 0 # offsets within an
402: set FTEMP_EX, 0 # extended precision
403: set FTEMP_SGN, 2 # value saved in memory.
404: set FTEMP_HI, 4
405: set FTEMP_LO, 8
406: set FTEMP_GRS, 12
407:
408: set LOCAL, 0 # offsets within an
409: set LOCAL_EX, 0 # extended precision
410: set LOCAL_SGN, 2 # value saved in memory.
411: set LOCAL_HI, 4
412: set LOCAL_LO, 8
413: set LOCAL_GRS, 12
414:
415: set DST, 0 # offsets within an
416: set DST_EX, 0 # extended precision
417: set DST_HI, 4 # value saved in memory.
418: set DST_LO, 8
419:
420: set SRC, 0 # offsets within an
421: set SRC_EX, 0 # extended precision
422: set SRC_HI, 4 # value saved in memory.
423: set SRC_LO, 8
424:
# Exponent ranges expressed in extended-precision (biased) form.
425: set SGL_LO, 0x3f81 # min sgl prec exponent
426: set SGL_HI, 0x407e # max sgl prec exponent
427: set DBL_LO, 0x3c01 # min dbl prec exponent
428: set DBL_HI, 0x43fe # max dbl prec exponent
429: set EXT_LO, 0x0 # min ext prec exponent
430: set EXT_HI, 0x7ffe # max ext prec exponent
431:
432: set EXT_BIAS, 0x3fff # extended precision bias
433: set SGL_BIAS, 0x007f # single precision bias
434: set DBL_BIAS, 0x03ff # double precision bias
435:
# Operand-type tags stored in STAG/DTAG by set_tag_x()/unnorm_fix().
436: set NORM, 0x00 # operand type for STAG/DTAG
437: set ZERO, 0x01 # operand type for STAG/DTAG
438: set INF, 0x02 # operand type for STAG/DTAG
439: set QNAN, 0x03 # operand type for STAG/DTAG
440: set DENORM, 0x04 # operand type for STAG/DTAG
441: set SNAN, 0x05 # operand type for STAG/DTAG
442: set UNNORM, 0x06 # operand type for STAG/DTAG
443:
444: ##################
445: # FPSR/FPCR bits #
446: ##################
# Bit positions within the FPSR condition-code byte:
447: set neg_bit, 0x3 # negative result
448: set z_bit, 0x2 # zero result
449: set inf_bit, 0x1 # infinite result
450: set nan_bit, 0x0 # NAN result
451:
452: set q_sn_bit, 0x7 # sign bit of quotient byte
453:
# Bit positions within the FPSR exception-status byte / FPCR enable byte:
454: set bsun_bit, 7 # branch on unordered
455: set snan_bit, 6 # signalling NAN
456: set operr_bit, 5 # operand error
457: set ovfl_bit, 4 # overflow
458: set unfl_bit, 3 # underflow
459: set dz_bit, 2 # divide by zero
460: set inex2_bit, 1 # inexact result 2
461: set inex1_bit, 0 # inexact result 1
462:
# Bit positions within the FPSR accrued-exception byte:
463: set aiop_bit, 7 # accrued inexact operation bit
464: set aovfl_bit, 6 # accrued overflow bit
465: set aunfl_bit, 5 # accrued underflow bit
466: set adz_bit, 4 # accrued dz bit
467: set ainex_bit, 3 # accrued inexact bit
468:
469: #############################
470: # FPSR individual bit masks #
471: #############################
472: set neg_mask, 0x08000000 # negative bit mask (lw)
473: set inf_mask, 0x02000000 # infinity bit mask (lw)
474: set z_mask, 0x04000000 # zero bit mask (lw)
475: set nan_mask, 0x01000000 # nan bit mask (lw)
476:
477: set neg_bmask, 0x08 # negative bit mask (byte)
478: set inf_bmask, 0x02 # infinity bit mask (byte)
479: set z_bmask, 0x04 # zero bit mask (byte)
480: set nan_bmask, 0x01 # nan bit mask (byte)
481:
482: set bsun_mask, 0x00008000 # bsun exception mask
483: set snan_mask, 0x00004000 # snan exception mask
484: set operr_mask, 0x00002000 # operr exception mask
485: set ovfl_mask, 0x00001000 # overflow exception mask
486: set unfl_mask, 0x00000800 # underflow exception mask
487: set dz_mask, 0x00000400 # dz exception mask
488: set inex2_mask, 0x00000200 # inex2 exception mask
489: set inex1_mask, 0x00000100 # inex1 exception mask
490:
491: set aiop_mask, 0x00000080 # accrued illegal operation
492: set aovfl_mask, 0x00000040 # accrued overflow
493: set aunfl_mask, 0x00000020 # accrued underflow
494: set adz_mask, 0x00000010 # accrued divide by zero
495: set ainex_mask, 0x00000008 # accrued inexact
496:
497: ######################################
498: # FPSR combinations used in the FPSP #
499: ######################################
500: set dzinf_mask, inf_mask+dz_mask+adz_mask
501: set opnan_mask, nan_mask+operr_mask+aiop_mask
502: set nzi_mask, 0x01ffffff #clears N, Z, and I
503: set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
504: set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
505: set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
506: set inx1a_mask, inex1_mask+ainex_mask
507: set inx2a_mask, inex2_mask+ainex_mask
508: set snaniop_mask, nan_mask+snan_mask+aiop_mask
509: set snaniop2_mask, snan_mask+aiop_mask
510: set naniop_mask, nan_mask+aiop_mask
511: set neginf_mask, neg_mask+inf_mask
512: set infaiop_mask, inf_mask+aiop_mask
513: set negz_mask, neg_mask+z_mask
514: set opaop_mask, operr_mask+aiop_mask
515: set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
516: set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
517:
518: #########
519: # misc. #
520: #########
521: set rnd_stky_bit, 29 # stky bit pos in longword
522:
523: set sign_bit, 0x7 # sign bit
524: set signan_bit, 0x6 # signalling nan bit
525:
526: set sgl_thresh, 0x3f81 # minimum sgl exponent
527: set dbl_thresh, 0x3c01 # minimum dbl exponent
528:
# Rounding precision selectors (upper nibble of the mode byte):
529: set x_mode, 0x0 # extended precision
530: set s_mode, 0x4 # single precision
531: set d_mode, 0x8 # double precision
532:
# Rounding direction selectors (lower bits of the mode byte):
533: set rn_mode, 0x0 # round-to-nearest
534: set rz_mode, 0x1 # round-to-zero
535: set rm_mode, 0x2 # round-to-minus-infinity
536: set rp_mode, 0x3 # round-to-plus-infinity
537:
538: set mantissalen, 64 # length of mantissa in bits
539:
540: set BYTE, 1 # len(byte) == 1 byte
541: set WORD, 2 # len(word) == 2 bytes
542: set LONG, 4 # len(longword) == 4 bytes
543:
# FP exception vector offsets in the vector table:
544: set BSUN_VEC, 0xc0 # bsun vector offset
545: set INEX_VEC, 0xc4 # inexact vector offset
546: set DZ_VEC, 0xc8 # dz vector offset
547: set UNFL_VEC, 0xcc # unfl vector offset
548: set OPERR_VEC, 0xd0 # operr vector offset
549: set OVFL_VEC, 0xd4 # ovfl vector offset
550: set SNAN_VEC, 0xd8 # snan vector offset
551:
552: ###########################
553: # SPecial CONDition FLaGs #
554: ###########################
# Values stored in SPCOND_FLG (see frame offsets above):
555: set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
556: set fbsun_flg, 0x02 # flag bit: bsun exception
557: set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
558: set mda7_flg, 0x08 # flag bit: -(a7) <ea>
559: set fmovm_flg, 0x40 # flag bit: fmovm instruction
560: set immed_flg, 0x80 # flag bit: &<data> <ea>
561:
# Bit numbers corresponding to the *_flg values above:
562: set ftrapcc_bit, 0x0
563: set fbsun_bit, 0x1
564: set mia7_bit, 0x2
565: set mda7_bit, 0x3
566: set immed_bit, 0x7
567:
568: ##################################
569: # TRANSCENDENTAL "LAST-OP" FLAGS #
570: ##################################
571: set FMUL_OP, 0x0 # fmul instr performed last
572: set FDIV_OP, 0x1 # fdiv performed last
573: set FADD_OP, 0x2 # fadd performed last
574: set FMOV_OP, 0x3 # fmov performed last
575:
576: #############
577: # CONSTANTS #
578: #############
# Extended-precision constants stored as raw longwords:
579: T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
580: T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
581:
582: PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
583: PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
584:
585: TWOBYPI:
586: long 0x3FE45F30,0x6DC9C883
587:
588: #########################################################################
589: # XDEF **************************************************************** #
590: # _fpsp_ovfl(): 060FPSP entry point for FP Overflow exception. #
591: # #
592: # This handler should be the first code executed upon taking the #
593: # FP Overflow exception in an operating system. #
594: # #
595: # XREF **************************************************************** #
596: # _imem_read_long() - read instruction longword #
597: # fix_skewed_ops() - adjust src operand in fsave frame #
598: # set_tag_x() - determine optype of src/dst operands #
599: # store_fpreg() - store opclass 0 or 2 result to FP regfile #
600: # unnorm_fix() - change UNNORM operands to NORM or ZERO #
601: # load_fpn2() - load dst operand from FP regfile #
602: # fout() - emulate an opclass 3 instruction #
603: # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
604: # _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
605: # _real_ovfl() - "callout" for Overflow exception enabled code #
606: # _real_inex() - "callout" for Inexact exception enabled code #
607: # _real_trace() - "callout" for Trace exception code #
608: # #
609: # INPUT *************************************************************** #
610: # - The system stack contains the FP Ovfl exception stack frame #
611: # - The fsave frame contains the source operand #
612: # #
613: # OUTPUT ************************************************************** #
614: # Overflow Exception enabled: #
615: # - The system stack is unchanged #
616: # - The fsave frame contains the adjusted src op for opclass 0,2 #
617: # Overflow Exception disabled: #
618: # - The system stack is unchanged #
619: # - The "exception present" flag in the fsave frame is cleared #
620: # #
621: # ALGORITHM *********************************************************** #
622: # On the 060, if an FP overflow is present as the result of any #
623: # instruction, the 060 will take an overflow exception whether the #
624: # exception is enabled or disabled in the FPCR. For the disabled case, #
625: # This handler emulates the instruction to determine what the correct #
626: # default result should be for the operation. This default result is #
627: # then stored in either the FP regfile, data regfile, or memory. #
628: # Finally, the handler exits through the "callout" _fpsp_done() #
629: # denoting that no exceptional conditions exist within the machine. #
630: # If the exception is enabled, then this handler must create the #
631: # exceptional operand and place it in the fsave state frame, and store #
632: # the default result (only if the instruction is opclass 3). For #
633: # exceptions enabled, this handler must exit through the "callout" #
634: # _real_ovfl() so that the operating system enabled overflow handler #
635: # can handle this case. #
636: # Two other conditions exist. First, if overflow was disabled #
637: # but the inexact exception was enabled, this handler must exit #
638: # through the "callout" _real_inex() regardless of whether the result #
639: # was inexact. #
640: # Also, in the case of an opclass three instruction where #
641: # overflow was disabled and the trace exception was enabled, this #
642: # handler must exit through the "callout" _real_trace(). #
643: # #
644: #########################################################################
645:
646: global _fpsp_ovfl
# FP Overflow exception handler — see the XDEF/ALGORITHM block above for
# the full contract. Throughout: %a6 is the frame pointer for the
# LOCAL_SIZE exception frame; the faulting instruction's opword/extension
# word are fetched via the FPIAR and cached at EXC_OPWORD/EXC_CMDREG.
647: _fpsp_ovfl:
648:
649: #$# sub.l &24,%sp # make room for src/dst
650:
651: link.w %a6,&-LOCAL_SIZE # init stack frame
652:
653: fsave FP_SRC(%a6) # grab the "busy" frame
654:
655: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
656: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
657: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
658:
659: # the FPIAR holds the "current PC" of the faulting instruction
660: mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
661: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
662: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
663: bsr.l _imem_read_long # fetch the instruction words
664: mov.l %d0,EXC_OPWORD(%a6)
665:
666: ##############################################################################
667:
# Opclass 3 (fmove out to memory) takes a different path than
# opclass 0/2 register-destination operations.
668: btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
669: bne.w fovfl_out
670:
671:
672: lea FP_SRC(%a6),%a0 # pass: ptr to src op
673: bsr.l fix_skewed_ops # fix src op
674:
675: # since, I believe, only NORMs and DENORMs can come through here,
676: # maybe we can avoid the subroutine call.
677: lea FP_SRC(%a6),%a0 # pass: ptr to src op
678: bsr.l set_tag_x # tag the operand type
679: mov.b %d0,STAG(%a6) # maybe NORM,DENORM
680:
681: # bit five of the fp extension word separates the monadic and dyadic operations
682: # that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
683: # will never take this exception.
684: btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
685: beq.b fovfl_extract # monadic
686:
687: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
688: bsr.l load_fpn2 # load dst into FP_DST
689:
690: lea FP_DST(%a6),%a0 # pass: ptr to dst op
691: bsr.l set_tag_x # tag the operand type
692: cmpi.b %d0,&UNNORM # is operand an UNNORM?
693: bne.b fovfl_op2_done # no
694: bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
695: fovfl_op2_done:
696: mov.b %d0,DTAG(%a6) # save dst optype tag
697:
698: fovfl_extract:
699:
700: #$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
701: #$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
702: #$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
703: #$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
704: #$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
705: #$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
706:
707: clr.l %d0
708: mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
709:
# d1 = low 7 bits of the extension word: the opcode index used to pick
# the emulation routine out of tbl_unsupp below.
710: mov.b 1+EXC_CMDREG(%a6),%d1
711: andi.w &0x007f,%d1 # extract extension
712:
713: andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
714:
715: fmov.l &0x0,%fpcr # zero current control regs
716: fmov.l &0x0,%fpsr
717:
718: lea FP_SRC(%a6),%a0
719: lea FP_DST(%a6),%a1
720:
721: # maybe we can make these entry points ONLY the OVFL entry points of each routine.
# tbl_unsupp holds longword offsets; double indirection computes the
# routine address relative to the table itself.
722: mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
723: jsr (tbl_unsupp.l,%pc,%d1.l*1)
724:
725: # the operation has been emulated. the result is in fp0.
726: # the EXOP, if an exception occurred, is in fp1.
727: # we must save the default result regardless of whether
728: # traps are enabled or disabled.
729: bfextu EXC_CMDREG(%a6){&6:&3},%d0
730: bsr.l store_fpreg
731:
732: # the exceptional possibilities we have left ourselves with are ONLY overflow
733: # and inexact. and, the inexact is such that overflow occurred and was disabled
734: # but inexact was enabled.
735: btst &ovfl_bit,FPCR_ENABLE(%a6)
736: bne.b fovfl_ovfl_on
737:
738: btst &inex2_bit,FPCR_ENABLE(%a6)
739: bne.b fovfl_inex_on
740:
# Neither trap enabled: restore everything and exit cleanly.
741: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
742: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
743: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
744:
745: unlk %a6
746: #$# add.l &24,%sp
747: bra.l _fpsp_done
748:
749: # overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
750: # in fp1. now, simply jump to _real_ovfl()!
751: fovfl_ovfl_on:
752: fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
753:
# NOTE(review): 0xe005 looks like an fsave frame status word (exception
# pending + overflow info) written into the frame that the frestore
# below reloads — confirm against the 060 fsave frame format.
754: mov.w &0xe005,2+FP_SRC(%a6) # save exc status
755:
756: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
757: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
758: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
759:
760: frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
761:
762: unlk %a6
763:
764: bra.l _real_ovfl
765:
766: # overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
767: # we must jump to real_inex().
768: fovfl_inex_on:
769:
770: fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
771:
# Rewrite the stacked vector offset so the OS inexact handler sees the
# inexact vector (0xc4 == INEX_VEC) rather than overflow.
772: mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
773: mov.w &0xe001,2+FP_SRC(%a6) # save exc status
774:
775: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
776: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
777: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
778:
779: frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
780:
781: unlk %a6
782:
783: bra.l _real_inex
784:
785: ########################################################################
# Opclass 3 (fmove out to memory) path.
786: fovfl_out:
787:
788:
789: #$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
790: #$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
791: #$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
792:
793: # the src operand is definitely a NORM(!), so tag it as such
794: mov.b &NORM,STAG(%a6) # set src optype tag
795:
796: clr.l %d0
797: mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
798:
799: and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
800:
801: fmov.l &0x0,%fpcr # zero current control regs
802: fmov.l &0x0,%fpsr
803:
804: lea FP_SRC(%a6),%a0 # pass ptr to src operand
805:
806: bsr.l fout # emulate the fmove-out
807:
808: btst &ovfl_bit,FPCR_ENABLE(%a6)
809: bne.w fovfl_ovfl_on
810:
811: btst &inex2_bit,FPCR_ENABLE(%a6)
812: bne.w fovfl_inex_on
813:
814: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
815: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
816: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
817:
818: unlk %a6
819: #$# add.l &24,%sp
820:
# With traps disabled, an fmove-out must still honor a pending trace
# exception: test the trace bit in the stacked SR.
821: btst &0x7,(%sp) # is trace on?
822: beq.l _fpsp_done # no
823:
# Convert the stacked frame for the OS trace handler: PC of the traced
# instruction from the FPIAR, format 0x2 with vector offset 0x024.
824: fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
825: mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
826: bra.l _real_trace
827:
828: #########################################################################
829: # XDEF **************************************************************** #
830: # _fpsp_unfl(): 060FPSP entry point for FP Underflow exception. #
831: # #
832: # This handler should be the first code executed upon taking the #
833: # FP Underflow exception in an operating system. #
834: # #
835: # XREF **************************************************************** #
836: # _imem_read_long() - read instruction longword #
837: # fix_skewed_ops() - adjust src operand in fsave frame #
838: # set_tag_x() - determine optype of src/dst operands #
839: # store_fpreg() - store opclass 0 or 2 result to FP regfile #
840: # unnorm_fix() - change UNNORM operands to NORM or ZERO #
841: # load_fpn2() - load dst operand from FP regfile #
842: # fout() - emulate an opclass 3 instruction #
843: # tbl_unsupp - add of table of emulation routines for opclass 0,2 #
844: # _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
845: # _real_unfl() - "callout" for Underflow exception enabled code #
846: # _real_inex() - "callout" for Inexact exception enabled code #
847: # _real_trace() - "callout" for Trace exception code #
848: # #
849: # INPUT *************************************************************** #
850: # - The system stack contains the FP Unfl exception stack frame #
851: # - The fsave frame contains the source operand #
852: # #
853: # OUTPUT ************************************************************** #
854: # Underflow Exception enabled: #
855: # - The system stack is unchanged #
856: # - The fsave frame contains the adjusted src op for opclass 0,2 #
857: # Underflow Exception disabled: #
858: # - The system stack is unchanged #
859: # - The "exception present" flag in the fsave frame is cleared #
860: # #
861: # ALGORITHM *********************************************************** #
862: # On the 060, if an FP underflow is present as the result of any #
863: # instruction, the 060 will take an underflow exception whether the #
864: # exception is enabled or disabled in the FPCR. For the disabled case, #
865: # This handler emulates the instruction to determine what the correct #
866: # default result should be for the operation. This default result is #
867: # then stored in either the FP regfile, data regfile, or memory. #
868: # Finally, the handler exits through the "callout" _fpsp_done() #
869: # denoting that no exceptional conditions exist within the machine. #
870: # If the exception is enabled, then this handler must create the #
871: # exceptional operand and place it in the fsave state frame, and store #
872: # the default result (only if the instruction is opclass 3). For #
873: # exceptions enabled, this handler must exit through the "callout" #
874: # _real_unfl() so that the operating system enabled underflow handler #
875: # can handle this case. #
876: # Two other conditions exist. First, if underflow was disabled #
877: # but the inexact exception was enabled and the result was inexact, #
878: # this handler must exit through the "callout" _real_inex(). #
880: # Also, in the case of an opclass three instruction where #
881: # underflow was disabled and the trace exception was enabled, this #
882: # handler must exit through the "callout" _real_trace(). #
883: # #
884: #########################################################################
885:
886: 	global		_fpsp_unfl
887: _fpsp_unfl:
888: 
889: #$#	sub.l		&24,%sp			# make room for src/dst
890: 
891: 	link.w		%a6,&-LOCAL_SIZE	# init stack frame
892: 
893: 	fsave		FP_SRC(%a6)		# grab the "busy" frame
894: 
895: 	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
896: 	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
897: 	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
898: 
899: # the FPIAR holds the "current PC" of the faulting instruction
900: 	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
901: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
902: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
903: 	bsr.l		_imem_read_long		# fetch the instruction words
904: 	mov.l		%d0,EXC_OPWORD(%a6)	# stash opword/extword for decoding below
905: 
906: ##############################################################################
907: 
# bit 5 of the command/register word separates opclass 3 (fmove out) from
# opclasses 0,2 (result goes to the FP register file).
908: 	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
909: 	bne.w		funfl_out
910: 
911: 
912: 	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
913: 	bsr.l		fix_skewed_ops		# fix src op
914: 
915: 	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
916: 	bsr.l		set_tag_x		# tag the operand type
917: 	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
918: 
919: # bit five of the fp ext word separates the monadic and dyadic operations
920: # that can pass through fpsp_unfl(). remember that fcmp and ftst
921: # will never take this exception.
922: 	btst		&0x5,1+EXC_CMDREG(%a6)	# is op monadic or dyadic?
923: 	beq.b		funfl_extract		# monadic
924: 
925: # now, what's left that's not dyadic is fsincos. we can distinguish it
926: # from all dyadics by the '0110xxx pattern
927: 	btst		&0x4,1+EXC_CMDREG(%a6)	# is op an fsincos?
928: 	bne.b		funfl_extract		# yes
929: 
930: 	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
931: 	bsr.l		load_fpn2		# load dst into FP_DST
932: 
933: 	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
934: 	bsr.l		set_tag_x		# tag the operand type
935: 	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
936: 	bne.b		funfl_op2_done		# no
937: 	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
938: funfl_op2_done:
939: 	mov.b		%d0,DTAG(%a6)		# save dst optype tag
940: 
941: funfl_extract:
942: 
943: #$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
944: #$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
945: #$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
946: #$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
947: #$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
948: #$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
949: 
950: 	clr.l		%d0
951: 	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
952: 
953: 	mov.b		1+EXC_CMDREG(%a6),%d1	# fetch fp opcode
954: 	andi.w		&0x007f,%d1		# extract extension
955: 
# clear the ccode byte and exception status bits so the emulation routine
# can recreate them; leave the quotient and accrued-exception bytes alone.
956: 	andi.l		&0x00ff01ff,USER_FPSR(%a6)
957: 
958: 	fmov.l		&0x0,%fpcr		# zero current control regs
959: 	fmov.l		&0x0,%fpsr
960: 
961: 	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
962: 	lea		FP_DST(%a6),%a1		# pass: ptr to dst op
963: 
964: # maybe we can make these entry points ONLY the UNFL entry points of each routine.
965: 	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
966: 	jsr		(tbl_unsupp.l,%pc,%d1.l*1)	# emulate the instruction
967: 
968: 	bfextu		EXC_CMDREG(%a6){&6:&3},%d0	# dst register number
969: 	bsr.l		store_fpreg			# store default result
970: 
971: # The `060 FPU multiplier hardware is such that if the result of a
972: # multiply operation is the smallest possible normalized number
973: # (0x00000000_80000000_00000000), then the machine will take an
974: # underflow exception. Since this is incorrect, we need to check
975: # if our emulation, after re-doing the operation, decided that
976: # no underflow was called for. We do these checks only in
977: # funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
978: # special case will simply exit gracefully with the correct result.
979: 
980: # the exceptional possibilities we have left ourselves with are ONLY underflow
981: # and inexact. and, the inexact is such that underflow occurred and was disabled
982: # but inexact was enabled.
983: 	btst		&unfl_bit,FPCR_ENABLE(%a6)
984: 	bne.b		funfl_unfl_on		# underflow enabled; exit via _real_unfl()
985: 
986: funfl_chkinex:
987: 	btst		&inex2_bit,FPCR_ENABLE(%a6)
988: 	bne.b		funfl_inex_on		# inexact enabled; maybe exit via _real_inex()
989: 
# neither underflow nor inexact enabled: the default result is already stored,
# so restore the user's context and exit through _fpsp_done().
990: funfl_exit:
991: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
992: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
993: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
994: 
995: 	unlk		%a6
996: #$#	add.l		&24,%sp
997: 	bra.l		_fpsp_done
998:
999: # underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
1000: # in fp1 (don't forget to save fp0). what to do now?
1001: # well, we simply have to get to go to _real_unfl()!
1002: funfl_unfl_on:
1003: 
1004: # The `060 FPU multiplier hardware is such that if the result of a
1005: # multiply operation is the smallest possible normalized number
1006: # (0x00000000_80000000_00000000), then the machine will take an
1007: # underflow exception. Since this is incorrect, we check here to see
1008: # if our emulation, after re-doing the operation, decided that
1009: # no underflow was called for.
1010: 	btst		&unfl_bit,FPSR_EXCEPT(%a6)
1011: 	beq.w		funfl_chkinex		# no underflow after all; recheck inexact
1012: 
1013: funfl_unfl_on2:
1014: 	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
1015: 
1016: 	mov.w		&0xe003,2+FP_SRC(%a6)	# save exc status (UNFL)
1017: 
1018: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
1019: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1020: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1021: 
1022: 	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
1023: 
1024: 	unlk		%a6
1025: 
1026: 	bra.l		_real_unfl
1027: 
1028: # underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
1029: # we must jump to real_inex().
1030: funfl_inex_on:
1031: 
1032: # The `060 FPU multiplier hardware is such that if the result of a
1033: # multiply operation is the smallest possible normalized number
1034: # (0x00000000_80000000_00000000), then the machine will take an
1035: # underflow exception.
1036: # But, whether bogus or not, if inexact is enabled AND it occurred,
1037: # then we have to branch to real_inex.
1038: 
1039: 	btst		&inex2_bit,FPSR_EXCEPT(%a6)
1040: 	beq.w		funfl_exit		# result was exact; just exit
1041: 
1042: funfl_inex_on2:
1043: 
1044: 	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to stack
1045: 
1046: 	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
1047: 	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status (INEX)
1048: 
1049: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
1050: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1051: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1052: 
1053: 	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
1054: 
1055: 	unlk		%a6
1056: 
1057: 	bra.l		_real_inex
1058:
1059: #######################################################################
# opclass 3 (fmove out) underflow: emulate the move out via fout(), then
# decide between _real_unfl(), _real_inex(), a Trace exit, or a clean exit.
1060: funfl_out:
1061: 
1062: 
1063: #$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
1064: #$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
1065: #$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
1066: 
1067: # the src operand is definitely a NORM(!), so tag it as such
1068: 	mov.b		&NORM,STAG(%a6)		# set src optype tag
1069: 
1070: 	clr.l		%d0
1071: 	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
1072: 
1073: 	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
1074: 
1075: 	fmov.l		&0x0,%fpcr		# zero current control regs
1076: 	fmov.l		&0x0,%fpsr
1077: 
1078: 	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
1079: 
1080: 	bsr.l		fout			# emulate the move out
1081: 
1082: 	btst		&unfl_bit,FPCR_ENABLE(%a6)
1083: 	bne.w		funfl_unfl_on2		# underflow enabled; exit via _real_unfl()
1084: 
1085: 	btst		&inex2_bit,FPCR_ENABLE(%a6)
1086: 	bne.w		funfl_inex_on2		# inexact enabled; exit via _real_inex()
1087: 
1088: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
1089: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1090: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1091: 
1092: 	unlk		%a6
1093: #$#	add.l		&24,%sp
1094: 
1095: 	btst		&0x7,(%sp)		# is trace on?
1096: 	beq.l		_fpsp_done		# no
1097: 
# trace was enabled: convert the stack frame into a Trace exception frame
# (format 0x2, vector offset 0x024) and exit through _real_trace().
1098: 	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
1099: 	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
1100: 	bra.l		_real_trace
1101:
1102: #########################################################################
1103: # XDEF **************************************************************** #
1104: # _fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented #
1105: # Data Type" exception. #
1106: # #
1107: # This handler should be the first code executed upon taking the #
1108: # FP Unimplemented Data Type exception in an operating system. #
1109: # #
1110: # XREF **************************************************************** #
1111: # _imem_read_{word,long}() - read instruction word/longword #
1112: # fix_skewed_ops() - adjust src operand in fsave frame #
1113: # set_tag_x() - determine optype of src/dst operands #
1114: # store_fpreg() - store opclass 0 or 2 result to FP regfile #
1115: # unnorm_fix() - change UNNORM operands to NORM or ZERO #
1116: # load_fpn2() - load dst operand from FP regfile #
1117: # load_fpn1() - load src operand from FP regfile #
1118: # fout() - emulate an opclass 3 instruction #
1119: #	tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
1120: # _real_inex() - "callout" to operating system inexact handler #
1121: # _fpsp_done() - "callout" for exit; work all done #
1122: # _real_trace() - "callout" for Trace enabled exception #
1123: # funimp_skew() - adjust fsave src ops to "incorrect" value #
1124: # _real_snan() - "callout" for SNAN exception #
1125: # _real_operr() - "callout" for OPERR exception #
1126: # _real_ovfl() - "callout" for OVFL exception #
1127: # _real_unfl() - "callout" for UNFL exception #
1128: # get_packed() - fetch packed operand from memory #
1129: # #
1130: # INPUT *************************************************************** #
1131: # - The system stack contains the "Unimp Data Type" stk frame #
1132: # - The fsave frame contains the ssrc op (for UNNORM/DENORM) #
1133: # #
1134: # OUTPUT ************************************************************** #
1135: # If Inexact exception (opclass 3): #
1136: # - The system stack is changed to an Inexact exception stk frame #
1137: # If SNAN exception (opclass 3): #
1138: # - The system stack is changed to an SNAN exception stk frame #
1139: # If OPERR exception (opclass 3): #
1140: # - The system stack is changed to an OPERR exception stk frame #
1141: # If OVFL exception (opclass 3): #
1142: # - The system stack is changed to an OVFL exception stk frame #
1143: # If UNFL exception (opclass 3): #
1144: # - The system stack is changed to an UNFL exception stack frame #
1145: # If Trace exception enabled: #
1146: # - The system stack is changed to a Trace exception stack frame #
1147: # Else: (normal case) #
1148: # - Correct result has been stored as appropriate #
1149: # #
1150: # ALGORITHM *********************************************************** #
1151: # Two main instruction types can enter here: (1) DENORM or UNNORM #
1152: # unimplemented data types. These can be either opclass 0,2 or 3 #
1153: # instructions, and (2) PACKED unimplemented data format instructions #
1154: # also of opclasses 0,2, or 3. #
1155: # For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
1156: # operand from the fsave state frame and the dst operand (if dyadic) #
1157: # from the FP register file. The instruction is then emulated by #
1158: # choosing an emulation routine from a table of routines indexed by #
1159: # instruction type. Once the instruction has been emulated and result #
1160: # saved, then we check to see if any enabled exceptions resulted from #
1161: # instruction emulation. If none, then we exit through the "callout" #
1162: # _fpsp_done(). If there is an enabled FP exception, then we insert #
1163: # this exception into the FPU in the fsave state frame and then exit #
1164: # through _fpsp_done(). #
1165: # PACKED opclass 0 and 2 is similar in how the instruction is #
1166: # emulated and exceptions handled. The differences occur in how the #
1167: # handler loads the packed op (by calling get_packed() routine) and #
1168: # by the fact that a Trace exception could be pending for PACKED ops. #
1169: # If a Trace exception is pending, then the current exception stack #
1170: # frame is changed to a Trace exception stack frame and an exit is #
1171: # made through _real_trace(). #
1172: # For UNNORM/DENORM opclass 3, the actual move out to memory is #
1173: # performed by calling the routine fout(). If no exception should occur #
1174: # as the result of emulation, then an exit either occurs through #
1175: # _fpsp_done() or through _real_trace() if a Trace exception is pending #
1176: # (a Trace stack frame must be created here, too). If an FP exception #
1177: # should occur, then we must create an exception stack frame of that #
1178: # type and jump to either _real_snan(), _real_operr(), _real_inex(), #
1179: # _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3 #
1180: # emulation is performed in a similar manner. #
1181: # #
1182: #########################################################################
1183:
1184: #
1185: # (1) DENORM and UNNORM (unimplemented) data types:
1186: #
1187: # post-instruction
1188: # *****************
1189: # * EA *
1190: # pre-instruction * *
1191: # ***************** *****************
1192: # * 0x0 * 0x0dc * * 0x3 * 0x0dc *
1193: # ***************** *****************
1194: # * Next * * Next *
1195: # * PC * * PC *
1196: # ***************** *****************
1197: # * SR * * SR *
1198: # ***************** *****************
1199: #
1200: # (2) PACKED format (unsupported) opclasses two and three:
1201: # *****************
1202: # * EA *
1203: # * *
1204: # *****************
1205: # * 0x2 * 0x0dc *
1206: # *****************
1207: # * Next *
1208: # * PC *
1209: # *****************
1210: # * SR *
1211: # *****************
1212: #
1213: 	global		_fpsp_unsupp
1214: _fpsp_unsupp:
1215: 
1216: 	link.w		%a6,&-LOCAL_SIZE	# init stack frame
1217: 
1218: 	fsave		FP_SRC(%a6)		# save fp state
1219: 
1220: 	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
1221: 	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
1222: 	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
1223: 
1224: 	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
1225: 	bne.b		fu_s
1226: fu_u:
1227: 	mov.l		%usp,%a0		# fetch user stack pointer
1228: 	mov.l		%a0,EXC_A7(%a6)		# save on stack
1229: 	bra.b		fu_cont
1230: # if the exception is an opclass zero or two unimplemented data type
1231: # exception, then the a7' calculated here is wrong since it doesn't
1232: # stack an ea. however, we don't need an a7' for this case anyways.
1233: fu_s:
1234: 	lea		0x4+EXC_EA(%a6),%a0	# load old a7'
1235: 	mov.l		%a0,EXC_A7(%a6)		# save on stack
1236: 
1237: fu_cont:
1238: 
1239: # the FPIAR holds the "current PC" of the faulting instruction
1240: # the FPIAR should be set correctly for ALL exceptions passing through
1241: # this point.
1242: 	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
1243: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
1244: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
1245: 	bsr.l		_imem_read_long		# fetch the instruction words
1246: 	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
1247: 
1248: ############################
1249: 
1250: 	clr.b		SPCOND_FLG(%a6)		# clear special condition flag
1251: 
1252: # Separate opclass three (fpn-to-mem) ops since they have a different
1253: # stack frame and protocol.
1254: 	btst		&0x5,EXC_CMDREG(%a6)	# is it an fmove out?
1255: 	bne.w		fu_out			# yes
1256: 
1257: # Separate packed opclass two instructions.
1258: 	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
1259: 	cmpi.b		%d0,&0x13		# opclass 2 w/ packed src fmt?
1260: 	beq.w		fu_in_pack		# yes; handled separately
1261: 
1262: 
1263: # I'm not sure at this point what FPSR bits are valid for this instruction.
1264: # so, since the emulation routines re-create them anyways, zero exception field
1265: 	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
1266: 
1267: 	fmov.l		&0x0,%fpcr		# zero current control regs
1268: 	fmov.l		&0x0,%fpsr
1269: 
1270: # Opclass two w/ memory-to-fpn operation will have an incorrect extended
1271: # precision format if the src format was single or double and the
1272: # source data type was an INF, NAN, DENORM, or UNNORM
1273: 	lea		FP_SRC(%a6),%a0		# pass ptr to input
1274: 	bsr.l		fix_skewed_ops
1275: 
1276: # we don't know whether the src operand or the dst operand (or both) is the
1277: # UNNORM or DENORM. call the function that tags the operand type. if the
1278: # input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
1279: 	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
1280: 	bsr.l		set_tag_x		# tag the operand type
1281: 	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
1282: 	bne.b		fu_op2			# no
1283: 	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
1284: 
1285: fu_op2:
1286: 	mov.b		%d0,STAG(%a6)		# save src optype tag
1287: 
1288: 	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
1289: 
1290: # bit five of the fp extension word separates the monadic and dyadic operations
1291: # at this point
1292: 	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
1293: 	beq.b		fu_extract		# monadic
1294: 	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
1295: 	beq.b		fu_extract		# yes, so it's monadic, too
1296: 
1297: 	bsr.l		load_fpn2		# load dst into FP_DST
1298: 
1299: 	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
1300: 	bsr.l		set_tag_x		# tag the operand type
1301: 	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
1302: 	bne.b		fu_op2_done		# no
1303: 	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
1304: fu_op2_done:
1305: 	mov.b		%d0,DTAG(%a6)		# save dst optype tag
1306: 
1307: fu_extract:
1308: 	clr.l		%d0
1309: 	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
1310: 
1311: 	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
1312: 
1313: 	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
1314: 	lea		FP_DST(%a6),%a1		# pass: ptr to dst op
1315: 
1316: 	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
1317: 	jsr		(tbl_unsupp.l,%pc,%d1.l*1)	# emulate the instruction
1318: 
1319: #
1320: # Exceptions in order of precedence:
1321: # 	BSUN	: none
1322: # 	SNAN	: all dyadic ops
1323: # 	OPERR	: fsqrt(-NORM)
1324: # 	OVFL	: all except ftst,fcmp
1325: # 	UNFL	: all except ftst,fcmp
1326: # 	DZ	: fdiv
1327: # 	INEX2	: all except ftst,fcmp
1328: # 	INEX1	: none (packed doesn't go through here)
1329: #
1330: 
1331: # we determine the highest priority exception(if any) set by the
1332: # emulation routine that has also been enabled by the user.
1333: 	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exception enables
1334: 	bne.b		fu_in_ena		# some are enabled
1335: 
1336: fu_in_cont:
1337: # fcmp and ftst do not store any result.
1338: 	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
1339: 	andi.b		&0x38,%d0		# extract bits 3-5
1340: 	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
1341: 	beq.b		fu_in_exit		# yes
1342: 
1343: 	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
1344: 	bsr.l		store_fpreg		# store the result
1345: 
1346: fu_in_exit:
1347: 
1348: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1349: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1350: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1351: 
1352: 	unlk		%a6
1353: 
1354: 	bra.l		_fpsp_done
1355:
1356: fu_in_ena:
1357: 	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
1358: 	bfffo		%d0{&24:&8},%d0		# find highest priority exception
1359: 	bne.b		fu_in_exc		# there is at least one set
1360: 
1361: #
1362: # No exceptions occurred that were also enabled. Now:
1363: #
1364: # 	if (OVFL && ovfl_disabled && inexact_enabled) {
1365: # 	    branch to _real_inex() (even if the result was exact!);
1366: # 	} else {
1367: # 	    save the result in the proper fp reg (unless the op is fcmp or ftst);
1368: # 	    return;
1369: # 	}
1370: #
1371: 	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
1372: 	beq.b		fu_in_cont		# no
1373: 
1374: fu_in_ovflchk:
1375: 	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
1376: 	beq.b		fu_in_cont		# no
1377: 	bra.w		fu_in_exc_ovfl		# go insert overflow frame
1378: 
1379: #
1380: # An exception occurred and that exception was enabled:
1381: #
1382: # shift enabled exception field into lo byte of d0;
1383: # if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
1384: #     ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
1385: # /*
1386: #  * this is the case where we must call _real_inex() now or else
1387: #  * there will be no other way to pass it the exceptional operand
1388: #  */
1389: # call _real_inex();
1390: # } else {
1391: # restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
1392: # }
1393: #
1394: fu_in_exc:
1395: 	subi.l		&24,%d0			# fix offset to be 0-8
1396: 	cmpi.b		%d0,&0x6		# is exception INEX? (6)
1397: 	bne.b		fu_in_exc_exit		# no
1398: 
1399: # the enabled exception was inexact
1400: 	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
1401: 	bne.w		fu_in_exc_unfl		# yes
1402: 	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
1403: 	bne.w		fu_in_exc_ovfl		# yes
1404: 
1405: # here, we insert the correct fsave status value into the fsave frame for the
1406: # corresponding exception. the operand in the fsave frame should be the original
1407: # src operand.
1408: fu_in_exc_exit:
1409: 	mov.l		%d0,-(%sp)		# save d0
1410: 	bsr.l		funimp_skew		# skew sgl or dbl inputs
1411: 	mov.l		(%sp)+,%d0		# restore d0
1412: 
1413: 	mov.w		(tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
1414: 
1415: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1416: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1417: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1418: 
1419: 	frestore	FP_SRC(%a6)		# restore src op
1420: 
1421: 	unlk		%a6
1422: 
1423: 	bra.l		_fpsp_done
1424: 
# fsave exc status words indexed by exception priority
# (0=BSUN, 1=SNAN, 2=OPERR, 3=OVFL, 4=UNFL, 5=DZ, 6=INEX2, 7=INEX1)
1425: tbl_except:
1426: 	short		0xe000,0xe006,0xe004,0xe005
1427: 	short		0xe003,0xe002,0xe001,0xe001
1428: 
1429: fu_in_exc_unfl:
1430: 	mov.w		&0x4,%d0		# index of UNFL in tbl_except
1431: 	bra.b		fu_in_exc_exit
1432: fu_in_exc_ovfl:
1433: 	mov.w		&0x03,%d0		# index of OVFL in tbl_except
1434: 	bra.b		fu_in_exc_exit
1435:
1436: # If the input operand to this operation was opclass two and a single
1437: # or double precision denorm, inf, or nan, the operand needs to be
1438: # "corrected" in order to have the proper equivalent extended precision
1439: # number.
# In:    a0 = ptr to operand (extended format, as left in the fsave frame)
# Out:   operand at (a0) rewritten as a canonical extended precision value
# Uses:  d0; calls norm (which is counted on not to alter a0)
1440: 	global		fix_skewed_ops
1441: fix_skewed_ops:
1442: 	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
1443: 	cmpi.b		%d0,&0x11		# is class = 2 & fmt = sgl?
1444: 	beq.b		fso_sgl			# yes
1445: 	cmpi.b		%d0,&0x15		# is class = 2 & fmt = dbl?
1446: 	beq.b		fso_dbl			# yes
1447: 	rts					# no
1448: 
1449: fso_sgl:
1450: 	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
1451: 	andi.w		&0x7fff,%d0		# strip sign
1452: 	cmpi.w		%d0,&0x3f80		# is |exp| == $3f80?
1453: 	beq.b		fso_sgl_dnrm_zero	# yes
1454: 	cmpi.w		%d0,&0x407f		# no; is |exp| == $407f?
1455: 	beq.b		fso_infnan		# yes
1456: 	rts					# no
1457: 
1458: fso_sgl_dnrm_zero:
1459: 	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
1460: 	beq.b		fso_zero		# it's a skewed zero
1461: fso_sgl_dnrm:
1462: # here, we count on norm not to alter a0...
1463: 	bsr.l		norm			# normalize mantissa
1464: 	neg.w		%d0			# -shft amt
1465: 	addi.w		&0x3f81,%d0		# adjust new exponent
1466: 	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
1467: 	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
1468: 	rts
1469: 
1470: fso_zero:
1471: 	andi.w		&0x8000,LOCAL_EX(%a0)	# clear bogus exponent
1472: 	rts
1473: 
1474: fso_infnan:
1475: 	andi.b		&0x7f,LOCAL_HI(%a0)	# clear j-bit
1476: 	ori.w		&0x7fff,LOCAL_EX(%a0)	# make exponent = $7fff
1477: 	rts
1478: 
1479: fso_dbl:
1480: 	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
1481: 	andi.w		&0x7fff,%d0		# strip sign
1482: 	cmpi.w		%d0,&0x3c00		# is |exp| == $3c00?
1483: 	beq.b		fso_dbl_dnrm_zero	# yes
1484: 	cmpi.w		%d0,&0x43ff		# no; is |exp| == $43ff?
1485: 	beq.b		fso_infnan		# yes
1486: 	rts					# no
1487: 
1488: fso_dbl_dnrm_zero:
1489: 	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
1490: 	bne.b		fso_dbl_dnrm		# it's a skewed denorm
1491: 	tst.l		LOCAL_LO(%a0)		# is it a zero?
1492: 	beq.b		fso_zero		# yes
1493: fso_dbl_dnrm:
1494: # here, we count on norm not to alter a0...
1495: 	bsr.l		norm			# normalize mantissa
1496: 	neg.w		%d0			# -shft amt
1497: 	addi.w		&0x3c01,%d0		# adjust new exponent
1498: 	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
1499: 	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
1500: 	rts
1501:
1502: #################################################################
1503:
1504: # fmove out took an unimplemented data type exception.
1505: # the src operand is in FP_SRC. Call _fout() to write out the result and
1506: # to determine which exceptions, if any, to take.
1507: fu_out:
1508: 
1509: # Separate packed move outs from the UNNORM and DENORM move outs.
1510: 	bfextu		EXC_CMDREG(%a6){&3:&3},%d0 # extract dst format field
1511: 	cmpi.b		%d0,&0x3		# packed (static k-factor)?
1512: 	beq.w		fu_out_pack		# yes
1513: 	cmpi.b		%d0,&0x7		# packed (dynamic k-factor)?
1514: 	beq.w		fu_out_pack		# yes
1515: 
1516: 
1517: # I'm not sure at this point what FPSR bits are valid for this instruction.
1518: # so, since the emulation routines re-create them anyways, zero exception field.
1519: # fmove out doesn't affect ccodes.
1520: 	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
1521: 
1522: 	fmov.l		&0x0,%fpcr		# zero current control regs
1523: 	fmov.l		&0x0,%fpsr
1524: 
1525: # the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
1526: # call here. just figure out what it is...
1527: 	mov.w		FP_SRC_EX(%a6),%d0	# get exponent
1528: 	andi.w		&0x7fff,%d0		# strip sign
1529: 	beq.b		fu_out_denorm		# it's a DENORM
1530: 
1531: 	lea		FP_SRC(%a6),%a0		# pass ptr to the UNNORM
1532: 	bsr.l		unnorm_fix		# yes; fix it
1533: 
1534: 	mov.b		%d0,STAG(%a6)		# save returned optype tag
1535: 
1536: 	bra.b		fu_out_cont
1537: fu_out_denorm:
1538: 	mov.b		&DENORM,STAG(%a6)	# tag src as DENORM
1539: fu_out_cont:
1540: 
1541: 	clr.l		%d0
1542: 	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
1543: 
1544: 	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
1545: 
1546: 	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
1547: 	bsr.l		fout			# call fmove out routine
1548: 
1549: # Exceptions in order of precedence:
1550: # 	BSUN	: none
1551: # 	SNAN	: none
1552: # 	OPERR	: fmove.{b,w,l} out of large UNNORM
1553: # 	OVFL	: fmove.{s,d}
1554: # 	UNFL	: fmove.{s,d,x}
1555: # 	DZ	: none
1556: # 	INEX2	: all
1557: # 	INEX1	: none (packed doesn't travel through here)
1558: 
1559: # determine the highest priority exception(if any) set by the
1560: # emulation routine that has also been enabled by the user.
1561: 	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
1562: 	bne.w		fu_out_ena		# some are enabled
1563: 
1564: fu_out_done:
1565: 
1566: 	mov.l		EXC_A6(%a6),(%a6)	# in case a6 changed
1567: 
1568: # on extended precision opclass three instructions using pre-decrement or
1569: # post-increment addressing mode, the address register is not updated. if the
1570: # address register was the stack pointer used from user mode, then let's update
1571: # it here. if it was used from supervisor mode, then we have to handle this
1572: # as a special case.
1573: 	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
1574: 	bne.b		fu_out_done_s		# supervisor
1575: 
1576: 	mov.l		EXC_A7(%a6),%a0		# restore a7
1577: 	mov.l		%a0,%usp
1578: 
1579: fu_out_done_cont:
1580: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1581: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1582: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1583: 
1584: 	unlk		%a6
1585: 
1586: 	btst		&0x7,(%sp)		# is trace on?
1587: 	bne.b		fu_out_trace		# yes
1588: 
1589: 	bra.l		_fpsp_done
1590: 
1591: # is the ea mode pre-decrement of the stack pointer from supervisor mode?
1592: # ("fmov.x fpm,-(a7)") if so,
1593: fu_out_done_s:
1594: 	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
1595: 	bne.b		fu_out_done_cont
1596: 
1597: # the extended precision result is still in fp0. but, we need to save it
1598: # somewhere on the stack until we can copy it to its final resting place.
1599: # here, we're counting on the top of the stack to be the old place-holders
1600: # for fp0/fp1 which have already been restored. that way, we can write
1601: # over those destinations with the shifted stack frame.
1602: 	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
1603: 
1604: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1605: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1606: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1607: 
1608: 	mov.l		(%a6),%a6		# restore frame pointer
1609: 
# shift the SR/PC of the exception frame "down" 12 bytes
1610: 	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
1611: 	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
1612: 
1613: # now, copy the result to the proper place on the stack
1614: 	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
1615: 	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
1616: 	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
1617: 
1618: 	add.l		&LOCAL_SIZE-0x8,%sp
1619: 
1620: 	btst		&0x7,(%sp)		# is trace on?
1621: 	bne.b		fu_out_trace		# yes
1622: 
1623: 	bra.l		_fpsp_done
1624:
1625: fu_out_ena:
1626: 	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
1627: 	bfffo		%d0{&24:&8},%d0		# find highest priority exception
1628: 	bne.b		fu_out_exc		# there is at least one set
1629: 
1630: # no exceptions were set.
1631: # if a disabled overflow occurred and inexact was enabled but the result
1632: # was exact, then a branch to _real_inex() is made.
1633: 	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
1634: 	beq.w		fu_out_done		# no
1635: 
1636: fu_out_ovflchk:
1637: 	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
1638: 	beq.w		fu_out_done		# no
1639: 	bra.w		fu_inex			# yes
1640: 
1641: #
1642: # The fp move out that took the "Unimplemented Data Type" exception was
1643: # being traced. Since the stack frames are similar, get the "current" PC
1644: # from FPIAR and put it in the trace stack frame then jump to _real_trace().
1645: #
1646: #		 UNSUPP FRAME		   TRACE FRAME
1647: #		*****************	*****************
1648: #		*	EA	*	*    Current	*
1649: #		*		*	*	PC	*
1650: #		*****************	*****************
1651: #		* 0x3 *  0x0dc	*	* 0x2 *  0x024	*
1652: #		*****************	*****************
1653: #		*     Next	*	*     Next	*
1654: #		*	PC	*	*	PC	*
1655: #		*****************	*****************
1656: #		*	SR	*	*	SR	*
1657: #		*****************	*****************
1658: #
1659: fu_out_trace:
1660: 	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
1661: 	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
1662: 	bra.l		_real_trace
1663:
1664: # an exception occurred and that exception was enabled.
1665: fu_out_exc:
1666: 	subi.l		&24,%d0			# fix offset to be 0-8
1667: 
1668: # we don't mess with the existing fsave frame. just re-insert it and
1669: # jump to the "_real_{}()" handler...
1670: 	mov.w		(tbl_fu_out.b,%pc,%d0.w*2),%d0 # fetch handler offset
1671: 	jmp		(tbl_fu_out.b,%pc,%d0.w*1)	# jump to handler
1672: 
1673: 	swbeg		&0x8
# jump table indexed by exception priority (0=BSUN ... 7=INEX1);
# entries are offsets relative to tbl_fu_out (0 = impossible case)
1674: tbl_fu_out:
1675: 	short		tbl_fu_out - tbl_fu_out	# BSUN can't happen
1676: 	short		tbl_fu_out - tbl_fu_out	# SNAN can't happen
1677: 	short		fu_operr - tbl_fu_out	# OPERR
1678: 	short		fu_ovfl - tbl_fu_out	# OVFL
1679: 	short		fu_unfl - tbl_fu_out	# UNFL
1680: 	short		tbl_fu_out - tbl_fu_out	# DZ can't happen
1681: 	short		fu_inex - tbl_fu_out	# INEX2
1682: 	short		tbl_fu_out - tbl_fu_out	# INEX1 won't make it here
1683:
1684: # for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
1685: # frestore it.
1686: fu_snan:
1687: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1688: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1689: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1690: 
1691: 	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
1692: 	mov.w		&0xe006,2+FP_SRC(%a6)	# set SNAN exc status in fsave frame
1693: 
1694: 	frestore	FP_SRC(%a6)		# restore src op
1695: 
1696: 	unlk		%a6
1697: 
1698: 
1699: 	bra.l		_real_snan
1700: 
1701: fu_operr:
1702: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1703: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1704: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1705: 
1706: 	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
1707: 	mov.w		&0xe004,2+FP_SRC(%a6)	# set OPERR exc status in fsave frame
1708: 
1709: 	frestore	FP_SRC(%a6)		# restore src op
1710: 
1711: 	unlk		%a6
1712: 
1713: 
1714: 	bra.l		_real_operr
1715: 
1716: fu_ovfl:
1717: 	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
1718: 
1719: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
1720: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1721: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
1722: 
1723: 	mov.w		&0x30d4,EXC_VOFF(%a6)	# vector offset = 0xd4
1724: 	mov.w		&0xe005,2+FP_SRC(%a6)	# set OVFL exc status in fsave frame
1725: 
1726: 	frestore	FP_SRC(%a6)		# restore EXOP
1727: 
1728: 	unlk		%a6
1729: 
1730: 	bra.l		_real_ovfl
1731:
1732: # underflow can happen for extended precision. extended precision opclass
1733: # three instruction exceptions don't update the stack pointer. so, if the
1734: # exception occurred from user mode, then simply update a7 and exit normally.
1735: # if the exception occurred from supervisor mode, check if the <ea> was -(a7).
1736: fu_unfl:
1737: mov.l EXC_A6(%a6),(%a6) # restore a6
1738:
1739: btst &0x5,EXC_SR(%a6) # supervisor mode exception?
1740: bne.w fu_unfl_s # yes
1741:
1742: mov.l EXC_A7(%a6),%a0 # restore a7 whether we need
1743: mov.l %a0,%usp # to or not...
1744:
1745: fu_unfl_cont:
1746: fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1747:
1748: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1749: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1750: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1751:
1752: mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
1753: mov.w &0xe003,2+FP_SRC(%a6) # set fsave status = UNFL
1754:
1755: frestore FP_SRC(%a6) # restore EXOP
1756:
1757: unlk %a6
1758:
1759: bra.l _real_unfl
1760:
1761: fu_unfl_s:
1762: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
1763: bne.b fu_unfl_cont # no; take the common path
1764:
1765: # the extended precision result is still in fp0. but, we need to save it
1766: # somewhere on the stack until we can copy it to its final resting place
1767: # (where the exc frame is currently). make sure it's not at the top of the
1768: # frame or it will get overwritten when the exc stack frame is shifted "down".
1769: fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
1770: fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
1771:
1772: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1773: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1774: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1775:
1776: mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
1777: mov.w &0xe003,2+FP_DST(%a6) # set fsave status = UNFL
1778:
1779: frestore FP_DST(%a6) # restore EXOP
1780:
1781: mov.l (%a6),%a6 # restore frame pointer
1782:
1783: mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp) # shift frame "down": SR
1784: mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp) # ... PC
1785: mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp) # ... <ea>
1786:
1787: # now, copy the result to the proper place on the stack
1788: mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
1789: mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
1790: mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
1791:
1792: add.l &LOCAL_SIZE-0x8,%sp # strip local workspace
1793:
1794: bra.l _real_unfl
1795:
1796: # fmove in and out enter here.
1797: fu_inex:
1798: fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1799:
1800: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1801: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1802: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1803:
1804: mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
1805: mov.w &0xe001,2+FP_SRC(%a6) # set fsave status = INEX
1806:
1807: frestore FP_SRC(%a6) # restore EXOP
1808:
1809: unlk %a6
1810:
1811:
1812: bra.l _real_inex
1813:
1814: #########################################################################
1815: #########################################################################
1816: fu_in_pack:
1817:
1818:
1819: # I'm not sure at this point what FPSR bits are valid for this instruction.
1820: # so, since the emulation routines re-create them anyways, zero exception field
1821: andi.l &0x0ff00ff,USER_FPSR(%a6) # zero exception field
1822:
1823: fmov.l &0x0,%fpcr # zero current control regs
1824: fmov.l &0x0,%fpsr
1825:
1826: bsr.l get_packed # fetch packed src operand
1827:
1828: lea FP_SRC(%a6),%a0 # pass ptr to src
1829: bsr.l set_tag_x # set src optype tag
1830:
1831: mov.b %d0,STAG(%a6) # save src optype tag
1832:
1833: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
1834:
1835: # bit five of the fp extension word separates the monadic and dyadic operations
1836: # at this point
1837: btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
1838: beq.b fu_extract_p # monadic
1839: cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
1840: beq.b fu_extract_p # yes, so it's monadic, too
1841:
1842: bsr.l load_fpn2 # load dst into FP_DST
1843:
1844: lea FP_DST(%a6),%a0 # pass: ptr to dst op
1845: bsr.l set_tag_x # tag the operand type
1846: cmpi.b %d0,&UNNORM # is operand an UNNORM?
1847: bne.b fu_op2_done_p # no
1848: bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
1849: fu_op2_done_p:
1850: mov.b %d0,DTAG(%a6) # save dst optype tag
1851:
1852: fu_extract_p:
1853: clr.l %d0
1854: mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
1855:
1856: bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
1857:
1858: lea FP_SRC(%a6),%a0
1859: lea FP_DST(%a6),%a1
1860:
1861: mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
1862: jsr (tbl_unsupp.l,%pc,%d1.l*1) # call emulation routine
1863:
1864: #
1865: # Exceptions in order of precedence:
1866: # BSUN : none
1867: # SNAN : all dyadic ops
1868: # OPERR : fsqrt(-NORM)
1869: # OVFL : all except ftst,fcmp
1870: # UNFL : all except ftst,fcmp
1871: # DZ : fdiv
1872: # INEX2 : all except ftst,fcmp
1873: # INEX1 : all
1874: #
1875:
1876: # we determine the highest priority exception(if any) set by the
1877: # emulation routine that has also been enabled by the user.
1878: mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
1879: bne.w fu_in_ena_p # some are enabled
1880:
1881: fu_in_cont_p:
1882: # fcmp and ftst do not store any result.
1883: mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
1884: andi.b &0x38,%d0 # extract bits 3-5
1885: cmpi.b %d0,&0x38 # is instr fcmp or ftst?
1886: beq.b fu_in_exit_p # yes
1887:
1888: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
1889: bsr.l store_fpreg # store the result
1890:
1891: fu_in_exit_p:
1892:
1893: btst &0x5,EXC_SR(%a6) # user or supervisor?
1894: bne.w fu_in_exit_s_p # supervisor
1895:
1896: mov.l EXC_A7(%a6),%a0 # update user a7
1897: mov.l %a0,%usp
1898:
1899: fu_in_exit_cont_p:
1900: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1901: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1902: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1903:
1904: unlk %a6 # unravel stack frame
1905:
1906: btst &0x7,(%sp) # is trace on?
1907: bne.w fu_trace_p # yes
1908:
1909: bra.l _fpsp_done # exit to os
1910:
1911: # the exception occurred in supervisor mode. check to see if the
1912: # addressing mode was (a7)+. if so, we'll need to shift the
1913: # stack frame "up".
1914: fu_in_exit_s_p:
1915: btst &mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
1916: beq.b fu_in_exit_cont_p # no
1917:
1918: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1919: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
1920: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1921:
1922: unlk %a6 # unravel stack frame
1923:
1924: # shift the stack frame "up". we don't really care about the <ea> field.
1925: mov.l 0x4(%sp),0x10(%sp)
1926: mov.l 0x0(%sp),0xc(%sp)
1927: add.l &0xc,%sp # 12-byte shift complete
1928:
1929: btst &0x7,(%sp) # is trace on?
1930: bne.w fu_trace_p # yes
1931:
1932: bra.l _fpsp_done # exit to os
1933:
1934: fu_in_ena_p:
1935: and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled & set
1936: bfffo %d0{&24:&8},%d0 # find highest priority exception; d0 = bit offset (24-31)
1937: bne.b fu_in_exc_p # at least one was set
1938:
1939: #
1940: # No exceptions occurred that were also enabled. Now:
1941: #
1942: # if (OVFL && ovfl_disabled && inexact_enabled) {
1943: # branch to _real_inex() (even if the result was exact!);
1944: # } else {
1945: # save the result in the proper fp reg (unless the op is fcmp or ftst);
1946: # return;
1947: # }
1948: #
1949: btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
1950: beq.w fu_in_cont_p # no
1951:
1952: fu_in_ovflchk_p:
1953: btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
1954: beq.w fu_in_cont_p # no
1955: bra.w fu_in_exc_ovfl_p # do _real_inex() now
1956:
1957: #
1958: # An exception occurred and that exception was enabled:
1959: #
1960: # shift enabled exception field into lo byte of d0;
1961: # if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
1962: # ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
1963: # /*
1964: # * this is the case where we must call _real_inex() now or else
1965: # * there will be no other way to pass it the exceptional operand
1966: # */
1967: # call _real_inex();
1968: # } else {
1969: # restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
1970: # }
1971: #
1972: fu_in_exc_p:
1973: subi.l &24,%d0 # fix bfffo offset to be 0-7
1974: cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
1975: blt.b fu_in_exc_exit_p # no
1976:
1977: # the enabled exception was inexact
1978: btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
1979: bne.w fu_in_exc_unfl_p # yes
1980: btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
1981: bne.w fu_in_exc_ovfl_p # yes
1982:
1983: # here, we insert the correct fsave status value into the fsave frame for the
1984: # corresponding exception. the operand in the fsave frame should be the original
1985: # src operand.
1986: # as a reminder for future predicted pain and agony, we are passing in fsave the
1987: # "non-skewed" operand for cases of sgl and dbl src INFs,NANs, and DENORMs.
1988: # this is INCORRECT for enabled SNAN which would give to the user the skewed SNAN!!!
1989: fu_in_exc_exit_p:
1990: btst &0x5,EXC_SR(%a6) # user or supervisor?
1991: bne.w fu_in_exc_exit_s_p # supervisor
1992:
1993: mov.l EXC_A7(%a6),%a0 # update user a7
1994: mov.l %a0,%usp
1995:
1996: fu_in_exc_exit_cont_p:
1997: mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6) # set fsave status
1998:
1999: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2000: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2001: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2002:
2003: frestore FP_SRC(%a6) # restore src op
2004:
2005: unlk %a6
2006:
2007: btst &0x7,(%sp) # is trace enabled?
2008: bne.w fu_trace_p # yes
2009:
2010: bra.l _fpsp_done
2011:
2012: tbl_except_p:
2013: short 0xe000,0xe006,0xe004,0xe005 # bsun,snan,operr,ovfl
2014: short 0xe003,0xe002,0xe001,0xe001 # unfl,dz,inex2,inex1
2015:
2016: fu_in_exc_ovfl_p:
2017: mov.w &0x3,%d0 # tbl_except_p index of OVFL
2018: bra.w fu_in_exc_exit_p
2019:
2020: fu_in_exc_unfl_p:
2021: mov.w &0x4,%d0 # tbl_except_p index of UNFL
2022: bra.w fu_in_exc_exit_p
2023:
2024: fu_in_exc_exit_s_p:
2025: btst &mia7_bit,SPCOND_FLG(%a6) # was <ea> mode (a7)+?
2026: beq.b fu_in_exc_exit_cont_p # no
2027:
2028: mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6) # set fsave status
2029:
2030: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2031: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2032: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2033:
2034: frestore FP_SRC(%a6) # restore src op
2035:
2036: unlk %a6 # unravel stack frame
2037:
2038: # shift stack frame "up". who cares about <ea> field.
2039: mov.l 0x4(%sp),0x10(%sp)
2040: mov.l 0x0(%sp),0xc(%sp)
2041: add.l &0xc,%sp # 12-byte shift complete
2042:
2043: btst &0x7,(%sp) # is trace on?
2044: bne.b fu_trace_p # yes
2045:
2046: bra.l _fpsp_done # exit to os
2047:
2048: #
2049: # The opclass two PACKED instruction that took an "Unimplemented Data Type"
2050: # exception was being traced. Make the "current" PC the FPIAR and put it in the
2051: # trace stack frame then jump to _real_trace().
2052: #
2053: # UNSUPP FRAME TRACE FRAME
2054: # ***************** *****************
2055: # * EA * * Current *
2056: # * * * PC *
2057: # ***************** *****************
2058: # * 0x2 * 0x0dc * * 0x2 * 0x024 *
2059: # ***************** *****************
2060: # * Next * * Next *
2061: # * PC * * PC *
2062: # ***************** *****************
2063: # * SR * * SR *
2064: # ***************** *****************
2065: fu_trace_p:
2066: mov.w &0x2024,0x6(%sp) # format 0x2; vector offset 0x024
2067: fmov.l %fpiar,0x8(%sp) # "Current PC" = FPIAR
2068:
2069: bra.l _real_trace
2070:
2071: #########################################################
2072: #########################################################
2073: fu_out_pack:
2074:
2075:
2076: # I'm not sure at this point what FPSR bits are valid for this instruction.
2077: # so, since the emulation routines re-create them anyways, zero exception field.
2078: # fmove out doesn't affect ccodes.
2079: and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
2080:
2081: fmov.l &0x0,%fpcr # zero current control regs
2082: fmov.l &0x0,%fpsr
2083:
2084: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # extract src fp reg no.
2085: bsr.l load_fpn1 # load src fpreg (tagged at FP_SRC below)
2086:
2087: # unlike other opclass 3, unimplemented data type exceptions, packed must be
2088: # able to detect all operand types.
2089: lea FP_SRC(%a6),%a0
2090: bsr.l set_tag_x # tag the operand type
2091: cmpi.b %d0,&UNNORM # is operand an UNNORM?
2092: bne.b fu_op2_p # no
2093: bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
2094:
2095: fu_op2_p:
2096: mov.b %d0,STAG(%a6) # save src optype tag
2097:
2098: clr.l %d0
2099: mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
2100:
2101: lea FP_SRC(%a6),%a0 # pass ptr to src operand
2102:
2103: mov.l (%a6),EXC_A6(%a6) # in case a6 changes
2104: bsr.l fout # call fmove out routine
2105:
2106: # Exceptions in order of precedence:
2107: # BSUN : no
2108: # SNAN : yes
2109: # OPERR : if ((k_factor > +17) || (dec. exp exceeds 3 digits))
2110: # OVFL : no
2111: # UNFL : no
2112: # DZ : no
2113: # INEX2 : yes
2114: # INEX1 : no
2115:
2116: # determine the highest priority exception(if any) set by the
2117: # emulation routine that has also been enabled by the user.
2118: mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
2119: bne.w fu_out_ena_p # some are enabled
2120:
2121: fu_out_exit_p:
2122: mov.l EXC_A6(%a6),(%a6) # restore a6
2123:
2124: btst &0x5,EXC_SR(%a6) # user or supervisor?
2125: bne.b fu_out_exit_s_p # supervisor
2126:
2127: mov.l EXC_A7(%a6),%a0 # update user a7
2128: mov.l %a0,%usp
2129:
2130: fu_out_exit_cont_p:
2131: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2132: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2133: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2134:
2135: unlk %a6 # unravel stack frame
2136:
2137: btst &0x7,(%sp) # is trace on?
2138: bne.w fu_trace_p # yes
2139:
2140: bra.l _fpsp_done # exit to os
2141:
2142: # the exception occurred in supervisor mode. check to see if the
2143: # addressing mode was -(a7). if so, we'll need to shift the
2144: # stack frame "down".
2145: fu_out_exit_s_p:
2146: btst &mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
2147: beq.b fu_out_exit_cont_p # no
2148:
2149: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2150: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2151: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2152:
2153: mov.l (%a6),%a6 # restore frame pointer
2154:
2155: mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp) # shift SR "down" 12 bytes
2156: mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp) # shift PC "down" 12 bytes
2157:
2158: # now, copy the result to the proper place on the stack
2159: mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
2160: mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
2161: mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
2162:
2163: add.l &LOCAL_SIZE-0x8,%sp # strip local workspace
2164:
2165: btst &0x7,(%sp) # is trace on?
2166: bne.w fu_trace_p # yes
2167:
2168: bra.l _fpsp_done # exit to os
2169:
2170: fu_out_ena_p:
2171: and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
2172: bfffo %d0{&24:&8},%d0 # find highest priority exception
2173: beq.w fu_out_exit_p # none; exit normally
2174:
2175: mov.l EXC_A6(%a6),(%a6) # restore a6
2176:
2177: # an exception occurred and that exception was enabled.
2178: # the only exception possible on packed move out are INEX, OPERR, and SNAN.
2179: fu_out_exc_p:
2180: cmpi.b %d0,&0x1a # 0x1a = bfffo offset of OPERR
2181: bgt.w fu_inex_p2 # lower priority; must be INEX
2182: beq.w fu_operr_p # OPERR
2183:
2184: fu_snan_p:
2185: btst &0x5,EXC_SR(%a6) # supervisor mode exception?
2186: bne.b fu_snan_s_p # yes
2187:
2188: mov.l EXC_A7(%a6),%a0 # update user a7
2189: mov.l %a0,%usp
2190: bra.w fu_snan
2191:
2192: fu_snan_s_p:
2193: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(a7)?
2194: bne.w fu_snan # no; common path
2195:
2196: # the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
2197: # the strategy is to move the exception frame "down" 12 bytes. then, we
2198: # can store the default result where the exception frame was.
2199: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2200: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2201: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2202:
2203: mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
2204: mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
2205:
2206: frestore FP_SRC(%a6) # restore src operand
2207:
2208: mov.l (%a6),%a6 # restore frame pointer
2209:
2210: mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2211: mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2212: mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2213:
2214: # now, we copy the default result to its proper location
2215: mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2216: mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2217: mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2218:
2219: add.l &LOCAL_SIZE-0x8,%sp
2220:
2221:
2222: bra.l _real_snan
2223:
2224: fu_operr_p:
2225: btst &0x5,EXC_SR(%a6) # supervisor mode exception?
2226: bne.w fu_operr_p_s # yes
2227:
2228: mov.l EXC_A7(%a6),%a0 # update user a7
2229: mov.l %a0,%usp
2230: bra.w fu_operr
2231:
2232: fu_operr_p_s:
2233: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(a7)?
2234: bne.w fu_operr # no; common path
2235:
2236: # the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
2237: # the strategy is to move the exception frame "down" 12 bytes. then, we
2238: # can store the default result where the exception frame was.
2239: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2240: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2241: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2242:
2243: mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
2244: mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
2245:
2246: frestore FP_SRC(%a6) # restore src operand
2247:
2248: mov.l (%a6),%a6 # restore frame pointer
2249:
2250: mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2251: mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2252: mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2253:
2254: # now, we copy the default result to its proper location
2255: mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2256: mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2257: mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2258:
2259: add.l &LOCAL_SIZE-0x8,%sp
2260:
2261:
2262: bra.l _real_operr
2263:
2264: fu_inex_p2:
2265: btst &0x5,EXC_SR(%a6) # supervisor mode exception?
2266: bne.w fu_inex_s_p2 # yes
2267:
2268: mov.l EXC_A7(%a6),%a0 # update user a7
2269: mov.l %a0,%usp
2270: bra.w fu_inex
2271:
2272: fu_inex_s_p2:
2273: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(a7)?
2274: bne.w fu_inex # no; common path
2275:
2276: # the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
2277: # the strategy is to move the exception frame "down" 12 bytes. then, we
2278: # can store the default result where the exception frame was.
2279: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2280: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2281: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2282:
2283: mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
2284: mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
2285:
2286: frestore FP_SRC(%a6) # restore src operand
2287:
2288: mov.l (%a6),%a6 # restore frame pointer
2289:
2290: mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2291: mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2292: mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2293:
2294: # now, we copy the default result to its proper location
2295: mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2296: mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2297: mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2298:
2299: add.l &LOCAL_SIZE-0x8,%sp
2300:
2301:
2302: bra.l _real_inex
2303:
2304: #########################################################################
2305:
2306: #
2307: # if we're stuffing a source operand back into an fsave frame then we
2308: # have to make sure that for single or double source operands that the
2309: # format stuffed is as weird as the hardware usually makes it.
2310: #
2311: global funimp_skew
2312: funimp_skew:
2313: bfextu EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
2314: cmpi.b %d0,&0x1 # was src sgl?
2315: beq.b funimp_skew_sgl # yes
2316: cmpi.b %d0,&0x5 # was src dbl?
2317: beq.b funimp_skew_dbl # yes
2318: rts # other formats: no skew needed
2319:
2320: funimp_skew_sgl:
2321: mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
2322: andi.w &0x7fff,%d0 # strip sign
2323: beq.b funimp_skew_sgl_not # exp = 0; no skew needed
2324: cmpi.w %d0,&0x3f80 # within sgl DENORM range?
2325: bgt.b funimp_skew_sgl_not # no; no skew needed
2326: neg.w %d0 # make exponent negative
2327: addi.w &0x3f81,%d0 # find amt to shift
2328: mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
2329: lsr.l %d0,%d1 # shift it
2330: bset &31,%d1 # set j-bit
2331: mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
2332: andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
2333: ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
2334: funimp_skew_sgl_not:
2335: rts
2336:
2337: funimp_skew_dbl:
2338: mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
2339: andi.w &0x7fff,%d0 # strip sign
2340: beq.b funimp_skew_dbl_not # exp = 0; no skew needed
2341: cmpi.w %d0,&0x3c00 # within dbl DENORM range?
2342: bgt.b funimp_skew_dbl_not # no; no skew needed
2343:
2344: tst.b FP_SRC_EX(%a6) # make "internal format"
2345: smi.b 0x2+FP_SRC(%a6) # save sign flag in byte 2
2346: mov.w %d0,FP_SRC_EX(%a6) # insert exponent with cleared sign
2347: clr.l %d0 # clear g,r,s
2348: lea FP_SRC(%a6),%a0 # pass ptr to src op
2349: mov.w &0x3c01,%d1 # pass denorm threshold
2350: bsr.l dnrm_lp # denorm it
2351: mov.w &0x3c00,%d0 # new exponent
2352: tst.b 0x2+FP_SRC(%a6) # is sign set?
2353: beq.b fss_dbl_denorm_done # no
2354: bset &15,%d0 # set sign
2355: fss_dbl_denorm_done:
2356: bset &0x7,FP_SRC_HI(%a6) # set j-bit
2357: mov.w %d0,FP_SRC_EX(%a6) # insert new exponent
2358: funimp_skew_dbl_not:
2359: rts
2360:
2361: #########################################################################
2362: global _mem_write2
2363: _mem_write2:
2364: btst &0x5,EXC_SR(%a6) # supervisor mode exception?
2365: beq.l _dmem_write # no; do the actual data write
2366: mov.l 0x0(%a0),FP_DST_EX(%a6) # yes; stash the 12 bytes in
2367: mov.l 0x4(%a0),FP_DST_HI(%a6) # FP_DST (copied to the stack
2368: mov.l 0x8(%a0),FP_DST_LO(%a6) # later by the -(a7) exit path)
2369: clr.l %d1 # return success
2370: rts
2371:
2372: #########################################################################
2373: # XDEF **************************************************************** #
2374: # _fpsp_effadd(): 060FPSP entry point for FP "Unimplemented #
2375: # effective address" exception. #
2376: # #
2377: # This handler should be the first code executed upon taking the #
2378: # FP Unimplemented Effective Address exception in an operating #
2379: # system. #
2380: # #
2381: # XREF **************************************************************** #
2382: # _imem_read_long() - read instruction longword #
2383: # fix_skewed_ops() - adjust src operand in fsave frame #
2384: # set_tag_x() - determine optype of src/dst operands #
2385: # store_fpreg() - store opclass 0 or 2 result to FP regfile #
2386: # unnorm_fix() - change UNNORM operands to NORM or ZERO #
2387: # load_fpn2() - load dst operand from FP regfile #
2388: # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
2389: # decbin() - convert packed data to FP binary data #
2390: # _real_fpu_disabled() - "callout" for "FPU disabled" exception #
2391: # _real_access() - "callout" for access error exception #
2392: # _mem_read() - read extended immediate operand from memory #
2393: # _fpsp_done() - "callout" for exit; work all done #
2394: # _real_trace() - "callout" for Trace enabled exception #
2395: # fmovm_dynamic() - emulate dynamic fmovm instruction #
2396: # fmovm_ctrl() - emulate fmovm control instruction #
2397: # #
2398: # INPUT *************************************************************** #
2399: # - The system stack contains the "Unimplemented <ea>" stk frame #
2400: # #
2401: # OUTPUT ************************************************************** #
2402: # If access error: #
2403: # - The system stack is changed to an access error stack frame #
2404: # If FPU disabled: #
2405: # - The system stack is changed to an FPU disabled stack frame #
2406: # If Trace exception enabled: #
2407: # - The system stack is changed to a Trace exception stack frame #
2408: # Else: (normal case) #
2409: # - None (correct result has been stored as appropriate) #
2410: # #
2411: # ALGORITHM *********************************************************** #
2412: # This exception handles 3 types of operations: #
2413: # (1) FP Instructions using extended precision or packed immediate #
2414: # addressing mode. #
2415: # (2) The "fmovm.x" instruction w/ dynamic register specification. #
2416: # (3) The "fmovm.l" instruction w/ 2 or 3 control registers. #
2417: # #
2418: # For immediate data operations, the data is read in w/ a #
2419: # _mem_read() "callout", converted to FP binary (if packed), and used #
2420: # as the source operand to the instruction specified by the instruction #
2421: # word. If no FP exception should be reported as a result of the #
2422: # emulation, then the result is stored to the destination register and #
2423: # the handler exits through _fpsp_done(). If an enabled exc has been #
2424: # signalled as a result of emulation, then an fsave state frame #
2425: # corresponding to the FP exception type must be entered into the 060 #
2426: # FPU before exiting. In either the enabled or disabled cases, we #
2427: # must also check if a Trace exception is pending, in which case, we #
2428: # must create a Trace exception stack frame from the current exception #
2429: # stack frame. If no Trace is pending, we simply exit through #
2430: # _fpsp_done(). #
2431: # For "fmovm.x", call the routine fmovm_dynamic() which will #
2432: # decode and emulate the instruction. No FP exceptions can be pending #
2433: # as a result of this operation emulation. A Trace exception can be #
2434: # pending, though, which means the current stack frame must be changed #
2435: # to a Trace stack frame and an exit made through _real_trace(). #
2436: # For the case of "fmovm.x Dn,-(a7)", where the offending instruction #
2437: # was executed from supervisor mode, this handler must store the FP #
2438: # register file values to the system stack by itself since #
2439: # fmovm_dynamic() can't handle this. A normal exit is made through #
2440: # fpsp_done(). #
2441: # For "fmovm.l", fmovm_ctrl() is used to emulate the instruction. #
2442: # Again, a Trace exception may be pending and an exit made through #
2443: # _real_trace(). Else, a normal exit is made through _fpsp_done(). #
2444: # #
2445: # Before any of the above is attempted, it must be checked to #
2446: # see if the FPU is disabled. Since the "Unimp <ea>" exception is taken #
2447: # before the "FPU disabled" exception, but the "FPU disabled" exception #
2448: # has higher priority, we check the disabled bit in the PCR. If set, #
2449: # then we must create an 8 word "FPU disabled" exception stack frame #
2450: # from the current 4 word exception stack frame. This includes #
2451: # reproducing the effective address of the instruction to put on the #
2452: # new stack frame. #
2453: # #
2454: # In the process of all emulation work, if a _mem_read() #
2455: # "callout" returns a failing result indicating an access error, then #
2456: # we must create an access error stack frame from the current stack #
2457: # frame. This information includes a faulting address and a fault- #
2458: # status-longword. These are created within this handler. #
2459: # #
2460: #########################################################################
2461:
2462: global _fpsp_effadd
2463: _fpsp_effadd:
2464:
2465: # This exception type takes priority over the "Line F Emulator"
2466: # exception. Therefore, the FPU could be disabled when entering here.
2467: # So, we must check to see if it's disabled and handle that case separately.
2468: mov.l %d0,-(%sp) # save d0
2469: movc %pcr,%d0 # load proc cr
2470: btst &0x1,%d0 # is FPU disabled?
2471: bne.w iea_disabled # yes
2472: mov.l (%sp)+,%d0 # restore d0
2473:
2474: link %a6,&-LOCAL_SIZE # init stack frame
2475:
2476: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2477: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
2478: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
2479:
2480: # PC of instruction that took the exception is the PC in the frame
2481: mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6) # init extension word pointer
2482:
2483: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
2484: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
2485: bsr.l _imem_read_long # fetch the instruction words
2486: mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
2487:
2488: #########################################################################
2489:
2490: tst.w %d0 # is operation fmovem?
2491: bmi.w iea_fmovm # yes (opword bit 15 set)
2492:
2493: #
2494: # here, we will have:
2495: # fabs fdabs fsabs facos fmod
2496: # fadd fdadd fsadd fasin frem
2497: # fcmp fatan fscale
2498: # fdiv fddiv fsdiv fatanh fsin
2499: # fint fcos fsincos
2500: # fintrz fcosh fsinh
2501: # fmove fdmove fsmove fetox ftan
2502: # fmul fdmul fsmul fetoxm1 ftanh
2503: # fneg fdneg fsneg fgetexp ftentox
2504: # fsgldiv fgetman ftwotox
2505: # fsglmul flog10
2506: # fsqrt flog2
2507: # fsub fdsub fssub flogn
2508: # ftst flognp1
2509: # which can all use f<op>.{x,p}
2510: # so, now it's immediate data extended precision AND PACKED FORMAT!
2511: #
2512: iea_op:
2513: andi.l &0x00ff00ff,USER_FPSR(%a6) # zero ccode and except fields
2514:
2515: btst &0xa,%d0 # is src fmt x or p?
2516: bne.b iea_op_pack # packed
2517:
2518:
2519: mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
2520: lea FP_SRC(%a6),%a1 # pass: ptr to super addr
2521: mov.l &0xc,%d0 # pass: 12 bytes
2522: bsr.l _imem_read # read extended immediate
2523:
2524: tst.l %d1 # did ifetch fail?
2525: bne.w iea_iacc # yes
2526:
2527: bra.b iea_op_setsrc
2528:
2529: iea_op_pack:
2530:
2531: mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
2532: lea FP_SRC(%a6),%a1 # pass: ptr to super dst
2533: mov.l &0xc,%d0 # pass: 12 bytes
2534: bsr.l _imem_read # read packed operand
2535:
2536: tst.l %d1 # did ifetch fail?
2537: bne.w iea_iacc # yes
2538:
2539: # The packed operand is an INF or a NAN if the exponent field is all ones.
2540: bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
2541: cmpi.w %d0,&0x7fff # INF or NAN?
2542: beq.b iea_op_setsrc # operand is an INF or NAN
2543:
2544: # The packed operand is a zero if the mantissa is all zero, else it's
2545: # a normal packed op.
2546: mov.b 3+FP_SRC(%a6),%d0 # get byte 4
2547: andi.b &0x0f,%d0 # clear all but last nybble
2548: bne.b iea_op_gp_not_spec # not a zero
2549: tst.l FP_SRC_HI(%a6) # is lw 2 zero?
2550: bne.b iea_op_gp_not_spec # not a zero
2551: tst.l FP_SRC_LO(%a6) # is lw 3 zero?
2552: beq.b iea_op_setsrc # operand is a ZERO
2553: iea_op_gp_not_spec:
2554: lea FP_SRC(%a6),%a0 # pass: ptr to packed op
2555: bsr.l decbin # convert to extended
2556: fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
2557:
2558: iea_op_setsrc:
2559: addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer (past 12-byte imm)
2560:
2561: # FP_SRC now holds the src operand.
2562: lea FP_SRC(%a6),%a0 # pass: ptr to src op
2563: bsr.l set_tag_x # tag the operand type
2564: mov.b %d0,STAG(%a6) # could be ANYTHING!!!
2565: cmpi.b %d0,&UNNORM # is operand an UNNORM?
2566: bne.b iea_op_getdst # no
2567: bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
2568: mov.b %d0,STAG(%a6) # set new optype tag
2569: iea_op_getdst:
2570: clr.b STORE_FLG(%a6) # clear "store result" boolean
2571:
2572: btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
2573: beq.b iea_op_extract # monadic
2574: btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
2575: bne.b iea_op_spec # yes
2576:
2577: iea_op_loaddst:
2578: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
2579: bsr.l load_fpn2 # load dst operand
2580:
2581: lea FP_DST(%a6),%a0 # pass: ptr to dst op
2582: bsr.l set_tag_x # tag the operand type
2583: mov.b %d0,DTAG(%a6) # could be ANYTHING!!!
2584: cmpi.b %d0,&UNNORM # is operand an UNNORM?
2585: bne.b iea_op_extract # no
2586: bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
2587: mov.b %d0,DTAG(%a6) # set new optype tag
2588: bra.b iea_op_extract
2589:
2590: # the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
2591: iea_op_spec:
2592: btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
2593: beq.b iea_op_extract # yes
2594: # now, we're left with ftst and fcmp. so, first let's tag them so that they don't
2595: # store a result. then, only fcmp will branch back and pick up a dst operand.
2596: st STORE_FLG(%a6) # don't store a final result
2597: btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
2598: beq.b iea_op_loaddst # yes
2599:
2600: iea_op_extract:
2601: clr.l %d0
2602: mov.b FPCR_MODE(%a6),%d0 # pass: rnd mode,prec
2603:
2604: mov.b 1+EXC_CMDREG(%a6),%d1
2605: andi.w &0x007f,%d1 # extract extension
2606:
2607: fmov.l &0x0,%fpcr
2608: fmov.l &0x0,%fpsr
2609:
2610: lea FP_SRC(%a6),%a0
2611: lea FP_DST(%a6),%a1
2612:
2613: mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
2614: jsr (tbl_unsupp.l,%pc,%d1.l*1) # call emulation routine
2615:
2616: #
2617: # Exceptions in order of precedence:
2618: # BSUN : none
2619: # SNAN : all operations
2620: # OPERR : all reg-reg or mem-reg operations that can normally operr
2621: # OVFL : same as OPERR
2622: # UNFL : same as OPERR
2623: # DZ : same as OPERR
2624: # INEX2 : same as OPERR
2625: # INEX1 : all packed immediate operations
2626: #
2627:
2628: # we determine the highest priority exception(if any) set by the
2629: # emulation routine that has also been enabled by the user.
2630: mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
2631: bne.b iea_op_ena # some are enabled
2632:
2633: # now, we save the result, unless, of course, the operation was ftst or fcmp.
2634: # these don't save results.
2635: iea_op_save:
2636: tst.b STORE_FLG(%a6) # does this op store a result?
2637: bne.b iea_op_exit1 # exit with no frestore
2638:
2639: iea_op_store:
2640: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
2641: bsr.l store_fpreg # store the result
2642:
2643: # no FP exception frame needs to be restored here; just fix up the PCs,
2644: # restore the user's register state, and return to the OS.
2643: iea_op_exit1:
2644: mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
2645: mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
2646:
2647: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2648: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2649: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2650:
2651: unlk %a6 # unravel the frame
2652:
2653: btst &0x7,(%sp) # is trace on?
2654: bne.w iea_op_trace # yes
2655:
2656: bra.l _fpsp_done # exit to os
2657:
2658: iea_op_ena:
2659: and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled and set
2660: bfffo %d0{&24:&8},%d0 # find highest priority exception
2661: bne.b iea_op_exc # at least one was set
2662:
2663: # no exception occurred. now, did a disabled, exact overflow occur with inexact
2664: # enabled? if so, then we have to stuff an overflow frame into the FPU.
2665: btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
2666: beq.b iea_op_save # no; normal store-and-exit path
2667:
2668: iea_op_ovfl:
2669: btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
2670: beq.b iea_op_store # no
2671: bra.b iea_op_exc_ovfl # yes
2672:
2673: # an enabled exception occurred. we have to insert the exception type back into
2674: # the machine.
2675: iea_op_exc:
2676: subi.l &24,%d0 # fix offset to be 0-8
2677: cmpi.b %d0,&0x6 # is exception INEX?
2678: bne.b iea_op_exc_force # no
2679:
2680: # the enabled exception was inexact. so, if it occurs with an overflow
2681: # or underflow that was disabled, then we have to force an overflow or
2682: # underflow frame.
2683: btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
2684: bne.b iea_op_exc_ovfl # yes
2685: btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
2686: bne.b iea_op_exc_unfl # yes
2687:
2688: # stuff the frame-type word for the selected exception into the fsave
2689: # image so frestore will re-raise it.
2688: iea_op_exc_force:
2689: mov.w (tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
2690: bra.b iea_op_exit2 # exit with frestore
2691:
2692: # frame-type words indexed by exception priority (BSUN..INEX)
2692: tbl_iea_except:
2693: short 0xe002, 0xe006, 0xe004, 0xe005
2694: short 0xe003, 0xe002, 0xe001, 0xe001
2695:
2696: iea_op_exc_ovfl:
2697: mov.w &0xe005,2+FP_SRC(%a6) # force overflow frame type
2698: bra.b iea_op_exit2
2699:
2700: iea_op_exc_unfl:
2701: mov.w &0xe003,2+FP_SRC(%a6) # force underflow frame type
2702:
# exit path that re-inserts the (possibly modified) exceptional fsave
# frame into the FPU so the pending exception is taken on return.
2703: iea_op_exit2:
2704: mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
2705: mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
2706:
2707: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2708: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2709: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2710:
2711: frestore FP_SRC(%a6) # restore exceptional state
2712:
2713: unlk %a6 # unravel the frame
2714:
2715: btst &0x7,(%sp) # is trace on?
2716: bne.b iea_op_trace # yes
2717:
2718: bra.l _fpsp_done # exit to os
2719:
2720: #
2721: # The opclass two instruction that took an "Unimplemented Effective Address"
2722: # exception was being traced. Make the "current" PC the FPIAR and put it in
2723: # the trace stack frame then jump to _real_trace().
2724: #
2725: # UNIMP EA FRAME TRACE FRAME
2726: # ***************** *****************
2727: # * 0x0 * 0x0f0 * * Current *
2728: # ***************** * PC *
2729: # * Current * *****************
2730: # * PC * * 0x2 * 0x024 *
2731: # ***************** *****************
2732: # * SR * * Next *
2733: # ***************** * PC *
2734: # *****************
2735: # * SR *
2736: # *****************
# convert the 4-word Unimp EA frame on the stack into a 6-word trace
# frame (fmt 0x2, vector offset 0x024) and hand off to the OS tracer.
2737: iea_op_trace:
2738: mov.l (%sp),-(%sp) # shift stack frame "down"
2739: mov.w 0x8(%sp),0x4(%sp) # move SR into its new slot
2740: mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
2741: fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
2742:
2743: bra.l _real_trace
2744:
2745: #########################################################################
# fmovm took the Unimp EA exception: dispatch on ctrl-reg vs. data-reg
# form, then on user vs. supervisor mode for the dynamic (data) form.
2746: iea_fmovm:
2747: btst &14,%d0 # ctrl or data reg
2748: beq.w iea_fmovm_ctrl
2749:
2750: iea_fmovm_data:
2751:
2752: btst &0x5,EXC_SR(%a6) # user or supervisor mode
2753: bne.b iea_fmovm_data_s
2754:
2755: iea_fmovm_data_u:
2756: mov.l %usp,%a0 # fmovm_dynamic needs the real a7
2757: mov.l %a0,EXC_A7(%a6) # store current a7
2758: bsr.l fmovm_dynamic # do dynamic fmovm
2759: mov.l EXC_A7(%a6),%a0 # load possibly new a7
2760: mov.l %a0,%usp # update usp
2761: bra.w iea_fmovm_exit
2762:
# supervisor-mode dynamic fmovm: a7 is the system stack itself, so the
# (a7)+ and -(a7) addressing modes need special post-processing.
2763: iea_fmovm_data_s:
2764: clr.b SPCOND_FLG(%a6) # no special condition yet
2765: lea 0x2+EXC_VOFF(%a6),%a0 # "a7" = just above the exc frame
2766: mov.l %a0,EXC_A7(%a6)
2767: bsr.l fmovm_dynamic # do dynamic fmovm
2768:
2769: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was <ea> -(a7)?
2770: beq.w iea_fmovm_data_predec
2771: cmpi.b SPCOND_FLG(%a6),&mia7_flg # was <ea> (a7)+?
2772: bne.w iea_fmovm_exit
2773:
2774: # right now, d0 = the size.
2775: # the data has been fetched from the supervisor stack, but we have not
2776: # incremented the stack pointer by the appropriate number of bytes.
2777: # do it here.
2778: iea_fmovm_data_postinc:
2779: btst &0x7,EXC_SR(%a6) # is trace enabled?
2780: bne.b iea_fmovm_data_pi_trace
2781:
2782: # build a fresh 4-word frame d0 bytes higher on the stack
2782: mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
2783: mov.l EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
2784: mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
2785:
2786: lea (EXC_SR,%a6,%d0),%a0 # save addr of shifted frame
2787: mov.l %a0,EXC_SR(%a6)
2788:
2789: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2790: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2791: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2792:
2793: unlk %a6
2794: mov.l (%sp)+,%sp # sp = addr of shifted frame
2795: bra.l _fpsp_done
2796:
2797: # same as above but build a 6-word trace frame instead
2797: iea_fmovm_data_pi_trace:
2798: mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
2799: mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
2800: mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
2801: mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
2802:
2803: lea (EXC_SR-0x4,%a6,%d0),%a0
2804: mov.l %a0,EXC_SR(%a6)
2805:
2806: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2807: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2808: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2809:
2810: unlk %a6
2811: mov.l (%sp)+,%sp
2812: bra.l _real_trace
2813:
2814: # right now, d1 = size and d0 = the strg.
# -(a7) in supervisor mode: the registers must be written onto the system
# stack *below* a rebuilt exception frame. We stash strg/size, rebuild the
# frame shifted down by "size" bytes, then store each selected FP register
# with an explicit fmovm.x, walking the strg bits from fp0 (bit 7) down.
2815: iea_fmovm_data_predec:
2816: mov.b %d1,EXC_VOFF(%a6) # store strg
2817: mov.b %d0,0x1+EXC_VOFF(%a6) # store size
2818:
2819: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2820: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2821: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2822:
2823: mov.l (%a6),-(%sp) # make a copy of a6
2824: mov.l %d0,-(%sp) # save d0
2825: mov.l %d1,-(%sp) # save d1
2826: mov.l EXC_EXTWPTR(%a6),-(%sp) # make a copy of Next PC
2827:
2828: clr.l %d0
2829: mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
2830: neg.l %d0 # get negative of size
2831:
2832: btst &0x7,EXC_SR(%a6) # is trace enabled?
2833: beq.b iea_fmovm_data_p2
2834:
2835: # trace on: build a 6-word trace frame "size" bytes lower
2835: mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
2836: mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
2837: mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0) # Next PC from stack copy
2838: mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
2839:
2840: pea (%a6,%d0) # create final sp
2841: bra.b iea_fmovm_data_p3
2842:
2843: # trace off: build a normal 4-word frame "size" bytes lower
2843: iea_fmovm_data_p2:
2844: mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
2845: mov.l (%sp)+,(EXC_PC,%a6,%d0) # Next PC from stack copy
2846: mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
2847:
2848: pea (0x4,%a6,%d0) # create final sp
2849:
2850: iea_fmovm_data_p3:
2851: clr.l %d1
2852: mov.b EXC_VOFF(%a6),%d1 # fetch strg
2853:
2854: # strg bit 7 = fp0, bit 6 = fp1, ... bit 0 = fp7; shift left and test
2854: tst.b %d1
2855: bpl.b fm_1
2856: fmovm.x &0x80,(0x4+0x8,%a6,%d0) # store fp0
2857: addi.l &0xc,%d0 # advance 12 bytes per reg
2858: fm_1:
2859: lsl.b &0x1,%d1
2860: bpl.b fm_2
2861: fmovm.x &0x40,(0x4+0x8,%a6,%d0) # store fp1
2862: addi.l &0xc,%d0
2863: fm_2:
2864: lsl.b &0x1,%d1
2865: bpl.b fm_3
2866: fmovm.x &0x20,(0x4+0x8,%a6,%d0) # store fp2
2867: addi.l &0xc,%d0
2868: fm_3:
2869: lsl.b &0x1,%d1
2870: bpl.b fm_4
2871: fmovm.x &0x10,(0x4+0x8,%a6,%d0) # store fp3
2872: addi.l &0xc,%d0
2873: fm_4:
2874: lsl.b &0x1,%d1
2875: bpl.b fm_5
2876: fmovm.x &0x08,(0x4+0x8,%a6,%d0) # store fp4
2877: addi.l &0xc,%d0
2878: fm_5:
2879: lsl.b &0x1,%d1
2880: bpl.b fm_6
2881: fmovm.x &0x04,(0x4+0x8,%a6,%d0) # store fp5
2882: addi.l &0xc,%d0
2883: fm_6:
2884: lsl.b &0x1,%d1
2885: bpl.b fm_7
2886: fmovm.x &0x02,(0x4+0x8,%a6,%d0) # store fp6
2887: addi.l &0xc,%d0
2888: fm_7:
2889: lsl.b &0x1,%d1
2890: bpl.b fm_end
2891: fmovm.x &0x01,(0x4+0x8,%a6,%d0) # store fp7
2892: fm_end:
2893: mov.l 0x4(%sp),%d1 # restore saved d1
2894: mov.l 0x8(%sp),%d0 # restore saved d0
2895: mov.l 0xc(%sp),%a6 # restore saved a6
2896: mov.l (%sp)+,%sp # switch to rebuilt frame
2897:
2898: btst &0x7,(%sp) # is trace enabled?
2899: beq.l _fpsp_done
2900: bra.l _real_trace
2901:
2902: #########################################################################
# fmovm of control registers (fpcr/fpsr/fpiar): emulate the load, then
# take the common fmovm exit.
2903: iea_fmovm_ctrl:
2904:
2905: bsr.l fmovm_ctrl # load ctrl regs
2906:
2907: iea_fmovm_exit:
2908: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2909: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
2910: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2911:
2912: btst &0x7,EXC_SR(%a6) # is trace on?
2913: bne.b iea_fmovm_trace # yes
2914:
2915: mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
2916:
2917: unlk %a6 # unravel the frame
2918:
2919: bra.l _fpsp_done # exit to os
2920:
2921: #
2922: # The control reg instruction that took an "Unimplemented Effective Address"
2923: # exception was being traced. The "Current PC" for the trace frame is the
2924: # PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
2925: # After fixing the stack frame, jump to _real_trace().
2926: #
2927: # UNIMP EA FRAME TRACE FRAME
2928: # ***************** *****************
2929: # * 0x0 * 0x0f0 * * Current *
2930: # ***************** * PC *
2931: # * Current * *****************
2932: # * PC * * 0x2 * 0x024 *
2933: # ***************** *****************
2934: # * SR * * Next *
2935: # ***************** * PC *
2936: # *****************
2937: # * SR *
2938: # *****************
2939: # this ain't a pretty solution, but it works:
2940: # -restore a6 (not with unlk)
2941: # -shift stack frame down over where old a6 used to be
2942: # -add LOCAL_SIZE to stack pointer
# rebuild the Unimp EA frame as a trace frame in place (a6 restored by
# hand, frame shifted down over the saved a6 slot, locals discarded).
2943: iea_fmovm_trace:
2944: mov.l (%a6),%a6 # restore frame pointer
2945: mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp) # move SR
2946: mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp) # Current PC -> Next PC slot
2947: mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp) # Next PC -> Current PC slot
2948: mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
2949: add.l &LOCAL_SIZE,%sp # clear stack frame
2950:
2951: bra.l _real_trace
2952:
2953: #########################################################################
2954: # The FPU is disabled and so we should really have taken the "Line
2955: # F Emulator" exception. So, here we create an 8-word stack frame
2956: # from our 4-word stack frame. This means we must calculate the length
2957: # of the faulting instruction to get the "next PC". This is trivial for
2958: # immediate operands but requires some extra work for fmovm dynamic
2959: # which can use most addressing modes.
2960: iea_disabled:
2961: mov.l (%sp)+,%d0 # restore d0
2962:
2963: link %a6,&-LOCAL_SIZE # init stack frame
2964:
2965: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2966:
2967: # PC of instruction that took the exception is the PC in the frame
2968: mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
2969: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
2970: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
2971: bsr.l _imem_read_long # fetch the instruction words
2972: mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
2973:
2974: tst.w %d0 # is instr fmovm?
2975: bmi.b iea_dis_fmovm # yes
2976: # instruction is using an extended precision immediate operand. therefore,
2977: # the total instruction length is 16 bytes.
2978: iea_dis_immed:
2979: mov.l &0x10,%d0 # 16 bytes of instruction
2980: bra.b iea_dis_cont
2981: iea_dis_fmovm:
2982: btst &0xe,%d0 # is instr fmovm ctrl
2983: bne.b iea_dis_fmovm_data # no
2984: # the instruction is a fmovm.l with 2 or 3 registers.
2985: bfextu %d0{&19:&3},%d1 # extract ctrl-reg select field
2986: mov.l &0xc,%d0 # assume 2 regs: 12 bytes
2987: cmpi.b %d1,&0x7 # move all regs?
2988: bne.b iea_dis_cont
2989: addq.l &0x4,%d0 # 3 regs: 16 bytes
2990: bra.b iea_dis_cont
2991: # the instruction is an fmovm.x dynamic which can use many addressing
2992: # modes and thus can have several different total instruction lengths.
2993: # call fmovm_calc_ea which will go through the ea calc process and,
2994: # as a by-product, will tell us how long the instruction is.
2995: iea_dis_fmovm_data:
2996: clr.l %d0
2997: bsr.l fmovm_calc_ea
2998: mov.l EXC_EXTWPTR(%a6),%d0
2999: sub.l EXC_PC(%a6),%d0 # length = extwptr - PC
3000: iea_dis_cont:
3001: mov.w %d0,EXC_VOFF(%a6) # store stack shift value
3002:
3003: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3004:
3005: unlk %a6
3006:
3007: # here, we actually create the 8-word frame from the 4-word frame,
3008: # with the "next PC" as additional info.
3009: # the <ea> field is left as undefined.
3010: subq.l &0x8,%sp # make room for new stack
3011: mov.l %d0,-(%sp) # save d0
3012: mov.w 0xc(%sp),0x4(%sp) # move SR
3013: mov.l 0xe(%sp),0x6(%sp) # move Current PC
3014: clr.l %d0
3015: mov.w 0x12(%sp),%d0 # fetch stored instr length
3016: mov.l 0x6(%sp),0x10(%sp) # move Current PC
3017: add.l %d0,0x6(%sp) # make Next PC
3018: mov.w &0x402c,0xa(%sp) # insert offset,frame format
3019: mov.l (%sp)+,%d0 # restore d0
3020:
3021: bra.l _real_fpu_disabled
3022:
3023: ##########
3024:
# instruction-fetch access error while reading the opword/extwords:
# build an access-error (fmt 0x4, voff 0x008) frame and call out.
3025: iea_iacc:
3026: movc %pcr,%d0 # check PCR to see if FPU is
3027: btst &0x1,%d0 # disabled; skip FP restores if so
3028: bne.b iea_iacc_cont
3029: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
3030: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
3031: iea_iacc_cont:
3032: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3033:
3034: unlk %a6
3035:
3036: subq.w &0x8,%sp # make stack frame bigger
3037: mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
3038: mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
3039: mov.w &0x4008,0x6(%sp) # store voff
3040: mov.l 0x2(%sp),0x8(%sp) # store ea
3041: mov.l &0x09428001,0xc(%sp) # store fslw
3042:
3043: iea_acc_done:
3044: btst &0x5,(%sp) # user or supervisor mode?
3045: beq.b iea_acc_done2 # user
3046: bset &0x2,0xd(%sp) # set supervisor TM bit
3047:
3048: iea_acc_done2:
3049: bra.l _real_access
3050:
# data access error during operand fetch: rebuild an access-error frame
# below the current one. a0 holds the fault address and d0 the fslw
# fragment (presumably set by the failing mem-read path -- TODO confirm
# against the callers outside this chunk).
3051: iea_dacc:
3052: lea -LOCAL_SIZE(%a6),%sp # point sp back at our locals
3053:
3054: movc %pcr,%d1 # FPU disabled (PCR bit 1)?
3055: btst &0x1,%d1
3056: bne.b iea_dacc_cont # yes; skip FP restores
3057: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
3058: fmovm.l LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
3059: iea_dacc_cont:
3060: mov.l (%a6),%a6 # restore frame pointer by hand
3061:
3062: # shift SR/PC down 8 bytes and append voff, ea, and fslw words
3062: mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
3063: mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
3064: mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp) # fmt 0x4; voff 0x008
3065: mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp) # fault <ea>
3066: mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp) # fslw hi
3067: mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp) # fslw lo
3068:
3069: movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
3070: add.w &LOCAL_SIZE-0x4,%sp # drop locals; sp -> new frame
3071:
3072: bra.b iea_acc_done
3073:
3074: #########################################################################
3075: # XDEF **************************************************************** #
3076: # _fpsp_operr(): 060FPSP entry point for FP Operr exception. #
3077: # #
3078: # This handler should be the first code executed upon taking the #
3079: # FP Operand Error exception in an operating system. #
3080: # #
3081: # XREF **************************************************************** #
3082: # _imem_read_long() - read instruction longword #
3083: # fix_skewed_ops() - adjust src operand in fsave frame #
3084: # _real_operr() - "callout" to operating system operr handler #
3085: # _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
3086: # store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
3087: # facc_out_{b,w,l}() - store to memory took access error (opcl 3) #
3088: # #
3089: # INPUT *************************************************************** #
3090: # - The system stack contains the FP Operr exception frame #
3091: # - The fsave frame contains the source operand #
3092: # #
3093: # OUTPUT ************************************************************** #
3094: # No access error: #
3095: # - The system stack is unchanged #
3096: # - The fsave frame contains the adjusted src op for opclass 0,2 #
3097: # #
3098: # ALGORITHM *********************************************************** #
3099: # In a system where the FP Operr exception is enabled, the goal #
3100: # is to get to the handler specified at _real_operr(). But, on the 060, #
3101: # for opclass zero and two instruction taking this exception, the #
3102: # input operand in the fsave frame may be incorrect for some cases #
3103: # and needs to be corrected. This handler calls fix_skewed_ops() to #
3104: # do just this and then exits through _real_operr(). #
3105: # For opclass 3 instructions, the 060 doesn't store the default #
3106: # operr result out to memory or data register file as it should. #
3107: # This code must emulate the move out before finally exiting through #
3108: # _real_operr(). The move out, if to memory, is performed using #
3109: # _mem_write() "callout" routines that may return a failing result. #
3110: # In this special case, the handler must exit through facc_out() #
3111: # which creates an access error stack frame from the current operr #
3112: # stack frame. #
3113: # #
3114: #########################################################################
3115:
3116: global _fpsp_operr
# entry point for the FP Operand Error exception: save state, fetch the
# faulting instruction, then either emulate the opclass-3 move-out
# (foperr_out) or unskew the fsave src operand and call out to the OS.
3117: _fpsp_operr:
3118:
3119: link.w %a6,&-LOCAL_SIZE # init stack frame
3120:
3121: fsave FP_SRC(%a6) # grab the "busy" frame
3122:
3123: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3124: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
3125: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3126:
3127: # the FPIAR holds the "current PC" of the faulting instruction
3128: mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
3129:
3130: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
3131: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3132: bsr.l _imem_read_long # fetch the instruction words
3133: mov.l %d0,EXC_OPWORD(%a6)
3134:
3135: ##############################################################################
3136:
3137: btst &13,%d0 # is instr an fmove out?
3138: bne.b foperr_out # fmove out
3139:
3140:
3141: # here, we simply see if the operand in the fsave frame needs to be "unskewed".
3142: # this would be the case for opclass two operations with a source infinity or
3143: # denorm operand in the sgl or dbl format. NANs also become skewed, but can't
3144: # cause an operr so we don't need to check for them here.
3145: lea FP_SRC(%a6),%a0 # pass: ptr to src op
3146: bsr.l fix_skewed_ops # fix src op
3147:
3148: foperr_exit:
3149: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3150: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
3151: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3152:
3153: frestore FP_SRC(%a6) # put exceptional frame back
3154:
3155: unlk %a6
3156: bra.l _real_operr
3157:
3158: ########################################################################
3159:
3160: #
3161: # the hardware does not save the default result to memory on enabled
3162: # operand error exceptions. we do this here before passing control to
3163: # the user operand error handler.
3164: #
3165: # byte, word, and long destination format operations can pass
3166: # through here. we simply need to test the sign of the src
3167: # operand and save the appropriate minimum or maximum integer value
3168: # to the effective address as pointed to by the stacked effective address.
3169: #
3170: # although packed opclass three operations can take operand error
3171: # exceptions, they won't pass through here since they are caught
3172: # first by the unsupported data format exception handler. that handler
3173: # sends them directly to _real_operr() if necessary.
3174: #
# opclass-3 operr: compute the default integer result (max positive or,
# for a negative src, max negative; a QNAN src supplies its own bits) in
# L_SCR1, then dispatch on the destination format to store it.
3175: foperr_out:
3176:
3177: mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
3178: andi.w &0x7fff,%d1 # strip sign
3179: cmpi.w %d1,&0x7fff # max exponent => inf or NAN
3180: bne.b foperr_out_not_qnan
3181: # the operand is either an infinity or a QNAN.
3182: tst.l FP_SRC_LO(%a6) # any mantissa bits set?
3183: bne.b foperr_out_qnan
3184: mov.l FP_SRC_HI(%a6),%d1
3185: andi.l &0x7fffffff,%d1 # zero mantissa => infinity
3186: beq.b foperr_out_not_qnan
3187: foperr_out_qnan:
3188: mov.l FP_SRC_HI(%a6),L_SCR1(%a6) # default result = QNAN hi bits
3189: bra.b foperr_out_jmp
3190:
3191: foperr_out_not_qnan:
3192: mov.l &0x7fffffff,%d1 # max positive integer
3193: tst.b FP_SRC_EX(%a6) # src negative?
3194: bpl.b foperr_out_not_qnan2
3195: addq.l &0x1,%d1 # yes; 0x80000000 = max negative
3196: foperr_out_not_qnan2:
3197: mov.l %d1,L_SCR1(%a6)
3198:
3199: foperr_out_jmp:
3200: bfextu %d0{&19:&3},%d0 # extract dst format field
3201: mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
3202: mov.w (tbl_operr.b,%pc,%d0.w*2),%a0 # fetch table offset
3203: jmp (tbl_operr.b,%pc,%a0) # jump to store routine
3204:
3205: tbl_operr:
3206: short foperr_out_l - tbl_operr # long word integer
3207: short tbl_operr - tbl_operr # sgl prec shouldn't happen
3208: short tbl_operr - tbl_operr # ext prec shouldn't happen
3209: short foperr_exit - tbl_operr # packed won't enter here
3210: short foperr_out_w - tbl_operr # word integer
3211: short tbl_operr - tbl_operr # dbl prec shouldn't happen
3212: short foperr_out_b - tbl_operr # byte integer
3213: short tbl_operr - tbl_operr # packed won't enter here
3214:
3215: foperr_out_b:
3216: mov.b L_SCR1(%a6),%d0 # load positive default result
3217: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3218: ble.b foperr_out_b_save_dn # yes
3219: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3220: bsr.l _dmem_write_byte # write the default result
3221:
3222: tst.l %d1 # did dstore fail?
3223: bne.l facc_out_b # yes
3224:
3225: bra.w foperr_exit
3226: foperr_out_b_save_dn:
3227: andi.w &0x0007,%d1 # pass: dst Dn number
3228: bsr.l store_dreg_b # store result to regfile
3229: bra.w foperr_exit
3230:
3231: foperr_out_w:
3232: mov.w L_SCR1(%a6),%d0 # load positive default result
3233: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3234: ble.b foperr_out_w_save_dn # yes
3235: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3236: bsr.l _dmem_write_word # write the default result
3237:
3238: tst.l %d1 # did dstore fail?
3239: bne.l facc_out_w # yes
3240:
3241: bra.w foperr_exit
3242: foperr_out_w_save_dn:
3243: andi.w &0x0007,%d1 # pass: dst Dn number
3244: bsr.l store_dreg_w # store result to regfile
3245: bra.w foperr_exit
3246:
3247: foperr_out_l:
3248: mov.l L_SCR1(%a6),%d0 # load positive default result
3249: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3250: ble.b foperr_out_l_save_dn # yes
3251: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3252: bsr.l _dmem_write_long # write the default result
3253:
3254: tst.l %d1 # did dstore fail?
3255: bne.l facc_out_l # yes
3256:
3257: bra.w foperr_exit
3258: foperr_out_l_save_dn:
3259: andi.w &0x0007,%d1 # pass: dst Dn number
3260: bsr.l store_dreg_l # store result to regfile
3261: bra.w foperr_exit
3262:
3263: #########################################################################
3264: # XDEF **************************************************************** #
3265: # _fpsp_snan(): 060FPSP entry point for FP SNAN exception. #
3266: # #
3267: # This handler should be the first code executed upon taking the #
3268: # FP Signalling NAN exception in an operating system. #
3269: # #
3270: # XREF **************************************************************** #
3271: # _imem_read_long() - read instruction longword #
3272: # fix_skewed_ops() - adjust src operand in fsave frame #
3273: # _real_snan() - "callout" to operating system SNAN handler #
3274: # _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
3275: # store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
3276: # facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3) #
3277: # _calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea> #
3278: # #
3279: # INPUT *************************************************************** #
3280: # - The system stack contains the FP SNAN exception frame #
3281: # - The fsave frame contains the source operand #
3282: # #
3283: # OUTPUT ************************************************************** #
3284: # No access error: #
3285: # - The system stack is unchanged #
3286: # - The fsave frame contains the adjusted src op for opclass 0,2 #
3287: # #
3288: # ALGORITHM *********************************************************** #
3289: # In a system where the FP SNAN exception is enabled, the goal #
3290: # is to get to the handler specified at _real_snan(). But, on the 060, #
3291: # for opclass zero and two instructions taking this exception, the #
3292: # input operand in the fsave frame may be incorrect for some cases #
3293: # and needs to be corrected. This handler calls fix_skewed_ops() to #
3294: # do just this and then exits through _real_snan(). #
3295: # For opclass 3 instructions, the 060 doesn't store the default #
3296: # SNAN result out to memory or data register file as it should. #
3297: # This code must emulate the move out before finally exiting through #
3298: # _real_snan(). The move out, if to memory, is performed using #
3299: # _mem_write() "callout" routines that may return a failing result. #
3300: # In this special case, the handler must exit through facc_out() #
3301: # which creates an access error stack frame from the current SNAN #
3302: # stack frame. #
3303: # For the case of an extended precision opclass 3 instruction, #
3304: # if the effective addressing mode was -() or ()+, then the address #
3305: # register must get updated by calling _calc_ea_fout(). If the <ea> #
3306: # was -(a7) from supervisor mode, then the exception frame currently #
3307: # on the system stack must be carefully moved "down" to make room #
3308: # for the operand being moved. #
3309: # #
3310: #########################################################################
3311:
3312: global _fpsp_snan
# entry point for the FP Signalling NAN exception: save state, fetch the
# faulting instruction, then either emulate the opclass-3 move-out
# (fsnan_out) or unskew the fsave src operand and call out to the OS.
3313: _fpsp_snan:
3314:
3315: link.w %a6,&-LOCAL_SIZE # init stack frame
3316:
3317: fsave FP_SRC(%a6) # grab the "busy" frame
3318:
3319: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3320: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
3321: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3322:
3323: # the FPIAR holds the "current PC" of the faulting instruction
3324: mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
3325:
3326: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
3327: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3328: bsr.l _imem_read_long # fetch the instruction words
3329: mov.l %d0,EXC_OPWORD(%a6)
3330:
3331: ##############################################################################
3332:
3333: btst &13,%d0 # is instr an fmove out?
3334: bne.w fsnan_out # fmove out
3335:
3336:
3337: # here, we simply see if the operand in the fsave frame needs to be "unskewed".
3338: # this would be the case for opclass two operations with a source infinity or
3339: # denorm operand in the sgl or dbl format. NANs also become skewed and must be
3340: # fixed here.
3341: lea FP_SRC(%a6),%a0 # pass: ptr to src op
3342: bsr.l fix_skewed_ops # fix src op
3343:
3344: fsnan_exit:
3345: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3346: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
3347: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3348:
3349: frestore FP_SRC(%a6) # put exceptional frame back
3350:
3351: unlk %a6
3352: bra.l _real_snan
3353:
3354: ########################################################################
3355:
3356: #
3357: # the hardware does not save the default result to memory on enabled
3358: # snan exceptions. we do this here before passing control to
3359: # the user snan handler.
3360: #
3361: # byte, word, long, and packed destination format operations can pass
3362: # through here. since packed format operations already were handled by
3363: # fpsp_unsupp(), then we need to do nothing else for them here.
3364: # for byte, word, and long, we simply need to test the sign of the src
3365: # operand and save the appropriate minimum or maximum integer value
3366: # to the effective address as pointed to by the stacked effective address.
3367: #
# opclass-3 SNAN: the default stored result is the SNAN converted to the
# destination format with its SNAN bit set (quieted). dispatch on the
# dst format field; packed was already handled by fpsp_unsupp().
3368: fsnan_out:
3369:
3370: bfextu %d0{&19:&3},%d0 # extract dst format field
3371: mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
3372: mov.w (tbl_snan.b,%pc,%d0.w*2),%a0 # fetch table offset
3373: jmp (tbl_snan.b,%pc,%a0) # jump to store routine
3374:
3375: tbl_snan:
3376: short fsnan_out_l - tbl_snan # long word integer
3377: short fsnan_out_s - tbl_snan # sgl prec
3378: short fsnan_out_x - tbl_snan # ext prec
3379: short tbl_snan - tbl_snan # packed needs no help
3380: short fsnan_out_w - tbl_snan # word integer
3381: short fsnan_out_d - tbl_snan # dbl prec
3382: short fsnan_out_b - tbl_snan # byte integer
3383: short tbl_snan - tbl_snan # packed needs no help
3384:
3385: fsnan_out_b:
3386: mov.b FP_SRC_HI(%a6),%d0 # load upper byte of SNAN
3387: bset &6,%d0 # set SNAN bit
3388: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3389: ble.b fsnan_out_b_dn # yes
3390: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3391: bsr.l _dmem_write_byte # write the default result
3392:
3393: tst.l %d1 # did dstore fail?
3394: bne.l facc_out_b # yes
3395:
3396: bra.w fsnan_exit
3397: fsnan_out_b_dn:
3398: andi.w &0x0007,%d1 # pass: dst Dn number
3399: bsr.l store_dreg_b # store result to regfile
3400: bra.w fsnan_exit
3401:
3402: fsnan_out_w:
3403: mov.w FP_SRC_HI(%a6),%d0 # load upper word of SNAN
3404: bset &14,%d0 # set SNAN bit
3405: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3406: ble.b fsnan_out_w_dn # yes
3407: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3408: bsr.l _dmem_write_word # write the default result
3409:
3410: tst.l %d1 # did dstore fail?
3411: bne.l facc_out_w # yes
3412:
3413: bra.w fsnan_exit
3414: fsnan_out_w_dn:
3415: andi.w &0x0007,%d1 # pass: dst Dn number
3416: bsr.l store_dreg_w # store result to regfile
3417: bra.w fsnan_exit
3418:
3419: fsnan_out_l:
3420: mov.l FP_SRC_HI(%a6),%d0 # load upper longword of SNAN
3421: bset &30,%d0 # set SNAN bit
3422: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3423: ble.b fsnan_out_l_dn # yes
3424: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3425: bsr.l _dmem_write_long # write the default result
3426:
3427: tst.l %d1 # did dstore fail?
3428: bne.l facc_out_l # yes
3429:
3430: bra.w fsnan_exit
3431: fsnan_out_l_dn:
3432: andi.w &0x0007,%d1 # pass: dst Dn number
3433: bsr.l store_dreg_l # store result to regfile
3434: bra.w fsnan_exit
3435:
# single-precision dst: rebuild the SNAN in sgl format (sign kept,
# exponent forced to 0xff, quiet bit set, top 23 mantissa bits kept).
3436: fsnan_out_s:
3437: cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3438: ble.b fsnan_out_d_dn # yes
3439: mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
3440: andi.l &0x80000000,%d0 # keep sign
3441: ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
3442: mov.l FP_SRC_HI(%a6),%d1 # load mantissa
3443: lsr.l &0x8,%d1 # shift mantissa for sgl
3444: or.l %d1,%d0 # create sgl SNAN
3445: mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
3446: bsr.l _dmem_write_long # write the default result
3447:
3448: tst.l %d1 # did dstore fail?
3449: bne.l facc_out_l # yes
3450:
3451: bra.w fsnan_exit
3452: # same conversion, but the result goes into a data register; d1 (the
3453: # <ea> mode/reg byte) must survive the mantissa shuffle, so spill it.
3452: fsnan_out_d_dn:
3453: mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
3454: andi.l &0x80000000,%d0 # keep sign
3455: ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
3456: mov.l %d1,-(%sp) # save <ea> mode,reg
3457: mov.l FP_SRC_HI(%a6),%d1 # load mantissa
3458: lsr.l &0x8,%d1 # shift mantissa for sgl
3459: or.l %d1,%d0 # create sgl SNAN
3460: mov.l (%sp)+,%d1 # recover <ea> mode,reg
3461: andi.w &0x0007,%d1 # pass: dst Dn number
3462: bsr.l store_dreg_l # store result to regfile
3463: bra.w fsnan_exit
3464:
3465: # double-precision dst: assemble the 8-byte dbl SNAN in FP_SCR0 (hi
3466: # word: sign + 0x7ff exponent + quiet bit + top mantissa bits; lo word:
3467: # remaining mantissa bits shifted down by 11) and write it to memory.
3465: fsnan_out_d:
3466: mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
3467: andi.l &0x80000000,%d0 # keep sign
3468: ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
3469: mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
3470: mov.l %d0,FP_SCR0_EX(%a6) # store to temp space
3471: mov.l &11,%d0 # load shift amt
3472: lsr.l %d0,%d1
3473: or.l %d1,FP_SCR0_EX(%a6) # create dbl hi
3474: mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
3475: andi.l &0x000007ff,%d1 # bits that spill into the lo word
3476: ror.l %d0,%d1 # rotate them to the top
3477: mov.l %d1,FP_SCR0_HI(%a6) # store to temp space
3478: mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa
3479: lsr.l %d0,%d1
3480: or.l %d1,FP_SCR0_HI(%a6) # create dbl lo
3481: lea FP_SCR0(%a6),%a0 # pass: ptr to operand
3482: mov.l EXC_EA(%a6),%a1 # pass: dst addr
3483: movq.l &0x8,%d0 # pass: size of 8 bytes
3484: bsr.l _dmem_write # write the default result
3485:
3486: tst.l %d1 # did dstore fail?
3487: bne.l facc_out_d # yes
3488:
3489: bra.w fsnan_exit
3490:
3491: # for extended precision, if the addressing mode is pre-decrement or
3492: # post-increment, then the address register did not get updated.
3493: # in addition, for pre-decrement, the stacked <ea> is incorrect.
3494: fsnan_out_x:
3495: clr.b SPCOND_FLG(%a6) # clear special case flag
3496:
# build the 12-byte extended-precision SNAN image in FP_SCR0:
# sign/exponent word, zero pad word, hi mantissa w/ SNAN bit set, lo mantissa.
3497: mov.w FP_SRC_EX(%a6),FP_SCR0_EX(%a6) # copy sign/exponent word
3498: clr.w 2+FP_SCR0(%a6) # zero the pad word
3499: mov.l FP_SRC_HI(%a6),%d0 # fetch hi mantissa
3500: bset &30,%d0 # set SNAN bit
3501: mov.l %d0,FP_SCR0_HI(%a6) # store hi mantissa
3502: mov.l FP_SRC_LO(%a6),FP_SCR0_LO(%a6) # copy lo mantissa
3503:
3504: btst &0x5,EXC_SR(%a6) # supervisor mode exception?
3505: bne.b fsnan_out_x_s # yes
3506:
# user mode: expose usp and the caller's a6 to _calc_ea_fout() via the
# exception frame, then restore both afterwards.
3507: mov.l %usp,%a0 # fetch user stack pointer
3508: mov.l %a0,EXC_A7(%a6) # save on stack for calc_ea()
3509: mov.l (%a6),EXC_A6(%a6) # save caller's a6 for calc_ea()
3510:
3511: bsr.l _calc_ea_fout # find the correct ea,update An
3512: mov.l %a0,%a1 # pass: dst addr
3513: mov.l %a0,EXC_EA(%a6) # stack correct <ea>
3514:
3515: mov.l EXC_A7(%a6),%a0 # fetch (possibly updated) a7
3516: mov.l %a0,%usp # restore user stack pointer
3517: mov.l EXC_A6(%a6),(%a6) # restore caller's a6
3518:
3519: fsnan_out_x_save:
3520: lea FP_SCR0(%a6),%a0 # pass: ptr to operand
3521: movq.l &0xc,%d0 # pass: size of extended
3522: bsr.l _dmem_write # write the default result
3523:
3524: tst.l %d1 # did dstore fail?
3525: bne.l facc_out_x # yes; convert to access error frame
3526:
3527: bra.w fsnan_exit
3528:
3529: fsnan_out_x_s:
3530: mov.l (%a6),EXC_A6(%a6) # save caller's a6 for calc_ea()
3531:
3532: bsr.l _calc_ea_fout # find the correct ea,update An
3533: mov.l %a0,%a1 # pass: dst addr
3534: mov.l %a0,EXC_EA(%a6) # stack correct <ea>
3535:
3536: mov.l EXC_A6(%a6),(%a6) # restore caller's a6
3537:
3538: cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
3539: bne.b fsnan_out_x_save # no
3540:
3541: # the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
# the destination overlaps the exception frame itself, so we cannot use
# _dmem_write(). instead: restore all state, then shift the exception
# frame down 12 bytes and deposit the 12-byte SNAN image in the hole,
# leaving the stack as if the fmove had decremented a7 itself.
3542: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3543: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
3544: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3545:
3546: frestore FP_SRC(%a6)
3547:
3548: mov.l EXC_A6(%a6),%a6 # restore frame pointer
3549:
# move the stacked SR/PC/EA exception frame down 0xc bytes
3550: mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
3551: mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
3552: mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
3553:
# store the 12-byte SNAN image where the old frame was (i.e. at -(a7))
3554: mov.l LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
3555: mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
3556: mov.l LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
3557:
3558: add.l &LOCAL_SIZE-0x8,%sp # pop locals; sp -> shifted frame
3559:
3560: bra.l _real_snan
3561:
3562: #########################################################################
3563: # XDEF **************************************************************** #
3564: # _fpsp_inex(): 060FPSP entry point for FP Inexact exception. #
3565: # #
3566: # This handler should be the first code executed upon taking the #
3567: # FP Inexact exception in an operating system. #
3568: # #
3569: # XREF **************************************************************** #
3570: # _imem_read_long() - read instruction longword #
3571: # fix_skewed_ops() - adjust src operand in fsave frame #
3572: # set_tag_x() - determine optype of src/dst operands #
3573: # store_fpreg() - store opclass 0 or 2 result to FP regfile #
3574: # unnorm_fix() - change UNNORM operands to NORM or ZERO #
3575: # load_fpn2() - load dst operand from FP regfile #
3576: # smovcr() - emulate an "fmovcr" instruction #
3577: # fout() - emulate an opclass 3 instruction #
3578: # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
3579: # _real_inex() - "callout" to operating system inexact handler #
3580: # #
3581: # INPUT *************************************************************** #
3582: # - The system stack contains the FP Inexact exception frame #
3583: # - The fsave frame contains the source operand #
3584: # #
3585: # OUTPUT ************************************************************** #
3586: # - The system stack is unchanged #
3587: # - The fsave frame contains the adjusted src op for opclass 0,2 #
3588: # #
3589: # ALGORITHM *********************************************************** #
3590: # In a system where the FP Inexact exception is enabled, the goal #
3591: # is to get to the handler specified at _real_inex(). But, on the 060, #
3592: # for opclass zero and two instructions taking this exception, the #
3593: # hardware doesn't store the correct result to the destination FP #
3594: # register as did the '040 and '881/2. This handler must emulate the #
3595: # instruction in order to get this value and then store it to the #
3596: # correct register before calling _real_inex(). #
3597: # For opclass 3 instructions, the 060 doesn't store the default #
3598: # inexact result out to memory or data register file as it should. #
3599: # This code must emulate the move out by calling fout() before finally #
3600: # exiting through _real_inex(). #
3601: # #
3602: #########################################################################
3603:
3604: global _fpsp_inex
3605: _fpsp_inex:
3606:
3607: link.w %a6,&-LOCAL_SIZE # init stack frame
3608:
3609: fsave FP_SRC(%a6) # grab the "busy" frame
3610:
3611: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3612: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
3613: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3614:
3615: # the FPIAR holds the "current PC" of the faulting instruction
3616: mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
3617:
3618: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
3619: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3620: bsr.l _imem_read_long # fetch the instruction words
3621: mov.l %d0,EXC_OPWORD(%a6) # stash opword for later decoding
3622:
3623: ##############################################################################
3624:
3625: btst &13,%d0 # is instr an fmove out?
3626: bne.w finex_out # fmove out
3627:
3628:
3629: # the hardware, for "fabs" and "fneg" w/ a long source format, puts the
3630: # longword integer directly into the upper longword of the mantissa along
3631: # w/ an exponent value of 0x401e. we convert this to extended precision here.
3632: bfextu %d0{&19:&3},%d0 # fetch instr size
3633: bne.b finex_cont # instr size is not long
3634: cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
3635: bne.b finex_cont # no
3636: fmov.l &0x0,%fpcr # clear rnd mode for conversion
3637: fmov.l FP_SRC_HI(%a6),%fp0 # load integer src
3638: fmov.x %fp0,FP_SRC(%a6) # store integer as extended precision
3639: mov.w &0xe001,0x2+FP_SRC(%a6) # keep INEX status in fsave frame
3640:
3641: finex_cont:
3642: lea FP_SRC(%a6),%a0 # pass: ptr to src op
3643: bsr.l fix_skewed_ops # fix src op
3644:
3645: # Here, we zero the ccode and exception byte field since we're going to
3646: # emulate the whole instruction. Notice, though, that we don't kill the
3647: # INEX1 bit. This is because a packed op has long since been converted
3648: # to extended before arriving here. Therefore, we need to retain the
3649: # INEX1 bit from when the operand was first converted.
3650: andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
3651:
3652: fmov.l &0x0,%fpcr # zero current control regs
3653: fmov.l &0x0,%fpsr
3654:
3655: bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
3656: cmpi.b %d1,&0x17 # is op an fmovecr?
3657: beq.w finex_fmovcr # yes
3658:
3659: lea FP_SRC(%a6),%a0 # pass: ptr to src op
3660: bsr.l set_tag_x # tag the operand type
3661: mov.b %d0,STAG(%a6) # maybe NORM,DENORM
3662:
3663: # bits four and five of the fp extension word separate the monadic and dyadic
3664: # operations that can pass through fpsp_inex(). remember that fcmp and ftst
3665: # will never take this exception, but fsincos will.
3666: btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
3667: beq.b finex_extract # monadic
3668:
3669: btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
3670: bne.b finex_extract # yes
3671:
3672: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
3673: bsr.l load_fpn2 # load dst into FP_DST
3674:
3675: lea FP_DST(%a6),%a0 # pass: ptr to dst op
3676: bsr.l set_tag_x # tag the operand type
3677: cmpi.b %d0,&UNNORM # is operand an UNNORM?
3678: bne.b finex_op2_done # no
3679: bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
3680: finex_op2_done:
3681: mov.b %d0,DTAG(%a6) # save dst optype tag
3682:
3683: finex_extract:
3684: clr.l %d0
3685: mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
3686:
3687: mov.b 1+EXC_CMDREG(%a6),%d1
3688: andi.w &0x007f,%d1 # extract extension
3689:
3690: lea FP_SRC(%a6),%a0 # pass: ptr to src op
3691: lea FP_DST(%a6),%a1 # pass: ptr to dst op
3692:
3693: mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
3694: jsr (tbl_unsupp.l,%pc,%d1.l*1) # emulate the instruction
3695:
3696: # the operation has been emulated. the result is in fp0.
3697: finex_save:
3698: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst FP reg number
3699: bsr.l store_fpreg # store fp0 result to regfile
3700:
3701: finex_exit:
3702: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3703: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
3704: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3705:
3706: frestore FP_SRC(%a6)
3707:
3708: unlk %a6
3709: bra.l _real_inex
3710:
3711: finex_fmovcr:
3712: clr.l %d0
3713: mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
3714: mov.b 1+EXC_CMDREG(%a6),%d1
3715: andi.l &0x0000007f,%d1 # pass rom offset
3716: bsr.l smovcr # emulate the fmovecr
3717: bra.b finex_save
3718:
3719: ########################################################################
3720:
3721: #
3722: # the hardware does not save the default result to memory on enabled
3723: # inexact exceptions. we do this here before passing control to
3724: # the user inexact handler.
3725: #
3726: # byte, word, and long destination format operations can pass
3727: # through here. so can double and single precision.
3728: # although packed opclass three operations can take inexact
3729: # exceptions, they won't pass through here since they are caught
3730: # first by the unsupported data format exception handler. that handler
3731: # sends them directly to _real_inex() if necessary.
3732: #
3733: finex_out:
3734:
3735: mov.b &NORM,STAG(%a6) # src is a NORM
3736:
3737: clr.l %d0
3738: mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
3739:
3740: andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
3741:
3742: lea FP_SRC(%a6),%a0 # pass ptr to src operand
3743:
3744: bsr.l fout # store the default result
3745:
3746: bra.b finex_exit
3747:
3748: #########################################################################
3749: # XDEF **************************************************************** #
3750: # _fpsp_dz(): 060FPSP entry point for FP DZ exception. #
3751: # #
3752: # This handler should be the first code executed upon taking #
3753: # the FP DZ exception in an operating system. #
3754: # #
3755: # XREF **************************************************************** #
3756: # _imem_read_long() - read instruction longword from memory #
3757: # fix_skewed_ops() - adjust fsave operand #
3758: # _real_dz() - "callout" exit point from FP DZ handler #
3759: # #
3760: # INPUT *************************************************************** #
3761: # - The system stack contains the FP DZ exception stack. #
3762: # - The fsave frame contains the source operand. #
3763: # #
3764: # OUTPUT ************************************************************** #
3765: # - The system stack contains the FP DZ exception stack. #
3766: # - The fsave frame contains the adjusted source operand. #
3767: # #
3768: # ALGORITHM *********************************************************** #
3769: # In a system where the DZ exception is enabled, the goal is to #
3770: # get to the handler specified at _real_dz(). But, on the 060, when the #
3771: # exception is taken, the input operand in the fsave state frame may #
3772: # be incorrect for some cases and need to be adjusted. So, this package #
3773: # adjusts the operand using fix_skewed_ops() and then branches to #
3774: # _real_dz(). #
3775: # #
3776: #########################################################################
3777:
3778: global _fpsp_dz
3779: _fpsp_dz:
3780:
3781: link.w %a6,&-LOCAL_SIZE # init stack frame
3782:
3783: fsave FP_SRC(%a6) # grab the "busy" frame
3784:
3785: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3786: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
3787: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3788:
3789: # the FPIAR holds the "current PC" of the faulting instruction
3790: mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
3791:
3792: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
3793: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3794: bsr.l _imem_read_long # fetch the instruction words
3795: mov.l %d0,EXC_OPWORD(%a6) # stash opword for later decoding
3796:
3797: ##############################################################################
3798:
3799:
3800: # here, we simply see if the operand in the fsave frame needs to be "unskewed".
3801: # this would be the case for opclass two operations with a source zero
3802: # in the sgl or dbl format.
3803: lea FP_SRC(%a6),%a0 # pass: ptr to src op
3804: bsr.l fix_skewed_ops # fix src op
3805:
3806: fdz_exit:
3807: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3808: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
3809: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3810:
3811: frestore FP_SRC(%a6)
3812:
3813: unlk %a6
3814: bra.l _real_dz
3815:
3816: #########################################################################
3817: # XDEF **************************************************************** #
3818: # _fpsp_fline(): 060FPSP entry point for "Line F emulator" exc. #
3819: # #
3820: # This handler should be the first code executed upon taking the #
3821: # "Line F Emulator" exception in an operating system. #
3822: # #
3823: # XREF **************************************************************** #
3824: # _fpsp_unimp() - handle "FP Unimplemented" exceptions #
3825: # _real_fpu_disabled() - handle "FPU disabled" exceptions #
3826: # _real_fline() - handle "FLINE" exceptions #
3827: # _imem_read_long() - read instruction longword #
3828: # #
3829: # INPUT *************************************************************** #
3830: # - The system stack contains a "Line F Emulator" exception #
3831: # stack frame. #
3832: # #
3833: # OUTPUT ************************************************************** #
3834: # - The system stack is unchanged #
3835: # #
3836: # ALGORITHM *********************************************************** #
3837: # When a "Line F Emulator" exception occurs, there are 3 possible #
3838: # exception types, denoted by the exception stack frame format number: #
3839: # (1) FPU unimplemented instruction (6 word stack frame) #
3840: # (2) FPU disabled (8 word stack frame) #
3841: # (3) Line F (4 word stack frame) #
3842: # #
3843: # This module determines which and forks the flow off to the #
3844: # appropriate "callout" (for "disabled" and "Line F") or to the #
3845: # correct emulation code (for "FPU unimplemented"). #
3846: # This code also must check for "fmovecr" instructions w/ a #
3847: # non-zero <ea> field. These may get flagged as "Line F" but should #
3848: # really be flagged as "FPU Unimplemented". (This is a "feature" on #
3849: # the '060.) #
3850: # #
3851: #########################################################################
3852:
3853: global _fpsp_fline
3854: _fpsp_fline:
3855:
3856: # check to see if this exception is a "FP Unimplemented Instruction"
3857: # exception. if so, branch directly to that handler's entry point.
3858: cmpi.w 0x6(%sp),&0x202c # frame fmt 0x2, voff 0x02c?
3859: beq.l _fpsp_unimp
3860:
3861: # check to see if the FPU is disabled. if so, jump to the OS entry
3862: # point for that condition.
3863: cmpi.w 0x6(%sp),&0x402c # frame fmt 0x4, voff 0x02c?
3864: beq.l _real_fpu_disabled
3865:
3866: # the exception was an "F-Line Illegal" exception. we check to see
3867: # if the F-Line instruction is an "fmovecr" w/ a non-zero <ea>. if
3868: # so, convert the F-Line exception stack frame to an FP Unimplemented
3869: # Instruction exception stack frame else branch to the OS entry
3870: # point for the F-Line exception handler.
3871: link.w %a6,&-LOCAL_SIZE # init stack frame
3872:
3873: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3874:
3875: mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
3876: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
3877: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3878: bsr.l _imem_read_long # fetch instruction words
3879:
3880: bfextu %d0{&0:&10},%d1 # is it an fmovecr?
3881: cmpi.w %d1,&0x03c8 # opword bits for fmovecr
3882: bne.b fline_fline # no
3883:
3884: bfextu %d0{&16:&6},%d1 # is it an fmovecr?
3885: cmpi.b %d1,&0x17 # upper cmdreg bits for fmovecr
3886: bne.b fline_fline # no
3887:
3888: # it's an fmovecr w/ a non-zero <ea> that has entered through
3889: # the F-Line Illegal exception.
3890: # so, we need to convert the F-Line exception stack frame into an
3891: # FP Unimplemented Instruction stack frame and jump to that entry
3892: # point.
3893: #
3894: # but, if the FPU is disabled, then we need to jump to the FPU disabled
3895: # entry point.
3896: movc %pcr,%d0 # read processor config register
3897: btst &0x1,%d0 # FPU disable bit set?
3898: beq.b fline_fmovcr # no; FPU is enabled
3899:
3900: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3901:
3902: unlk %a6
3903:
# convert the 4-word F-Line frame into an 8-word FPU disabled frame
3904: sub.l &0x8,%sp # make room for "Next PC", <ea>
3905: mov.w 0x8(%sp),(%sp) # move SR down
3906: mov.l 0xa(%sp),0x2(%sp) # move "Current PC"
3907: mov.w &0x402c,0x6(%sp) # fmt 0x4; voff 0x02c (disabled)
3908: mov.l 0x2(%sp),0xc(%sp) # "Current PC" of faulting instr
3909: addq.l &0x4,0x2(%sp) # set "Next PC"
3910:
3911: bra.l _real_fpu_disabled
3912:
3913: fline_fmovcr:
3914: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3915:
3916: unlk %a6
3917:
3918: fmov.l 0x2(%sp),%fpiar # set current PC
3919: addq.l &0x4,0x2(%sp) # set Next PC
3920:
# convert the 4-word F-Line frame into a 6-word FP Unimp frame
3921: mov.l (%sp),-(%sp) # duplicate SR/PC-hi longword
3922: mov.l 0x8(%sp),0x4(%sp) # move PC-lo/fmt word down
3923: mov.b &0x20,0x6(%sp) # set frame format to 0x2
3924:
3925: bra.l _fpsp_unimp
3926:
3927: fline_fline:
3928: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3929:
3930: unlk %a6
3931:
3932: bra.l _real_fline
3933:
3934: #########################################################################
3935: # XDEF **************************************************************** #
3936: # _fpsp_unimp(): 060FPSP entry point for FP "Unimplemented #
3937: # Instruction" exception. #
3938: # #
3939: # This handler should be the first code executed upon taking the #
3940: # FP Unimplemented Instruction exception in an operating system. #
3941: # #
3942: # XREF **************************************************************** #
3943: # _imem_read_{word,long}() - read instruction word/longword #
3944: # load_fop() - load src/dst ops from memory and/or FP regfile #
3945: # store_fpreg() - store opclass 0 or 2 result to FP regfile #
3946: # tbl_trans - addr of table of emulation routines for trnscndls #
3947: # _real_access() - "callout" for access error exception #
3948: # _fpsp_done() - "callout" for exit; work all done #
3949: # _real_trace() - "callout" for Trace enabled exception #
3950: # smovcr() - emulate "fmovecr" instruction #
3951: # funimp_skew() - adjust fsave src ops to "incorrect" value #
3952: # _ftrapcc() - emulate an "ftrapcc" instruction #
3953: # _fdbcc() - emulate an "fdbcc" instruction #
3954: # _fscc() - emulate an "fscc" instruction #
3955: # _real_trap() - "callout" for Trap exception #
3956: # _real_bsun() - "callout" for enabled Bsun exception #
3957: # #
3958: # INPUT *************************************************************** #
3959: # - The system stack contains the "Unimplemented Instr" stk frame #
3960: # #
3961: # OUTPUT ************************************************************** #
3962: # If access error: #
3963: # - The system stack is changed to an access error stack frame #
3964: # If Trace exception enabled: #
3965: # - The system stack is changed to a Trace exception stack frame #
3966: # Else: (normal case) #
3967: # - Correct result has been stored as appropriate #
3968: # #
3969: # ALGORITHM *********************************************************** #
3970: # There are two main cases of instructions that may enter here to #
3971: # be emulated: (1) the FPgen instructions, most of which were also #
3972: # unimplemented on the 040, and (2) "ftrapcc", "fscc", and "fdbcc". #
3973: # For the first set, this handler calls the routine load_fop() #
3974: # to load the source and destination (for dyadic) operands to be used #
3975: # for instruction emulation. The correct emulation routine is then #
3976: # chosen by decoding the instruction type and indexing into an #
3977: # emulation subroutine index table. After emulation returns, this #
3978: # handler checks to see if an exception should occur as a result of the #
3979: # FP instruction emulation. If so, then an FP exception of the correct #
3980: # type is inserted into the FPU state frame using the "frestore" #
3981: # instruction before exiting through _fpsp_done(). In either the #
3982: # exceptional or non-exceptional cases, we must check to see if the #
3983: # Trace exception is enabled. If so, then we must create a Trace #
3984: # exception frame from the current exception frame and exit through #
3985: # _real_trace(). #
3986: # For "fdbcc", "ftrapcc", and "fscc", the emulation subroutines #
3987: # _fdbcc(), _ftrapcc(), and _fscc() respectively are used. All three #
3988: # may flag that a BSUN exception should be taken. If so, then the #
3989: # current exception stack frame is converted into a BSUN exception #
3990: # stack frame and an exit is made through _real_bsun(). If the #
3991: # instruction was "ftrapcc" and a Trap exception should result, a Trap #
3992: # exception stack frame is created from the current frame and an exit #
3993: # is made through _real_trap(). If a Trace exception is pending, then #
3994: # a Trace exception frame is created from the current frame and a jump #
3995: # is made to _real_trace(). Finally, if none of these conditions exist, #
3996: # then the handler exits though the callout _fpsp_done(). #
3997: # #
3998: # In any of the above scenarios, if a _mem_read() or _mem_write() #
3999: # "callout" returns a failing value, then an access error stack frame #
4000: # is created from the current stack frame and an exit is made through #
4001: # _real_access(). #
4002: # #
4003: #########################################################################
4004:
4005: #
4006: # FP UNIMPLEMENTED INSTRUCTION STACK FRAME:
4007: #
4008: # *****************
4009: # * * => <ea> of fp unimp instr.
4010: # - EA -
4011: # * *
4012: # *****************
4013: # * 0x2 * 0x02c * => frame format and vector offset(vector #11)
4014: # *****************
4015: # * *
4016: # - Next PC - => PC of instr to execute after exc handling
4017: # * *
4018: # *****************
4019: # * SR * => SR at the time the exception was taken
4020: # *****************
4021: #
4022: # Note: the !NULL bit does not get set in the fsave frame when the
4023: # machine encounters an fp unimp exception. Therefore, it must be set
4024: # before leaving this handler.
4025: #
4026: global _fpsp_unimp
4027: _fpsp_unimp:
4028:
4029: link.w %a6,&-LOCAL_SIZE # init stack frame
4030:
4031: movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
4032: fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
4033: fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1
4034:
4035: btst &0x5,EXC_SR(%a6) # user mode exception?
4036: bne.b funimp_s # no; supervisor mode
4037:
4038: # save the value of the user stack pointer onto the stack frame
4039: funimp_u:
4040: mov.l %usp,%a0 # fetch user stack pointer
4041: mov.l %a0,EXC_A7(%a6) # store in stack frame
4042: bra.b funimp_cont
4043:
4044: # store the value of the supervisor stack pointer BEFORE the exc occurred.
4045: # old_sp is address just above stacked effective address.
4046: funimp_s:
4047: lea 4+EXC_EA(%a6),%a0 # load old a7'
4048: mov.l %a0,EXC_A7(%a6) # store a7'
4049: mov.l %a0,OLD_A7(%a6) # make a copy
4050:
4051: funimp_cont:
4052:
4053: # the FPIAR holds the "current PC" of the faulting instruction.
4054: mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
4055:
4056: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
4057: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
4058: bsr.l _imem_read_long # fetch the instruction words
4059: mov.l %d0,EXC_OPWORD(%a6)
4060:
4061: ############################################################################
4062:
4063: fmov.l &0x0,%fpcr # clear FPCR
4064: fmov.l &0x0,%fpsr # clear FPSR
4065:
4066: clr.b SPCOND_FLG(%a6) # clear "special case" flag
4067:
4068: # Divide the fp instructions into 8 types based on the TYPE field in
4069: # bits 6-8 of the opword(classes 6,7 are undefined).
4070: # (for the '060, only two types can take this exception)
4071: # bftst %d0{&7:&3} # test TYPE
4072: btst &22,%d0 # type 0 or 1 ?
4073: bne.w funimp_misc # type 1
4074:
4075: #########################################
4076: # TYPE == 0: General instructions #
4077: #########################################
4078: funimp_gen:
4079:
4080: clr.b STORE_FLG(%a6) # clear "store result" flag
4081:
4082: # clear the ccode byte and exception status byte
4083: andi.l &0x00ff00ff,USER_FPSR(%a6)
4084:
4085: bfextu %d0{&16:&6},%d1 # extract upper 6 of cmdreg
4086: cmpi.b %d1,&0x17 # is op an fmovecr?
4087: beq.w funimp_fmovcr # yes
4088:
4089: funimp_gen_op:
4090: bsr.l _load_fop # load
4091:
4092: clr.l %d0
4093: mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode
4094:
4095: mov.b 1+EXC_CMDREG(%a6),%d1
4096: andi.w &0x003f,%d1 # extract extension bits
4097: lsl.w &0x3,%d1 # shift right 3 bits
4098: or.b STAG(%a6),%d1 # insert src optag bits
4099:
4100: lea FP_DST(%a6),%a1 # pass dst ptr in a1
4101: lea FP_SRC(%a6),%a0 # pass src ptr in a0
4102:
4103: mov.w (tbl_trans.w,%pc,%d1.w*2),%d1
4104: jsr (tbl_trans.w,%pc,%d1.w*1) # emulate
4105:
4106: funimp_fsave:
4107: mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
4108: bne.w funimp_ena # some are enabled
4109:
4110: funimp_store:
4111: bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch Dn
4112: bsr.l store_fpreg # store result to fp regfile
4113:
4114: funimp_gen_exit:
4115: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4116: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4117: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4118:
4119: funimp_gen_exit_cmp:
4120: cmpi.b SPCOND_FLG(%a6),&mia7_flg # was the ea mode (sp)+ ?
4121: beq.b funimp_gen_exit_a7 # yes
4122:
4123: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the ea mode -(sp) ?
4124: beq.b funimp_gen_exit_a7 # yes
4125:
4126: funimp_gen_exit_cont:
4127: unlk %a6
4128:
4129: funimp_gen_exit_cont2:
4130: btst &0x7,(%sp) # is trace on?
4131: beq.l _fpsp_done # no
4132:
4133: # this catches a problem with the case where an exception will be re-inserted
4134: # into the machine. the frestore has already been executed...so, the fmov.l
4135: # alone of the control register would trigger an unwanted exception.
4136: # until I feel like fixing this, we'll sidestep the exception.
4137: fsave -(%sp)
4138: fmov.l %fpiar,0x14(%sp) # "Current PC" is in FPIAR
4139: frestore (%sp)+
4140: mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x24
4141: bra.l _real_trace
4142:
4143: funimp_gen_exit_a7:
4144: btst &0x5,EXC_SR(%a6) # supervisor or user mode?
4145: bne.b funimp_gen_exit_a7_s # supervisor
4146:
4147: mov.l %a0,-(%sp)
4148: mov.l EXC_A7(%a6),%a0
4149: mov.l %a0,%usp
4150: mov.l (%sp)+,%a0
4151: bra.b funimp_gen_exit_cont
4152:
4153: # if the instruction was executed from supervisor mode and the addressing
4154: # mode was (a7)+, then the stack frame for the rte must be shifted "up"
4155: # "n" bytes where "n" is the size of the src operand type.
4156: # f<op>.{b,w,l,s,d,x,p}
4157: funimp_gen_exit_a7_s:
4158: mov.l %d0,-(%sp) # save d0
4159: mov.l EXC_A7(%a6),%d0 # load new a7'
4160: sub.l OLD_A7(%a6),%d0 # subtract old a7'
4161: mov.l 0x2+EXC_PC(%a6),(0x2+EXC_PC,%a6,%d0) # shift stack frame
4162: mov.l EXC_SR(%a6),(EXC_SR,%a6,%d0) # shift stack frame
4163: mov.w %d0,EXC_SR(%a6) # store incr number
4164: mov.l (%sp)+,%d0 # restore d0
4165:
4166: unlk %a6
4167:
4168: add.w (%sp),%sp # stack frame shifted
4169: bra.b funimp_gen_exit_cont2
4170:
4171: ######################
4172: # fmovecr.x #ccc,fpn #
4173: ######################
4174: funimp_fmovcr:
4175: clr.l %d0
4176: mov.b FPCR_MODE(%a6),%d0
4177: mov.b 1+EXC_CMDREG(%a6),%d1
4178: andi.l &0x0000007f,%d1 # pass rom offset in d1
4179: bsr.l smovcr
4180: bra.w funimp_fsave
4181:
4182: #########################################################################
4183:
4184: #
4185: # the user has enabled some exceptions. we figure not to see this too
4186: # often so that's why it gets lower priority.
4187: #
4188: funimp_ena:
4189:
4190: # was an exception set that was also enabled?
4191: and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled and set
4192: bfffo %d0{&24:&8},%d0 # find highest priority exception
4193: bne.b funimp_exc # at least one was set
4194:
4195: # no exception that was enabled was set BUT if we got an exact overflow
4196: # and overflow wasn't enabled but inexact was (yech!) then this is
4197: # an inexact exception; otherwise, return to normal non-exception flow.
4198: btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
4199: beq.w funimp_store # no; return to normal flow
4200:
4201: # the overflow w/ exact result happened but was inexact set in the FPCR?
4202: funimp_ovfl:
4203: btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
4204: beq.w funimp_store # no; return to normal flow
4205: bra.b funimp_exc_ovfl # yes
4206:
4207: # some exception happened that was actually enabled.
4208: # we'll insert this new exception into the FPU and then return.
4209: funimp_exc:
4210: subi.l &24,%d0 # fix offset to be 0-8
4211: cmpi.b %d0,&0x6 # is exception INEX?
4212: bne.b funimp_exc_force # no
4213:
4214: # the enabled exception was inexact. so, if it occurs with an overflow
4215: # or underflow that was disabled, then we have to force an overflow or
4216: # underflow frame. the eventual overflow or underflow handler will see that
4217: # it's actually an inexact and act appropriately. this is the only easy
4218: # way to have the EXOP available for the enabled inexact handler when
4219: # a disabled overflow or underflow has also happened.
4220: btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
4221: bne.b funimp_exc_ovfl # yes
4222: btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
4223: bne.b funimp_exc_unfl # yes
4224:
4225: # force the fsave exception status bits to signal an exception of the
4226: # appropriate type. don't forget to "skew" the source operand in case we
4227: # "unskewed" the one the hardware initially gave us.
4228: funimp_exc_force:
4229: mov.l %d0,-(%sp) # save d0 (enabled exception index 0-7)
4230: bsr.l funimp_skew # check for special case
4231: mov.l (%sp)+,%d0 # restore d0
4232: mov.w (tbl_funimp_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
4233: bra.b funimp_gen_exit2 # exit with frestore
4234: # fsave frame status words, one word per exception index; the selected
4235: tbl_funimp_except:
4236: short 0xe002, 0xe006, 0xe004, 0xe005
4237: short 0xe003, 0xe002, 0xe001, 0xe001
4238: # word is written to 2+FP_SRC above so the frestore raises that exception
4239: # insert an overflow frame
4240: funimp_exc_ovfl:
4241: bsr.l funimp_skew # check for special case
4242: mov.w &0xe005,2+FP_SRC(%a6) # 0xe005 = overflow status word
4243: bra.b funimp_gen_exit2
4244:
4245: # insert an underflow frame
4246: funimp_exc_unfl:
4247: bsr.l funimp_skew # check for special case
4248: mov.w &0xe003,2+FP_SRC(%a6) # 0xe003 = underflow status word
4249: # note: no branch here; falls through into funimp_gen_exit2
4250: # this is the general exit point for an enabled exception that will be
4251: # restored into the machine for the instruction just emulated.
4252: funimp_gen_exit2:
4253: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4254: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4255: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4256:
4257: frestore FP_SRC(%a6) # insert exceptional status
4258:
4259: bra.w funimp_gen_exit_cmp
4260:
4261: ############################################################################
4262:
4263: #
4264: # TYPE == 1: FDB<cc>, FS<cc>, FTRAP<cc>
4265: #
4266: # These instructions were implemented on the '881/2 and '040 in hardware but
4267: # are emulated in software on the '060.
4268: #
4269: funimp_misc:
4270: bfextu %d0{&10:&3},%d1 # extract mode field
4271: cmpi.b %d1,&0x1 # mode 1 -> fdb<cc> Dn,<disp>
4272: beq.w funimp_fdbcc # yes
4273: cmpi.b %d1,&0x7 # mode 7 -> ftrap<cc> or absolute-mode fs<cc>
4274: bne.w funimp_fscc # any mode other than 1 or 7 is fs<cc> <ea>
4275: bfextu %d0{&13:&3},%d1 # mode is 7; extract register field
4276: cmpi.b %d1,&0x2 # register field < 2 -> still an fs<cc>
4277: blt.w funimp_fscc # yes
4278: # register field >= 2: fall through to the ftrap<cc> handler below
4279: #########################
4280: # ftrap<cc> #
4281: # ftrap<cc>.w #<data> #
4282: # ftrap<cc>.l #<data> #
4283: #########################
4284: funimp_ftrapcc:
4285:
4286: bsr.l _ftrapcc # FTRAP<cc>()
4287:
4288: cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
4289: beq.w funimp_bsun # yes
4290:
4291: cmpi.b SPCOND_FLG(%a6),&ftrapcc_flg # should a trap occur?
4292: bne.w funimp_done # no
4293:
4294: # FP UNIMP FRAME TRAP FRAME
4295: # ***************** *****************
4296: # ** <EA> ** ** Current PC **
4297: # ***************** *****************
4298: # * 0x2 * 0x02c * * 0x2 * 0x01c *
4299: # ***************** *****************
4300: # ** Next PC ** ** Next PC **
4301: # ***************** *****************
4302: # * SR * * SR *
4303: # ***************** *****************
4304: # (6 words) (6 words)
4305: #
4306: # the ftrapcc instruction should take a trap. so, here we must create a
4307: # trap stack frame from an unimplemented fp instruction stack frame and
4308: # jump to the user supplied entry point for the trap exception
4309: funimp_ftrapcc_tp:
4310: mov.l USER_FPIAR(%a6),EXC_EA(%a6) # Address = Current PC
4311: mov.w &0x201c,EXC_VOFF(%a6) # Vector Offset = 0x01c (format 0x2 frame)
4312:
4313: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4314: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4315: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4316:
4317: unlk %a6
4318: bra.l _real_trap
4319:
4320: #########################
4321: # fdb<cc> Dn,<label> #
4322: #########################
4323: funimp_fdbcc:
4324: # fetch the 16-bit branch displacement word that follows the opword
4325: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
4326: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4327: bsr.l _imem_read_word # read displacement; d1 nonzero on fault
4328:
4329: tst.l %d1 # did ifetch fail?
4330: bne.w funimp_iacc # yes; convert frame to an access error
4331:
4332: ext.l %d0 # sign extend displacement
4333:
4334: bsr.l _fdbcc # FDB<cc>()
4335:
4336: cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
4337: beq.w funimp_bsun # yes; take the bsun exception instead
4338:
4339: bra.w funimp_done # branch to finish
4340:
4341: #################
4342: # fs<cc>.b <ea> #
4343: #################
4344: funimp_fscc:
4345:
4346: bsr.l _fscc # FS<cc>()
4347:
4348: # I am assuming here that an "fs<cc>.b -(An)" or "fs<cc>.b (An)+" instruction
4349: # does not need to update "An" before taking a bsun exception.
4350: cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
4351: beq.w funimp_bsun # yes; take the bsun exception instead
4352:
4353: btst &0x5,EXC_SR(%a6) # test S bit of the stacked SR
4354: bne.b funimp_fscc_s # set -> supervisor mode exception
4355:
4356: funimp_fscc_u: # user mode: only the USP may need updating
4357: mov.l EXC_A7(%a6),%a0 # yes; set new USP
4358: mov.l %a0,%usp
4359: bra.w funimp_done # branch to finish
4360:
4361: # remember, I'm assuming that post-increment is bogus...(it IS!!!)
4362: # so, the least significant WORD of the stacked effective address got
4363: # overwritten by the "fs<cc> -(An)". We must shift the stack frame "down"
4364: # so that the rte will work correctly without destroying the result.
4365: # even though the operation size is byte, the stack ptr is decr by 2.
4366: #
4367: # remember, also, this instruction may be traced.
4368: funimp_fscc_s:
4369: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was a7 modified?
4370: bne.w funimp_done # no
4371:
4372: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4373: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4374: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4375:
4376: unlk %a6
4377:
4378: btst &0x7,(%sp) # is trace enabled? (T bit of stacked SR)
4379: bne.b funimp_fscc_s_trace # yes
4380:
4381: subq.l &0x2,%sp # a7 was decremented by 2 by the fs<cc>
4382: mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
4383: mov.l 0x6(%sp),0x4(%sp) # shift lo(PC),voff "down"
4384: bra.l _fpsp_done
4385:
4386: funimp_fscc_s_trace: # traced: build a trace frame instead
4387: subq.l &0x2,%sp
4388: mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
4389: mov.w 0x6(%sp),0x4(%sp) # shift lo(PC)
4390: mov.w &0x2024,0x6(%sp) # fmt/voff = $2024
4391: fmov.l %fpiar,0x8(%sp) # insert "current PC"
4392:
4393: bra.l _real_trace
4394:
4395: #
4396: # The ftrap<cc>, fs<cc>, or fdb<cc> is to take an enabled bsun. we must convert
4397: # the fp unimplemented instruction exception stack frame into a bsun stack frame,
4398: # restore a bsun exception into the machine, and branch to the user
4399: # supplied bsun hook.
4400: #
4401: # FP UNIMP FRAME BSUN FRAME
4402: # ***************** *****************
4403: # ** <EA> ** * 0x0 * 0x0c0 *
4404: # ***************** *****************
4405: # * 0x2 * 0x02c * ** Current PC **
4406: # ***************** *****************
4407: # ** Next PC ** * SR *
4408: # ***************** *****************
4409: # * SR * (4 words)
4410: # *****************
4411: # (6 words)
4412: #
4413: funimp_bsun:
4414: mov.w &0x00c0,2+EXC_EA(%a6) # Fmt = 0x0; Vector Offset = 0x0c0
4415: mov.l USER_FPIAR(%a6),EXC_VOFF(%a6) # PC = Current PC
4416: mov.w EXC_SR(%a6),2+EXC_PC(%a6) # shift SR "up"
4417:
4418: mov.w &0xe000,2+FP_SRC(%a6) # bsun exception enabled
4419:
4420: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4421: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4422: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4423:
4424: frestore FP_SRC(%a6) # restore bsun exception
4425:
4426: unlk %a6
4427:
4428: addq.l &0x4,%sp # erase sludge (6-word frame becomes 4 words)
4429:
4430: bra.l _real_bsun # branch to user bsun hook
4431:
4432: #
4433: # all ftrapcc/fscc/fdbcc processing has been completed. unwind the stack frame
4434: # and return.
4435: #
4436: # as usual, we have to check for trace mode being on here. since instructions
4437: # modifying the supervisor stack frame don't pass through here, this is a
4438: # relatively easy task.
4439: #
4440: funimp_done:
4441: fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
4442: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4443: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4444:
4445: unlk %a6
4446:
4447: btst &0x7,(%sp) # is trace enabled? (T bit of stacked SR)
4448: bne.b funimp_trace # yes
4449:
4450: bra.l _fpsp_done
4451:
4452: # FP UNIMP FRAME TRACE FRAME
4453: # ***************** *****************
4454: # ** <EA> ** ** Current PC **
4455: # ***************** *****************
4456: # * 0x2 * 0x02c * * 0x2 * 0x024 *
4457: # ***************** *****************
4458: # ** Next PC ** ** Next PC **
4459: # ***************** *****************
4460: # * SR * * SR *
4461: # ***************** *****************
4462: # (6 words) (6 words)
4463: #
4464: # the fscc instruction should take a trace trap. so, here we must create a
4465: # trace stack frame from an unimplemented fp instruction stack frame and
4466: # jump to the user supplied entry point for the trace exception
4467: funimp_trace:
4468: fmov.l %fpiar,0x8(%sp) # current PC is in fpiar
4469: mov.b &0x24,0x7(%sp) # vector offset = 0x024 (trace)
4470:
4471: bra.l _real_trace
4472:
4473: ################################################################
4474: # tbl_trans: per-opcode dispatch offsets for transcendental emulation
4475: global tbl_trans
4476: swbeg &0x1c0 # switch table: 0x1c0 16-bit entries
4477: tbl_trans: # entry = (handler - tbl_trans); 8 source operand classes per opcode
4478: short tbl_trans - tbl_trans # $00-0 fmovecr all
4479: short tbl_trans - tbl_trans # $00-1 fmovecr all
4480: short tbl_trans - tbl_trans # $00-2 fmovecr all
4481: short tbl_trans - tbl_trans # $00-3 fmovecr all
4482: short tbl_trans - tbl_trans # $00-4 fmovecr all
4483: short tbl_trans - tbl_trans # $00-5 fmovecr all
4484: short tbl_trans - tbl_trans # $00-6 fmovecr all
4485: short tbl_trans - tbl_trans # $00-7 fmovecr all
4486:
4487: short tbl_trans - tbl_trans # $01-0 fint norm
4488: short tbl_trans - tbl_trans # $01-1 fint zero
4489: short tbl_trans - tbl_trans # $01-2 fint inf
4490: short tbl_trans - tbl_trans # $01-3 fint qnan
4491: short tbl_trans - tbl_trans # $01-5 fint denorm
4492: short tbl_trans - tbl_trans # $01-4 fint snan
4493: short tbl_trans - tbl_trans # $01-6 fint unnorm
4494: short tbl_trans - tbl_trans # $01-7 ERROR
4495:
4496: short ssinh - tbl_trans # $02-0 fsinh norm
4497: short src_zero - tbl_trans # $02-1 fsinh zero
4498: short src_inf - tbl_trans # $02-2 fsinh inf
4499: short src_qnan - tbl_trans # $02-3 fsinh qnan
4500: short ssinhd - tbl_trans # $02-5 fsinh denorm
4501: short src_snan - tbl_trans # $02-4 fsinh snan
4502: short tbl_trans - tbl_trans # $02-6 fsinh unnorm
4503: short tbl_trans - tbl_trans # $02-7 ERROR
4504:
4505: short tbl_trans - tbl_trans # $03-0 fintrz norm
4506: short tbl_trans - tbl_trans # $03-1 fintrz zero
4507: short tbl_trans - tbl_trans # $03-2 fintrz inf
4508: short tbl_trans - tbl_trans # $03-3 fintrz qnan
4509: short tbl_trans - tbl_trans # $03-5 fintrz denorm
4510: short tbl_trans - tbl_trans # $03-4 fintrz snan
4511: short tbl_trans - tbl_trans # $03-6 fintrz unnorm
4512: short tbl_trans - tbl_trans # $03-7 ERROR
4513:
4514: short tbl_trans - tbl_trans # $04-0 fsqrt norm
4515: short tbl_trans - tbl_trans # $04-1 fsqrt zero
4516: short tbl_trans - tbl_trans # $04-2 fsqrt inf
4517: short tbl_trans - tbl_trans # $04-3 fsqrt qnan
4518: short tbl_trans - tbl_trans # $04-5 fsqrt denorm
4519: short tbl_trans - tbl_trans # $04-4 fsqrt snan
4520: short tbl_trans - tbl_trans # $04-6 fsqrt unnorm
4521: short tbl_trans - tbl_trans # $04-7 ERROR
4522:
4523: short tbl_trans - tbl_trans # $05-0 ERROR
4524: short tbl_trans - tbl_trans # $05-1 ERROR
4525: short tbl_trans - tbl_trans # $05-2 ERROR
4526: short tbl_trans - tbl_trans # $05-3 ERROR
4527: short tbl_trans - tbl_trans # $05-4 ERROR
4528: short tbl_trans - tbl_trans # $05-5 ERROR
4529: short tbl_trans - tbl_trans # $05-6 ERROR
4530: short tbl_trans - tbl_trans # $05-7 ERROR
4531:
4532: short slognp1 - tbl_trans # $06-0 flognp1 norm
4533: short src_zero - tbl_trans # $06-1 flognp1 zero
4534: short sopr_inf - tbl_trans # $06-2 flognp1 inf
4535: short src_qnan - tbl_trans # $06-3 flognp1 qnan
4536: short slognp1d - tbl_trans # $06-5 flognp1 denorm
4537: short src_snan - tbl_trans # $06-4 flognp1 snan
4538: short tbl_trans - tbl_trans # $06-6 flognp1 unnorm
4539: short tbl_trans - tbl_trans # $06-7 ERROR
4540:
4541: short tbl_trans - tbl_trans # $07-0 ERROR
4542: short tbl_trans - tbl_trans # $07-1 ERROR
4543: short tbl_trans - tbl_trans # $07-2 ERROR
4544: short tbl_trans - tbl_trans # $07-3 ERROR
4545: short tbl_trans - tbl_trans # $07-4 ERROR
4546: short tbl_trans - tbl_trans # $07-5 ERROR
4547: short tbl_trans - tbl_trans # $07-6 ERROR
4548: short tbl_trans - tbl_trans # $07-7 ERROR
4549:
4550: short setoxm1 - tbl_trans # $08-0 fetoxm1 norm
4551: short src_zero - tbl_trans # $08-1 fetoxm1 zero
4552: short setoxm1i - tbl_trans # $08-2 fetoxm1 inf
4553: short src_qnan - tbl_trans # $08-3 fetoxm1 qnan
4554: short setoxm1d - tbl_trans # $08-5 fetoxm1 denorm
4555: short src_snan - tbl_trans # $08-4 fetoxm1 snan
4556: short tbl_trans - tbl_trans # $08-6 fetoxm1 unnorm
4557: short tbl_trans - tbl_trans # $08-7 ERROR
4558:
4559: short stanh - tbl_trans # $09-0 ftanh norm
4560: short src_zero - tbl_trans # $09-1 ftanh zero
4561: short src_one - tbl_trans # $09-2 ftanh inf
4562: short src_qnan - tbl_trans # $09-3 ftanh qnan
4563: short stanhd - tbl_trans # $09-5 ftanh denorm
4564: short src_snan - tbl_trans # $09-4 ftanh snan
4565: short tbl_trans - tbl_trans # $09-6 ftanh unnorm
4566: short tbl_trans - tbl_trans # $09-7 ERROR
4567:
4568: short satan - tbl_trans # $0a-0 fatan norm
4569: short src_zero - tbl_trans # $0a-1 fatan zero
4570: short spi_2 - tbl_trans # $0a-2 fatan inf
4571: short src_qnan - tbl_trans # $0a-3 fatan qnan
4572: short satand - tbl_trans # $0a-5 fatan denorm
4573: short src_snan - tbl_trans # $0a-4 fatan snan
4574: short tbl_trans - tbl_trans # $0a-6 fatan unnorm
4575: short tbl_trans - tbl_trans # $0a-7 ERROR
4576:
4577: short tbl_trans - tbl_trans # $0b-0 ERROR
4578: short tbl_trans - tbl_trans # $0b-1 ERROR
4579: short tbl_trans - tbl_trans # $0b-2 ERROR
4580: short tbl_trans - tbl_trans # $0b-3 ERROR
4581: short tbl_trans - tbl_trans # $0b-4 ERROR
4582: short tbl_trans - tbl_trans # $0b-5 ERROR
4583: short tbl_trans - tbl_trans # $0b-6 ERROR
4584: short tbl_trans - tbl_trans # $0b-7 ERROR
4585:
4586: short sasin - tbl_trans # $0c-0 fasin norm
4587: short src_zero - tbl_trans # $0c-1 fasin zero
4588: short t_operr - tbl_trans # $0c-2 fasin inf
4589: short src_qnan - tbl_trans # $0c-3 fasin qnan
4590: short sasind - tbl_trans # $0c-5 fasin denorm
4591: short src_snan - tbl_trans # $0c-4 fasin snan
4592: short tbl_trans - tbl_trans # $0c-6 fasin unnorm
4593: short tbl_trans - tbl_trans # $0c-7 ERROR
4594:
4595: short satanh - tbl_trans # $0d-0 fatanh norm
4596: short src_zero - tbl_trans # $0d-1 fatanh zero
4597: short t_operr - tbl_trans # $0d-2 fatanh inf
4598: short src_qnan - tbl_trans # $0d-3 fatanh qnan
4599: short satanhd - tbl_trans # $0d-5 fatanh denorm
4600: short src_snan - tbl_trans # $0d-4 fatanh snan
4601: short tbl_trans - tbl_trans # $0d-6 fatanh unnorm
4602: short tbl_trans - tbl_trans # $0d-7 ERROR
4603:
4604: short ssin - tbl_trans # $0e-0 fsin norm
4605: short src_zero - tbl_trans # $0e-1 fsin zero
4606: short t_operr - tbl_trans # $0e-2 fsin inf
4607: short src_qnan - tbl_trans # $0e-3 fsin qnan
4608: short ssind - tbl_trans # $0e-5 fsin denorm
4609: short src_snan - tbl_trans # $0e-4 fsin snan
4610: short tbl_trans - tbl_trans # $0e-6 fsin unnorm
4611: short tbl_trans - tbl_trans # $0e-7 ERROR
4612:
4613: short stan - tbl_trans # $0f-0 ftan norm
4614: short src_zero - tbl_trans # $0f-1 ftan zero
4615: short t_operr - tbl_trans # $0f-2 ftan inf
4616: short src_qnan - tbl_trans # $0f-3 ftan qnan
4617: short stand - tbl_trans # $0f-5 ftan denorm
4618: short src_snan - tbl_trans # $0f-4 ftan snan
4619: short tbl_trans - tbl_trans # $0f-6 ftan unnorm
4620: short tbl_trans - tbl_trans # $0f-7 ERROR
4621:
4622: short setox - tbl_trans # $10-0 fetox norm
4623: short ld_pone - tbl_trans # $10-1 fetox zero
4624: short szr_inf - tbl_trans # $10-2 fetox inf
4625: short src_qnan - tbl_trans # $10-3 fetox qnan
4626: short setoxd - tbl_trans # $10-5 fetox denorm
4627: short src_snan - tbl_trans # $10-4 fetox snan
4628: short tbl_trans - tbl_trans # $10-6 fetox unnorm
4629: short tbl_trans - tbl_trans # $10-7 ERROR
4630:
4631: short stwotox - tbl_trans # $11-0 ftwotox norm
4632: short ld_pone - tbl_trans # $11-1 ftwotox zero
4633: short szr_inf - tbl_trans # $11-2 ftwotox inf
4634: short src_qnan - tbl_trans # $11-3 ftwotox qnan
4635: short stwotoxd - tbl_trans # $11-5 ftwotox denorm
4636: short src_snan - tbl_trans # $11-4 ftwotox snan
4637: short tbl_trans - tbl_trans # $11-6 ftwotox unnorm
4638: short tbl_trans - tbl_trans # $11-7 ERROR
4639:
4640: short stentox - tbl_trans # $12-0 ftentox norm
4641: short ld_pone - tbl_trans # $12-1 ftentox zero
4642: short szr_inf - tbl_trans # $12-2 ftentox inf
4643: short src_qnan - tbl_trans # $12-3 ftentox qnan
4644: short stentoxd - tbl_trans # $12-5 ftentox denorm
4645: short src_snan - tbl_trans # $12-4 ftentox snan
4646: short tbl_trans - tbl_trans # $12-6 ftentox unnorm
4647: short tbl_trans - tbl_trans # $12-7 ERROR
4648:
4649: short tbl_trans - tbl_trans # $13-0 ERROR
4650: short tbl_trans - tbl_trans # $13-1 ERROR
4651: short tbl_trans - tbl_trans # $13-2 ERROR
4652: short tbl_trans - tbl_trans # $13-3 ERROR
4653: short tbl_trans - tbl_trans # $13-4 ERROR
4654: short tbl_trans - tbl_trans # $13-5 ERROR
4655: short tbl_trans - tbl_trans # $13-6 ERROR
4656: short tbl_trans - tbl_trans # $13-7 ERROR
4657:
4658: short slogn - tbl_trans # $14-0 flogn norm
4659: short t_dz2 - tbl_trans # $14-1 flogn zero
4660: short sopr_inf - tbl_trans # $14-2 flogn inf
4661: short src_qnan - tbl_trans # $14-3 flogn qnan
4662: short slognd - tbl_trans # $14-5 flogn denorm
4663: short src_snan - tbl_trans # $14-4 flogn snan
4664: short tbl_trans - tbl_trans # $14-6 flogn unnorm
4665: short tbl_trans - tbl_trans # $14-7 ERROR
4666:
4667: short slog10 - tbl_trans # $15-0 flog10 norm
4668: short t_dz2 - tbl_trans # $15-1 flog10 zero
4669: short sopr_inf - tbl_trans # $15-2 flog10 inf
4670: short src_qnan - tbl_trans # $15-3 flog10 qnan
4671: short slog10d - tbl_trans # $15-5 flog10 denorm
4672: short src_snan - tbl_trans # $15-4 flog10 snan
4673: short tbl_trans - tbl_trans # $15-6 flog10 unnorm
4674: short tbl_trans - tbl_trans # $15-7 ERROR
4675:
4676: short slog2 - tbl_trans # $16-0 flog2 norm
4677: short t_dz2 - tbl_trans # $16-1 flog2 zero
4678: short sopr_inf - tbl_trans # $16-2 flog2 inf
4679: short src_qnan - tbl_trans # $16-3 flog2 qnan
4680: short slog2d - tbl_trans # $16-5 flog2 denorm
4681: short src_snan - tbl_trans # $16-4 flog2 snan
4682: short tbl_trans - tbl_trans # $16-6 flog2 unnorm
4683: short tbl_trans - tbl_trans # $16-7 ERROR
4684:
4685: short tbl_trans - tbl_trans # $17-0 ERROR
4686: short tbl_trans - tbl_trans # $17-1 ERROR
4687: short tbl_trans - tbl_trans # $17-2 ERROR
4688: short tbl_trans - tbl_trans # $17-3 ERROR
4689: short tbl_trans - tbl_trans # $17-4 ERROR
4690: short tbl_trans - tbl_trans # $17-5 ERROR
4691: short tbl_trans - tbl_trans # $17-6 ERROR
4692: short tbl_trans - tbl_trans # $17-7 ERROR
4693:
4694: short tbl_trans - tbl_trans # $18-0 fabs norm
4695: short tbl_trans - tbl_trans # $18-1 fabs zero
4696: short tbl_trans - tbl_trans # $18-2 fabs inf
4697: short tbl_trans - tbl_trans # $18-3 fabs qnan
4698: short tbl_trans - tbl_trans # $18-5 fabs denorm
4699: short tbl_trans - tbl_trans # $18-4 fabs snan
4700: short tbl_trans - tbl_trans # $18-6 fabs unnorm
4701: short tbl_trans - tbl_trans # $18-7 ERROR
4702:
4703: short scosh - tbl_trans # $19-0 fcosh norm
4704: short ld_pone - tbl_trans # $19-1 fcosh zero
4705: short ld_pinf - tbl_trans # $19-2 fcosh inf
4706: short src_qnan - tbl_trans # $19-3 fcosh qnan
4707: short scoshd - tbl_trans # $19-5 fcosh denorm
4708: short src_snan - tbl_trans # $19-4 fcosh snan
4709: short tbl_trans - tbl_trans # $19-6 fcosh unnorm
4710: short tbl_trans - tbl_trans # $19-7 ERROR
4711:
4712: short tbl_trans - tbl_trans # $1a-0 fneg norm
4713: short tbl_trans - tbl_trans # $1a-1 fneg zero
4714: short tbl_trans - tbl_trans # $1a-2 fneg inf
4715: short tbl_trans - tbl_trans # $1a-3 fneg qnan
4716: short tbl_trans - tbl_trans # $1a-5 fneg denorm
4717: short tbl_trans - tbl_trans # $1a-4 fneg snan
4718: short tbl_trans - tbl_trans # $1a-6 fneg unnorm
4719: short tbl_trans - tbl_trans # $1a-7 ERROR
4720:
4721: short tbl_trans - tbl_trans # $1b-0 ERROR
4722: short tbl_trans - tbl_trans # $1b-1 ERROR
4723: short tbl_trans - tbl_trans # $1b-2 ERROR
4724: short tbl_trans - tbl_trans # $1b-3 ERROR
4725: short tbl_trans - tbl_trans # $1b-4 ERROR
4726: short tbl_trans - tbl_trans # $1b-5 ERROR
4727: short tbl_trans - tbl_trans # $1b-6 ERROR
4728: short tbl_trans - tbl_trans # $1b-7 ERROR
4729:
4730: short sacos - tbl_trans # $1c-0 facos norm
4731: short ld_ppi2 - tbl_trans # $1c-1 facos zero
4732: short t_operr - tbl_trans # $1c-2 facos inf
4733: short src_qnan - tbl_trans # $1c-3 facos qnan
4734: short sacosd - tbl_trans # $1c-5 facos denorm
4735: short src_snan - tbl_trans # $1c-4 facos snan
4736: short tbl_trans - tbl_trans # $1c-6 facos unnorm
4737: short tbl_trans - tbl_trans # $1c-7 ERROR
4738:
4739: short scos - tbl_trans # $1d-0 fcos norm
4740: short ld_pone - tbl_trans # $1d-1 fcos zero
4741: short t_operr - tbl_trans # $1d-2 fcos inf
4742: short src_qnan - tbl_trans # $1d-3 fcos qnan
4743: short scosd - tbl_trans # $1d-5 fcos denorm
4744: short src_snan - tbl_trans # $1d-4 fcos snan
4745: short tbl_trans - tbl_trans # $1d-6 fcos unnorm
4746: short tbl_trans - tbl_trans # $1d-7 ERROR
4747:
4748: short sgetexp - tbl_trans # $1e-0 fgetexp norm
4749: short src_zero - tbl_trans # $1e-1 fgetexp zero
4750: short t_operr - tbl_trans # $1e-2 fgetexp inf
4751: short src_qnan - tbl_trans # $1e-3 fgetexp qnan
4752: short sgetexpd - tbl_trans # $1e-5 fgetexp denorm
4753: short src_snan - tbl_trans # $1e-4 fgetexp snan
4754: short tbl_trans - tbl_trans # $1e-6 fgetexp unnorm
4755: short tbl_trans - tbl_trans # $1e-7 ERROR
4756:
4757: short sgetman - tbl_trans # $1f-0 fgetman norm
4758: short src_zero - tbl_trans # $1f-1 fgetman zero
4759: short t_operr - tbl_trans # $1f-2 fgetman inf
4760: short src_qnan - tbl_trans # $1f-3 fgetman qnan
4761: short sgetmand - tbl_trans # $1f-5 fgetman denorm
4762: short src_snan - tbl_trans # $1f-4 fgetman snan
4763: short tbl_trans - tbl_trans # $1f-6 fgetman unnorm
4764: short tbl_trans - tbl_trans # $1f-7 ERROR
4765:
4766: short tbl_trans - tbl_trans # $20-0 fdiv norm
4767: short tbl_trans - tbl_trans # $20-1 fdiv zero
4768: short tbl_trans - tbl_trans # $20-2 fdiv inf
4769: short tbl_trans - tbl_trans # $20-3 fdiv qnan
4770: short tbl_trans - tbl_trans # $20-5 fdiv denorm
4771: short tbl_trans - tbl_trans # $20-4 fdiv snan
4772: short tbl_trans - tbl_trans # $20-6 fdiv unnorm
4773: short tbl_trans - tbl_trans # $20-7 ERROR
4774:
4775: short smod_snorm - tbl_trans # $21-0 fmod norm
4776: short smod_szero - tbl_trans # $21-1 fmod zero
4777: short smod_sinf - tbl_trans # $21-2 fmod inf
4778: short sop_sqnan - tbl_trans # $21-3 fmod qnan
4779: short smod_sdnrm - tbl_trans # $21-5 fmod denorm
4780: short sop_ssnan - tbl_trans # $21-4 fmod snan
4781: short tbl_trans - tbl_trans # $21-6 fmod unnorm
4782: short tbl_trans - tbl_trans # $21-7 ERROR
4783:
4784: short tbl_trans - tbl_trans # $22-0 fadd norm
4785: short tbl_trans - tbl_trans # $22-1 fadd zero
4786: short tbl_trans - tbl_trans # $22-2 fadd inf
4787: short tbl_trans - tbl_trans # $22-3 fadd qnan
4788: short tbl_trans - tbl_trans # $22-5 fadd denorm
4789: short tbl_trans - tbl_trans # $22-4 fadd snan
4790: short tbl_trans - tbl_trans # $22-6 fadd unnorm
4791: short tbl_trans - tbl_trans # $22-7 ERROR
4792:
4793: short tbl_trans - tbl_trans # $23-0 fmul norm
4794: short tbl_trans - tbl_trans # $23-1 fmul zero
4795: short tbl_trans - tbl_trans # $23-2 fmul inf
4796: short tbl_trans - tbl_trans # $23-3 fmul qnan
4797: short tbl_trans - tbl_trans # $23-5 fmul denorm
4798: short tbl_trans - tbl_trans # $23-4 fmul snan
4799: short tbl_trans - tbl_trans # $23-6 fmul unnorm
4800: short tbl_trans - tbl_trans # $23-7 ERROR
4801:
4802: short tbl_trans - tbl_trans # $24-0 fsgldiv norm
4803: short tbl_trans - tbl_trans # $24-1 fsgldiv zero
4804: short tbl_trans - tbl_trans # $24-2 fsgldiv inf
4805: short tbl_trans - tbl_trans # $24-3 fsgldiv qnan
4806: short tbl_trans - tbl_trans # $24-5 fsgldiv denorm
4807: short tbl_trans - tbl_trans # $24-4 fsgldiv snan
4808: short tbl_trans - tbl_trans # $24-6 fsgldiv unnorm
4809: short tbl_trans - tbl_trans # $24-7 ERROR
4810:
4811: short srem_snorm - tbl_trans # $25-0 frem norm
4812: short srem_szero - tbl_trans # $25-1 frem zero
4813: short srem_sinf - tbl_trans # $25-2 frem inf
4814: short sop_sqnan - tbl_trans # $25-3 frem qnan
4815: short srem_sdnrm - tbl_trans # $25-5 frem denorm
4816: short sop_ssnan - tbl_trans # $25-4 frem snan
4817: short tbl_trans - tbl_trans # $25-6 frem unnorm
4818: short tbl_trans - tbl_trans # $25-7 ERROR
4819:
4820: short sscale_snorm - tbl_trans # $26-0 fscale norm
4821: short sscale_szero - tbl_trans # $26-1 fscale zero
4822: short sscale_sinf - tbl_trans # $26-2 fscale inf
4823: short sop_sqnan - tbl_trans # $26-3 fscale qnan
4824: short sscale_sdnrm - tbl_trans # $26-5 fscale denorm
4825: short sop_ssnan - tbl_trans # $26-4 fscale snan
4826: short tbl_trans - tbl_trans # $26-6 fscale unnorm
4827: short tbl_trans - tbl_trans # $26-7 ERROR
4828:
4829: short tbl_trans - tbl_trans # $27-0 fsglmul norm
4830: short tbl_trans - tbl_trans # $27-1 fsglmul zero
4831: short tbl_trans - tbl_trans # $27-2 fsglmul inf
4832: short tbl_trans - tbl_trans # $27-3 fsglmul qnan
4833: short tbl_trans - tbl_trans # $27-5 fsglmul denorm
4834: short tbl_trans - tbl_trans # $27-4 fsglmul snan
4835: short tbl_trans - tbl_trans # $27-6 fsglmul unnorm
4836: short tbl_trans - tbl_trans # $27-7 ERROR
4837:
4838: short tbl_trans - tbl_trans # $28-0 fsub norm
4839: short tbl_trans - tbl_trans # $28-1 fsub zero
4840: short tbl_trans - tbl_trans # $28-2 fsub inf
4841: short tbl_trans - tbl_trans # $28-3 fsub qnan
4842: short tbl_trans - tbl_trans # $28-5 fsub denorm
4843: short tbl_trans - tbl_trans # $28-4 fsub snan
4844: short tbl_trans - tbl_trans # $28-6 fsub unnorm
4845: short tbl_trans - tbl_trans # $28-7 ERROR
4846:
4847: short tbl_trans - tbl_trans # $29-0 ERROR
4848: short tbl_trans - tbl_trans # $29-1 ERROR
4849: short tbl_trans - tbl_trans # $29-2 ERROR
4850: short tbl_trans - tbl_trans # $29-3 ERROR
4851: short tbl_trans - tbl_trans # $29-4 ERROR
4852: short tbl_trans - tbl_trans # $29-5 ERROR
4853: short tbl_trans - tbl_trans # $29-6 ERROR
4854: short tbl_trans - tbl_trans # $29-7 ERROR
4855:
4856: short tbl_trans - tbl_trans # $2a-0 ERROR
4857: short tbl_trans - tbl_trans # $2a-1 ERROR
4858: short tbl_trans - tbl_trans # $2a-2 ERROR
4859: short tbl_trans - tbl_trans # $2a-3 ERROR
4860: short tbl_trans - tbl_trans # $2a-4 ERROR
4861: short tbl_trans - tbl_trans # $2a-5 ERROR
4862: short tbl_trans - tbl_trans # $2a-6 ERROR
4863: short tbl_trans - tbl_trans # $2a-7 ERROR
4864:
4865: short tbl_trans - tbl_trans # $2b-0 ERROR
4866: short tbl_trans - tbl_trans # $2b-1 ERROR
4867: short tbl_trans - tbl_trans # $2b-2 ERROR
4868: short tbl_trans - tbl_trans # $2b-3 ERROR
4869: short tbl_trans - tbl_trans # $2b-4 ERROR
4870: short tbl_trans - tbl_trans # $2b-5 ERROR
4871: short tbl_trans - tbl_trans # $2b-6 ERROR
4872: short tbl_trans - tbl_trans # $2b-7 ERROR
4873:
4874: short tbl_trans - tbl_trans # $2c-0 ERROR
4875: short tbl_trans - tbl_trans # $2c-1 ERROR
4876: short tbl_trans - tbl_trans # $2c-2 ERROR
4877: short tbl_trans - tbl_trans # $2c-3 ERROR
4878: short tbl_trans - tbl_trans # $2c-4 ERROR
4879: short tbl_trans - tbl_trans # $2c-5 ERROR
4880: short tbl_trans - tbl_trans # $2c-6 ERROR
4881: short tbl_trans - tbl_trans # $2c-7 ERROR
4882:
4883: short tbl_trans - tbl_trans # $2d-0 ERROR
4884: short tbl_trans - tbl_trans # $2d-1 ERROR
4885: short tbl_trans - tbl_trans # $2d-2 ERROR
4886: short tbl_trans - tbl_trans # $2d-3 ERROR
4887: short tbl_trans - tbl_trans # $2d-4 ERROR
4888: short tbl_trans - tbl_trans # $2d-5 ERROR
4889: short tbl_trans - tbl_trans # $2d-6 ERROR
4890: short tbl_trans - tbl_trans # $2d-7 ERROR
4891:
4892: short tbl_trans - tbl_trans # $2e-0 ERROR
4893: short tbl_trans - tbl_trans # $2e-1 ERROR
4894: short tbl_trans - tbl_trans # $2e-2 ERROR
4895: short tbl_trans - tbl_trans # $2e-3 ERROR
4896: short tbl_trans - tbl_trans # $2e-4 ERROR
4897: short tbl_trans - tbl_trans # $2e-5 ERROR
4898: short tbl_trans - tbl_trans # $2e-6 ERROR
4899: short tbl_trans - tbl_trans # $2e-7 ERROR
4900:
4901: short tbl_trans - tbl_trans # $2f-0 ERROR
4902: short tbl_trans - tbl_trans # $2f-1 ERROR
4903: short tbl_trans - tbl_trans # $2f-2 ERROR
4904: short tbl_trans - tbl_trans # $2f-3 ERROR
4905: short tbl_trans - tbl_trans # $2f-4 ERROR
4906: short tbl_trans - tbl_trans # $2f-5 ERROR
4907: short tbl_trans - tbl_trans # $2f-6 ERROR
4908: short tbl_trans - tbl_trans # $2f-7 ERROR
4909:
4910: short ssincos - tbl_trans # $30-0 fsincos norm
4911: short ssincosz - tbl_trans # $30-1 fsincos zero
4912: short ssincosi - tbl_trans # $30-2 fsincos inf
4913: short ssincosqnan - tbl_trans # $30-3 fsincos qnan
4914: short ssincosd - tbl_trans # $30-5 fsincos denorm
4915: short ssincossnan - tbl_trans # $30-4 fsincos snan
4916: short tbl_trans - tbl_trans # $30-6 fsincos unnorm
4917: short tbl_trans - tbl_trans # $30-7 ERROR
4918:
4919: short ssincos - tbl_trans # $31-0 fsincos norm
4920: short ssincosz - tbl_trans # $31-1 fsincos zero
4921: short ssincosi - tbl_trans # $31-2 fsincos inf
4922: short ssincosqnan - tbl_trans # $31-3 fsincos qnan
4923: short ssincosd - tbl_trans # $31-5 fsincos denorm
4924: short ssincossnan - tbl_trans # $31-4 fsincos snan
4925: short tbl_trans - tbl_trans # $31-6 fsincos unnorm
4926: short tbl_trans - tbl_trans # $31-7 ERROR
4927:
4928: short ssincos - tbl_trans # $32-0 fsincos norm
4929: short ssincosz - tbl_trans # $32-1 fsincos zero
4930: short ssincosi - tbl_trans # $32-2 fsincos inf
4931: short ssincosqnan - tbl_trans # $32-3 fsincos qnan
4932: short ssincosd - tbl_trans # $32-5 fsincos denorm
4933: short ssincossnan - tbl_trans # $32-4 fsincos snan
4934: short tbl_trans - tbl_trans # $32-6 fsincos unnorm
4935: short tbl_trans - tbl_trans # $32-7 ERROR
4936:
4937: short ssincos - tbl_trans # $33-0 fsincos norm
4938: short ssincosz - tbl_trans # $33-1 fsincos zero
4939: short ssincosi - tbl_trans # $33-2 fsincos inf
4940: short ssincosqnan - tbl_trans # $33-3 fsincos qnan
4941: short ssincosd - tbl_trans # $33-5 fsincos denorm
4942: short ssincossnan - tbl_trans # $33-4 fsincos snan
4943: short tbl_trans - tbl_trans # $33-6 fsincos unnorm
4944: short tbl_trans - tbl_trans # $33-7 ERROR
4945:
4946: short ssincos - tbl_trans # $34-0 fsincos norm
4947: short ssincosz - tbl_trans # $34-1 fsincos zero
4948: short ssincosi - tbl_trans # $34-2 fsincos inf
4949: short ssincosqnan - tbl_trans # $34-3 fsincos qnan
4950: short ssincosd - tbl_trans # $34-5 fsincos denorm
4951: short ssincossnan - tbl_trans # $34-4 fsincos snan
4952: short tbl_trans - tbl_trans # $34-6 fsincos unnorm
4953: short tbl_trans - tbl_trans # $34-7 ERROR
4954:
4955: short ssincos - tbl_trans # $35-0 fsincos norm
4956: short ssincosz - tbl_trans # $35-1 fsincos zero
4957: short ssincosi - tbl_trans # $35-2 fsincos inf
4958: short ssincosqnan - tbl_trans # $35-3 fsincos qnan
4959: short ssincosd - tbl_trans # $35-5 fsincos denorm
4960: short ssincossnan - tbl_trans # $35-4 fsincos snan
4961: short tbl_trans - tbl_trans # $35-6 fsincos unnorm
4962: short tbl_trans - tbl_trans # $35-7 ERROR
4963:
4964: short ssincos - tbl_trans # $36-0 fsincos norm
4965: short ssincosz - tbl_trans # $36-1 fsincos zero
4966: short ssincosi - tbl_trans # $36-2 fsincos inf
4967: short ssincosqnan - tbl_trans # $36-3 fsincos qnan
4968: short ssincosd - tbl_trans # $36-5 fsincos denorm
4969: short ssincossnan - tbl_trans # $36-4 fsincos snan
4970: short tbl_trans - tbl_trans # $36-6 fsincos unnorm
4971: short tbl_trans - tbl_trans # $36-7 ERROR
4972:
4973: short ssincos - tbl_trans # $37-0 fsincos norm
4974: short ssincosz - tbl_trans # $37-1 fsincos zero
4975: short ssincosi - tbl_trans # $37-2 fsincos inf
4976: short ssincosqnan - tbl_trans # $37-3 fsincos qnan
4977: short ssincosd - tbl_trans # $37-5 fsincos denorm
4978: short ssincossnan - tbl_trans # $37-4 fsincos snan
4979: short tbl_trans - tbl_trans # $37-6 fsincos unnorm
4980: short tbl_trans - tbl_trans # $37-7 ERROR
4981:
4982: ##########
4983:
4984: # the instruction fetch access for the displacement word for the
4985: # fdbcc emulation failed. here, we create an access error frame
4986: # from the current frame and branch to _real_access().
4987: funimp_iacc:
4988: movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
4989: fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
4990: fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
4991:
4992: mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
4993:
4994: unlk %a6
4995: # grow the existing 6-word frame into an access error frame in place
4996: mov.l (%sp),-(%sp) # store SR,hi(PC)
4997: mov.w 0x8(%sp),0x4(%sp) # store lo(PC)
4998: mov.w &0x4008,0x6(%sp) # store voff (format 0x4, access error vector)
4999: mov.l 0x2(%sp),0x8(%sp) # store EA
5000: mov.l &0x09428001,0xc(%sp) # store FSLW (presumed ifetch-fault value; confirm vs 68060 UM)
5001:
5002: btst &0x5,(%sp) # user or supervisor mode? (S bit of stacked SR)
5003: beq.b funimp_iacc_end # user
5004: bset &0x2,0xd(%sp) # set supervisor TM bit
5005:
5006: funimp_iacc_end:
5007: bra.l _real_access
5008:
5009: #########################################################################
5010: # ssin(): computes the sine of a normalized input #
5011: # ssind(): computes the sine of a denormalized input #
5012: # scos(): computes the cosine of a normalized input #
5013: # scosd(): computes the cosine of a denormalized input #
5014: # ssincos(): computes the sine and cosine of a normalized input #
5015: # ssincosd(): computes the sine and cosine of a denormalized input #
5016: # #
5017: # INPUT *************************************************************** #
5018: # a0 = pointer to extended precision input #
5019: # d0 = round precision,mode #
5020: # #
5021: # OUTPUT ************************************************************** #
5022: # fp0 = sin(X) or cos(X) #
5023: # #
5024: # For ssincos(X): #
5025: # fp0 = sin(X) #
5026: # fp1 = cos(X) #
5027: # #
5028: # ACCURACY and MONOTONICITY ******************************************* #
5029: # The returned result is within 1 ulp in 64 significant bit, i.e. #
5030: # within 0.5001 ulp to 53 bits if the result is subsequently #
5031: # rounded to double precision. The result is provably monotonic #
5032: # in double precision. #
5033: # #
5034: # ALGORITHM *********************************************************** #
5035: # #
5036: # SIN and COS: #
5037: # 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1. #
5038: # #
5039: # 2. If |X| >= 15Pi or |X| < 2**(-40), go to 7. #
5040: # #
5041: # 3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
5042: # k = N mod 4, so in particular, k = 0,1,2,or 3. #
5043: # Overwrite k by k := k + AdjN. #
5044: # #
5045: # 4. If k is even, go to 6. #
5046: # #
5047: # 5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. #
5048: # Return sgn*cos(r) where cos(r) is approximated by an #
5049: # even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)), #
5050: # s = r*r. #
5051: # Exit. #
5052: # #
5053: # 6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r) #
5054: # where sin(r) is approximated by an odd polynomial in r #
5055: # r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r. #
5056: # Exit. #
5057: # #
5058: # 7. If |X| > 1, go to 9. #
5059: # #
5060: # 8. (|X|<2**(-40)) If SIN is invoked, return X; #
5061: # otherwise return 1. #
5062: # #
5063: # 9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
5064: # go back to 3. #
5065: # #
5066: # SINCOS: #
5067: # 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
5068: # #
5069: # 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
5070: # k = N mod 4, so in particular, k = 0,1,2,or 3. #
5071: # #
5072: # 3. If k is even, go to 5. #
5073: # #
5074: # 4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), ie. #
5075: # j1 exclusive or with the l.s.b. of k. #
5076: # sgn1 := (-1)**j1, sgn2 := (-1)**j2. #
5077: # SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where #
5078: # sin(r) and cos(r) are computed as odd and even #
5079: # polynomials in r, respectively. Exit #
5080: # #
5081: # 5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1. #
5082: # SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where #
5083: # sin(r) and cos(r) are computed as odd and even #
5084: # polynomials in r, respectively. Exit #
5085: # #
5086: # 6. If |X| > 1, go to 8. #
5087: # #
5088: # 7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit. #
5089: # #
5090: # 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
5091: # go back to 2. #
5092: # #
5093: #########################################################################
5094:
# Polynomial coefficients for the sin(r) approximation (SINA1..SINA7)
# and the cos(r) approximation (COSB1..COSB8).  A1-A2 and B2-B3 are
# stored in extended precision, B1 (= -1/2) in single precision, and
# the remaining terms in double precision (see the notes in SINPOLY
# and COSPOLY below).
5095: SINA7:	long 0xBD6AAA77,0xCCC994F5
5096: SINA6:	long 0x3DE61209,0x7AAE8DA1
5097: SINA5:	long 0xBE5AE645,0x2A118AE4
5098: SINA4:	long 0x3EC71DE3,0xA5341531
5099: SINA3:	long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
5100: SINA2:	long 0x3FF80000,0x88888888,0x888859AF,0x00000000
5101: SINA1:	long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
5102: 
5103: COSB8:	long 0x3D2AC4D0,0xD6011EE3
5104: COSB7:	long 0xBDA9396F,0x9F45AC19
5105: COSB6:	long 0x3E21EED9,0x0612C972
5106: COSB5:	long 0xBE927E4F,0xB79D9FCF
5107: COSB4:	long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
5108: COSB3:	long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
5109: COSB2:	long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
5110: COSB1:	long 0xBF000000
5111:
# Scratch-area aliases used by the sin/cos/sincos routines below.
5112: 	set INARG,FP_SCR0
5113: 
5114: 	set X,FP_SCR0 # scratch copy of the argument / sgn-adjusted operand
5115: # set XDCARE,X+2
5116: 	set XFRAC,X+4
5117: 
5118: 	set RPRIME,FP_SCR0 # sgn*r operand for the sincos polynomials
5119: 	set SPRIME,FP_SCR1 # sgn*s (s = r*r) operand for the sincos polynomials
5120: 
5121: 	set POSNEG1,L_SCR1 # +-1.0 in single precision
5122: 	set TWOTO63,L_SCR1 # sign(arg)*2**63 in single precision (SREDUCEX)
5123: 
5124: 	set ENDFLAG,L_SCR2 # nonzero on the final argument-reduction pass
5125: 	set INT,L_SCR2 # N = nearest integer to X*2/PI
5126: 
5127: 	set ADJN,L_SCR3 # 0 = sin, 1 = cos, 4 = sincos
5128:
5129: ############################################
5130: 	global ssin
# ssin(): sine of a normalized input (a0 -> ext. prec. operand).
# ADJN = 0 selects the sine path in the shared SINBGN code.
5131: ssin:
5132: 	mov.l &0,ADJN(%a6) # yes; SET ADJN TO 0
5133: 	bra.b SINBGN
5134: 
5135: ############################################
5136: 	global scos
# scos(): cosine of a normalized input.
# ADJN = 1 selects the cosine path; falls through into SINBGN.
5137: scos:
5138: 	mov.l &1,ADJN(%a6) # yes; SET ADJN TO 1
5139:
5140: ############################################
5141: SINBGN:
5142: #--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
5143: 
5144: 	fmov.x (%a0),%fp0 # LOAD INPUT
5145: 	fmov.x %fp0,X(%a6) # save input at X
5146: 
5147: # "COMPACTIFY" X
# Pack the 15-bit exponent and top 16 mantissa bits into d1 so one
# integer compare can classify |X| against the algorithm thresholds.
5148: 	mov.l (%a0),%d1 # put exp in hi word
5149: 	mov.w 4(%a0),%d1 # fetch hi(man)
5150: 	and.l &0x7FFFFFFF,%d1 # strip sign
5151: 
5152: 	cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)?
5153: 	bge.b SOK1 # yes
5154: 	bra.w SINSM # no; input is very small
5155: 
5156: SOK1:
5157: 	cmp.l %d1,&0x4004BC7E # is |X| < 15 PI?
5158: 	blt.b SINMAIN # yes
5159: 	bra.w SREDUCEX # no; input is very large
5160: 
5161: #--THIS IS THE USUAL CASE, |X| <= 15 PI.
5162: #--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
5163: SINMAIN:
5164: 	fmov.x %fp0,%fp1
5165: 	fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
5166: 
5167: 	lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
5168: 
5169: 	fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
5170: 
5171: 	mov.l INT(%a6),%d1 # make a copy of N
5172: 	asl.l &4,%d1 # N *= 16
5173: 	add.l %d1,%a1 # tbl_addr = a1 + (N*16)
5174: 
5175: # A1 IS THE ADDRESS OF N*PIBY2
5176: # ...WHICH IS IN TWO PIECES Y1 & Y2
5177: 	fsub.x (%a1)+,%fp0 # X-Y1
5178: 	fsub.s (%a1),%fp0 # fp0 = R = (X-Y1)-Y2
5179:
5180: SINCONT:
5181: #--continuation from REDUCEX
5182: 
5183: #--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
# ADJN biases N by 1 for scos, mapping cos(X) onto the same k = N mod 4
# dispatch: even k -> sine polynomial, odd k -> cosine polynomial.
5184: 	mov.l INT(%a6),%d1
5185: 	add.l ADJN(%a6),%d1 # SEE IF D0 IS ODD OR EVEN
5186: 	ror.l &1,%d1 # D0 WAS ODD IFF D0 IS NEGATIVE
5187: 	cmp.l %d1,&0
5188: 	blt.w COSPOLY
5189: 
5190: #--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
5191: #--THEN WE RETURN SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
5192: #--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
5193: #--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
5194: #--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
5195: #--WHERE T=S*S.
5196: #--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
5197: #--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
5198: SINPOLY:
5199: 	fmovm.x &0x0c,-(%sp) # save fp2/fp3
5200: 
5201: 	fmov.x %fp0,X(%a6) # X IS R
5202: 	fmul.x %fp0,%fp0 # FP0 IS S
5203: 
5204: 	fmov.d SINA7(%pc),%fp3
5205: 	fmov.d SINA6(%pc),%fp2
5206: 
5207: 	fmov.x %fp0,%fp1
5208: 	fmul.x %fp1,%fp1 # FP1 IS T
5209: 
5210: 	ror.l &1,%d1
5211: 	and.l &0x80000000,%d1
5212: # ...LEAST SIG. BIT OF D0 IN SIGN POSITION
5213: 	eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
5214: 
# Two Horner chains (odd/even coefficients) are evaluated in parallel
# in fp3/fp2 to shorten the dependency chain, then merged below.
5215: 	fmul.x %fp1,%fp3 # TA7
5216: 	fmul.x %fp1,%fp2 # TA6
5217: 
5218: 	fadd.d SINA5(%pc),%fp3 # A5+TA7
5219: 	fadd.d SINA4(%pc),%fp2 # A4+TA6
5220: 
5221: 	fmul.x %fp1,%fp3 # T(A5+TA7)
5222: 	fmul.x %fp1,%fp2 # T(A4+TA6)
5223: 
5224: 	fadd.d SINA3(%pc),%fp3 # A3+T(A5+TA7)
5225: 	fadd.x SINA2(%pc),%fp2 # A2+T(A4+TA6)
5226: 
5227: 	fmul.x %fp3,%fp1 # T(A3+T(A5+TA7))
5228: 
5229: 	fmul.x %fp0,%fp2 # S(A2+T(A4+TA6))
5230: 	fadd.x SINA1(%pc),%fp1 # A1+T(A3+T(A5+TA7))
5231: 	fmul.x X(%a6),%fp0 # R'*S
5232: 
5233: 	fadd.x %fp2,%fp1 # [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
5234: 
5235: 	fmul.x %fp1,%fp0 # SIN(R')-R'
5236: 
5237: 	fmovm.x (%sp)+,&0x30 # restore fp2/fp3
5238: 
5239: 	fmov.l %d0,%fpcr # restore users round mode,prec
5240: 	fadd.x X(%a6),%fp0 # last inst - possible exception set
5241: 	bra t_inx2
5242:
5243: #--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
5244: #--THEN WE RETURN SGN*COS(R). SGN*COS(R) IS COMPUTED BY
5245: #--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
5246: #--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
5247: #--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
5248: #--WHERE T=S*S.
5249: #--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
5250: #--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
5251: #--AND IS THEREFORE STORED AS SINGLE PRECISION.
5252: COSPOLY:
5253: 	fmovm.x &0x0c,-(%sp) # save fp2/fp3
5254: 
5255: 	fmul.x %fp0,%fp0 # FP0 IS S
5256: 
5257: 	fmov.d COSB8(%pc),%fp2
5258: 	fmov.d COSB7(%pc),%fp3
5259: 
5260: 	fmov.x %fp0,%fp1
5261: 	fmul.x %fp1,%fp1 # FP1 IS T
5262: 
5263: 	fmov.x %fp0,X(%a6) # X IS S
5264: 	ror.l &1,%d1
5265: 	and.l &0x80000000,%d1
5266: # ...LEAST SIG. BIT OF D0 IN SIGN POSITION
5267: 
# Integer sign bookkeeping is interleaved with the FP Horner chains so
# it executes in the shadow of the multiplies.
5268: 	fmul.x %fp1,%fp2 # TB8
5269: 
5270: 	eor.l %d1,X(%a6) # X IS NOW S'= SGN*S
5271: 	and.l &0x80000000,%d1 # (d1 already masked above; redundant but harmless)
5272: 
5273: 	fmul.x %fp1,%fp3 # TB7
5274: 
5275: 	or.l &0x3F800000,%d1 # D0 IS SGN IN SINGLE
5276: 	mov.l %d1,POSNEG1(%a6)
5277: 
5278: 	fadd.d COSB6(%pc),%fp2 # B6+TB8
5279: 	fadd.d COSB5(%pc),%fp3 # B5+TB7
5280: 
5281: 	fmul.x %fp1,%fp2 # T(B6+TB8)
5282: 	fmul.x %fp1,%fp3 # T(B5+TB7)
5283: 
5284: 	fadd.d COSB4(%pc),%fp2 # B4+T(B6+TB8)
5285: 	fadd.x COSB3(%pc),%fp3 # B3+T(B5+TB7)
5286: 
5287: 	fmul.x %fp1,%fp2 # T(B4+T(B6+TB8))
5288: 	fmul.x %fp3,%fp1 # T(B3+T(B5+TB7))
5289: 
5290: 	fadd.x COSB2(%pc),%fp2 # B2+T(B4+T(B6+TB8))
5291: 	fadd.s COSB1(%pc),%fp1 # B1+T(B3+T(B5+TB7))
5292: 
5293: 	fmul.x %fp2,%fp0 # S(B2+T(B4+T(B6+TB8)))
5294: 
5295: 	fadd.x %fp1,%fp0 # [B1+T(...)] + [S(B2+T(...))]
5296: 
5297: 	fmul.x X(%a6),%fp0 # S' * (B1 + S(B2 + ... + SB8))
5298: 
5299: 	fmovm.x (%sp)+,&0x30 # restore fp2/fp3
5300: 
5301: 	fmov.l %d0,%fpcr # restore users round mode,prec
5302: 	fadd.s POSNEG1(%a6),%fp0 # last inst - possible exception set
5303: 	bra t_inx2
5304:
5305: ##############################################
5306: 
5307: # SINe: Big OR Small?
5308: #--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
5309: #--IF |X| < 2**(-40), RETURN X OR 1.
5310: SINBORS:
5311: 	cmp.l %d1,&0x3FFF8000
5312: 	bgt.l SREDUCEX
5313: 
# Tiny-argument case: dispatch on ADJN (0 = sin, 1 = cos).
5314: SINSM:
5315: 	mov.l ADJN(%a6),%d1
5316: 	cmp.l %d1,&0
5317: 	bgt.b COSTINY
5318: 
5319: # here, the operation may underflow iff the precision is sgl or dbl.
5320: # extended denorms are handled through another entry point.
5321: SINTINY:
5322: # 	mov.w &0x0000,XDCARE(%a6) # JUST IN CASE
5323: 
# sin(X) ~= X for |X| < 2**(-40); re-load X under the user's control
# word so the final fmove raises any enabled exceptions itself.
5324: 	fmov.l %d0,%fpcr # restore users round mode,prec
5325: 	mov.b &FMOV_OP,%d1 # last inst is MOVE
5326: 	fmov.x X(%a6),%fp0 # last inst - possible exception set
5327: 	bra t_catch
5328: 
5329: COSTINY:
# cos(X) ~= 1 for tiny X; the subtraction of a tiny value forces the
# result to round per the user's mode and sets inexact.
5330: 	fmov.s &0x3F800000,%fp0 # fp0 = 1.0
5331: 	fmov.l %d0,%fpcr # restore users round mode,prec
5332: 	fadd.s &0x80800000,%fp0 # last inst - possible exception set
5333: 	bra t_pinx2
5334:
5335: ################################################
5336: 	global ssind
5337: #--SIN(X) = X FOR DENORMALIZED X
5338: ssind:
5339: 	bra t_extdnrm
5340: 
5341: ############################################
5342: 	global scosd
5343: #--COS(X) = 1 FOR DENORMALIZED X
5344: scosd:
5345: 	fmov.s &0x3F800000,%fp0 # fp0 = 1.0
5346: 	bra t_pinx2
5347:
5348: ##################################################
5349: 
5350: 	global ssincos
# ssincos(): sine AND cosine of a normalized input.
# Returns sin(X) in fp0; cos(X) is delivered through sto_cos.
5351: ssincos:
5352: #--SET ADJN TO 4
5353: 	mov.l &4,ADJN(%a6)
5354: 
5355: 	fmov.x (%a0),%fp0 # LOAD INPUT
5356: 	fmov.x %fp0,X(%a6)
5357: 
# Pack exponent + hi mantissa into d1 for the magnitude classification
# (same "compactify" trick as SINBGN).
5358: 	mov.l (%a0),%d1
5359: 	mov.w 4(%a0),%d1
5360: 	and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
5361: 
5362: 	cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5363: 	bge.b SCOK1 # yes
5364: 	bra.w SCSM # no; input is very small
5365: 
5366: SCOK1:
5367: 	cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5368: 	blt.b SCMAIN # yes
5369: 	bra.w SREDUCEX # no; input is very large
5370: 
5371: 
5372: #--THIS IS THE USUAL CASE, |X| <= 15 PI.
5373: #--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
5374: SCMAIN:
5375: 	fmov.x %fp0,%fp1
5376: 
5377: 	fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
5378: 
5379: 	lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
5380: 
5381: 	fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
5382: 
5383: 	mov.l INT(%a6),%d1
5384: 	asl.l &4,%d1 # 16 bytes per table entry
5385: 	add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
5386: 
5387: 	fsub.x (%a1)+,%fp0 # X-Y1
5388: 	fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
5389:
5390: SCCONT:
5391: #--continuation point from REDUCEX
5392: 
5393: 	mov.l INT(%a6),%d1
5394: 	ror.l &1,%d1
5395: 	cmp.l %d1,&0 # D0 < 0 IFF N IS ODD
5396: 	bge.w NEVEN
5397: 
# k = N mod 4 is odd: SIN(X) = sgn1*cos(r), COS(X) = sgn2*sin(r),
# where j1 = (k-1)/2, j2 = j1 eor lsb(k), sgn1 = (-1)**j1,
# sgn2 = (-1)**j2 (see the SINCOS algorithm header above).
5398: SNODD:
5399: #--REGISTERS SAVED SO FAR: D0, A0, FP2.
5400: 	fmovm.x &0x04,-(%sp) # save fp2
5401: 
5402: 	fmov.x %fp0,RPRIME(%a6)
5403: 	fmul.x %fp0,%fp0 # FP0 IS S = R*R
5404: 	fmov.d SINA7(%pc),%fp1 # A7
5405: 	fmov.d COSB8(%pc),%fp2 # B8
5406: 	fmul.x %fp0,%fp1 # SA7
5407: 	fmul.x %fp0,%fp2 # SB8
5408: 
# Compute sgn2's sign bit in d2: bit31(d2) = j2 = j1 eor lsb(k).
5409: 	mov.l %d2,-(%sp)
5410: 	mov.l %d1,%d2
5411: 	ror.l &1,%d2
5412: 	and.l &0x80000000,%d2
5413: 	eor.l %d1,%d2
5414: 	and.l &0x80000000,%d2
5415: 
5416: 	fadd.d SINA6(%pc),%fp1 # A6+SA7
5417: 	fadd.d COSB7(%pc),%fp2 # B7+SB8
5418: 
5419: 	fmul.x %fp0,%fp1 # S(A6+SA7)
5420: 	eor.l %d2,RPRIME(%a6) # RPRIME = sgn2*r
5421: 	mov.l (%sp)+,%d2
5422: 	fmul.x %fp0,%fp2 # S(B7+SB8)
5423: 	ror.l &1,%d1
5424: 	and.l &0x80000000,%d1 # bit31(d1) = j1
5425: 	mov.l &0x3F800000,POSNEG1(%a6)
5426: 	eor.l %d1,POSNEG1(%a6) # POSNEG1 = sgn1 (as single 1.0)
5427: 
5428: 	fadd.d SINA5(%pc),%fp1 # A5+S(A6+SA7)
5429: 	fadd.d COSB6(%pc),%fp2 # B6+S(B7+SB8)
5430: 
5431: 	fmul.x %fp0,%fp1 # S(A5+S(A6+SA7))
5432: 	fmul.x %fp0,%fp2 # S(B6+S(B7+SB8))
5433: 	fmov.x %fp0,SPRIME(%a6)
5434: 
5435: 	fadd.d SINA4(%pc),%fp1 # A4+S(A5+S(A6+SA7))
5436: 	eor.l %d1,SPRIME(%a6) # SPRIME = sgn1*s
5437: 	fadd.d COSB5(%pc),%fp2 # B5+S(B6+S(B7+SB8))
5438: 
5439: 	fmul.x %fp0,%fp1 # S(A4+...)
5440: 	fmul.x %fp0,%fp2 # S(B5+...)
5441: 
5442: 	fadd.d SINA3(%pc),%fp1 # A3+S(A4+...)
5443: 	fadd.d COSB4(%pc),%fp2 # B4+S(B5+...)
5444: 
5445: 	fmul.x %fp0,%fp1 # S(A3+...)
5446: 	fmul.x %fp0,%fp2 # S(B4+...)
5447: 
5448: 	fadd.x SINA2(%pc),%fp1 # A2+S(A3+...)
5449: 	fadd.x COSB3(%pc),%fp2 # B3+S(B4+...)
5450: 
5451: 	fmul.x %fp0,%fp1 # S(A2+...)
5452: 	fmul.x %fp0,%fp2 # S(B3+...)
5453: 
5454: 	fadd.x SINA1(%pc),%fp1 # A1+S(A2+...)
5455: 	fadd.x COSB2(%pc),%fp2 # B2+S(B3+...)
5456: 
5457: 	fmul.x %fp0,%fp1 # S(A1+...)
5458: 	fmul.x %fp2,%fp0 # S(B2+...)
5459: 
5460: 	fmul.x RPRIME(%a6),%fp1 # R'S(A1+...)
5461: 	fadd.s COSB1(%pc),%fp0 # B1+S(B2...)
5462: 	fmul.x SPRIME(%a6),%fp0 # S'(B1+S(B2+...))
5463: 
5464: 	fmovm.x (%sp)+,&0x20 # restore fp2
5465: 
5466: 	fmov.l %d0,%fpcr
5467: 	fadd.x RPRIME(%a6),%fp1 # COS(X) = sgn2*sin(r)
5468: 	bsr sto_cos # store cosine result
5469: 	fadd.s POSNEG1(%a6),%fp0 # SIN(X) = sgn1*cos(r)
5470: 	bra t_inx2
5471:
# k = N mod 4 is even: SIN(X) = sgn1*sin(r), COS(X) = sgn1*cos(r),
# where j1 = k/2 and sgn1 = (-1)**j1 (see the SINCOS header above).
5472: NEVEN:
5473: #--REGISTERS SAVED SO FAR: FP2.
5474: 	fmovm.x &0x04,-(%sp) # save fp2
5475: 
5476: 	fmov.x %fp0,RPRIME(%a6)
5477: 	fmul.x %fp0,%fp0 # FP0 IS S = R*R
5478: 
5479: 	fmov.d COSB8(%pc),%fp1 # B8
5480: 	fmov.d SINA7(%pc),%fp2 # A7
5481: 
5482: 	fmul.x %fp0,%fp1 # SB8
5483: 	fmov.x %fp0,SPRIME(%a6)
5484: 	fmul.x %fp0,%fp2 # SA7
5485: 
5486: 	ror.l &1,%d1
5487: 	and.l &0x80000000,%d1 # bit31(d1) = j1
5488: 
5489: 	fadd.d COSB7(%pc),%fp1 # B7+SB8
5490: 	fadd.d SINA6(%pc),%fp2 # A6+SA7
5491: 
5492: 	eor.l %d1,RPRIME(%a6) # RPRIME = sgn1*r
5493: 	eor.l %d1,SPRIME(%a6) # SPRIME = sgn1*s
5494: 
5495: 	fmul.x %fp0,%fp1 # S(B7+SB8)
5496: 
5497: 	or.l &0x3F800000,%d1
5498: 	mov.l %d1,POSNEG1(%a6) # POSNEG1 = sgn1 (as single 1.0)
5499: 
5500: 	fmul.x %fp0,%fp2 # S(A6+SA7)
5501: 
5502: 	fadd.d COSB6(%pc),%fp1 # B6+S(B7+SB8)
5503: 	fadd.d SINA5(%pc),%fp2 # A5+S(A6+SA7)
5504: 
5505: 	fmul.x %fp0,%fp1 # S(B6+S(B7+SB8))
5506: 	fmul.x %fp0,%fp2 # S(A5+S(A6+SA7))
5507: 
5508: 	fadd.d COSB5(%pc),%fp1 # B5+S(B6+S(B7+SB8))
5509: 	fadd.d SINA4(%pc),%fp2 # A4+S(A5+S(A6+SA7))
5510: 
5511: 	fmul.x %fp0,%fp1 # S(B5+...)
5512: 	fmul.x %fp0,%fp2 # S(A4+...)
5513: 
5514: 	fadd.d COSB4(%pc),%fp1 # B4+S(B5+...)
5515: 	fadd.d SINA3(%pc),%fp2 # A3+S(A4+...)
5516: 
5517: 	fmul.x %fp0,%fp1 # S(B4+...)
5518: 	fmul.x %fp0,%fp2 # S(A3+...)
5519: 
5520: 	fadd.x COSB3(%pc),%fp1 # B3+S(B4+...)
5521: 	fadd.x SINA2(%pc),%fp2 # A2+S(A3+...)
5522: 
5523: 	fmul.x %fp0,%fp1 # S(B3+...)
5524: 	fmul.x %fp0,%fp2 # S(A2+...)
5525: 
5526: 	fadd.x COSB2(%pc),%fp1 # B2+S(B3+...)
5527: 	fadd.x SINA1(%pc),%fp2 # A1+S(A2+...)
5528: 
5529: 	fmul.x %fp0,%fp1 # S(B2+...)
5530: 	fmul.x %fp2,%fp0 # s(a1+...)
5531: 
5532: 
5533: 	fadd.s COSB1(%pc),%fp1 # B1+S(B2...)
5534: 	fmul.x RPRIME(%a6),%fp0 # R'S(A1+...)
5535: 	fmul.x SPRIME(%a6),%fp1 # S'(B1+S(B2+...))
5536: 
5537: 	fmovm.x (%sp)+,&0x20 # restore fp2
5538: 
5539: 	fmov.l %d0,%fpcr
5540: 	fadd.s POSNEG1(%a6),%fp1 # COS(X) = sgn1*cos(r)
5541: 	bsr sto_cos # store cosine result
5542: 	fadd.x RPRIME(%a6),%fp0 # SIN(X) = sgn1*sin(r)
5543: 	bra t_inx2
5544:
5545: ################################################
5546: 
5547: SCBORS:
5548: 	cmp.l %d1,&0x3FFF8000
5549: 	bgt.w SREDUCEX
5550: 
5551: ################################################
5552: 
# Tiny-argument sincos: COS(X) ~= 1 (1 - tiny, to round per the user's
# mode and raise inexact), SIN(X) ~= X.
5553: SCSM:
5554: # 	mov.w &0x0000,XDCARE(%a6)
5555: 	fmov.s &0x3F800000,%fp1
5556: 
5557: 	fmov.l %d0,%fpcr
5558: 	fsub.s &0x00800000,%fp1
5559: 	bsr sto_cos # store cosine result
5560: 	fmov.l %fpcr,%d0 # d0 must have fpcr,too
5561: 	mov.b &FMOV_OP,%d1 # last inst is MOVE
5562: 	fmov.x X(%a6),%fp0
5563: 	bra t_catch
5564:
5565: ##############################################
5566: 
5567: 	global ssincosd
5568: #--SIN AND COS OF X FOR DENORMALIZED X
# COS(X) = 1 is stored via sto_cos; SIN(X) = X is produced by the
# shared denorm tail t_extdnrm.  d0 (round prec,mode) is preserved
# across the sto_cos call.
5569: ssincosd:
5570: 	mov.l %d0,-(%sp) # save d0
5571: 	fmov.s &0x3F800000,%fp1
5572: 	bsr sto_cos # store cosine result
5573: 	mov.l (%sp)+,%d0 # restore d0
5574: 	bra t_extdnrm
5575:
5576: ############################################
5577: 
5578: #--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
5579: #--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
5580: #--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
# General argument reduction for |X| >= 15PI: iteratively computes
# X rem (PI/2) with a two-piece representation of PI/2, keeping the
# running remainder as a (high, low) pair in (fp0, fp1).  The integer
# quotient N lands in INT(%a6) and control resumes at SINCONT or
# SCCONT depending on ADJN.
5581: SREDUCEX:
5582: 	fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
5583: 	mov.l %d2,-(%sp) # save d2
5584: 	fmov.s &0x00000000,%fp1 # fp1 = 0
5585: 
5586: #--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
5587: #--there is a danger of unwanted overflow in first LOOP iteration. In this
5588: #--case, reduce argument by one remainder step to make subsequent reduction
5589: #--safe.
5590: 	cmp.l %d1,&0x7ffeffff # is arg dangerously large?
5591: 	bne.b SLOOP # no
5592: 
5593: # yes; create 2**16383*PI/2
5594: 	mov.w &0x7ffe,FP_SCR0_EX(%a6)
5595: 	mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
5596: 	clr.l FP_SCR0_LO(%a6)
5597: 
5598: # create low half of 2**16383*PI/2 at FP_SCR1
5599: 	mov.w &0x7fdc,FP_SCR1_EX(%a6)
5600: 	mov.l &0x85a308d3,FP_SCR1_HI(%a6)
5601: 	clr.l FP_SCR1_LO(%a6)
5602: 
# Give the constants the sign opposite to the argument so that the
# fadd's below actually reduce the magnitude.
5603: 	ftest.x %fp0 # test sign of argument
5604: 	fblt.w sred_neg
5605: 
5606: 	or.b &0x80,FP_SCR0_EX(%a6) # positive arg
5607: 	or.b &0x80,FP_SCR1_EX(%a6)
5608: sred_neg:
5609: 	fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
5610: 	fmov.x %fp0,%fp1 # save high result in fp1
5611: 	fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
5612: 	fsub.x %fp0,%fp1 # determine low component of result
5613: 	fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
5614: 
5615: #--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
5616: #--integer quotient will be stored in N
5617: #--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
5618: SLOOP:
5619: 	fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
5620: 	mov.w INARG(%a6),%d1
5621: 	mov.l %d1,%a1 # save a copy of D0
5622: 	and.l &0x00007FFF,%d1
5623: 	sub.l &0x00003FFF,%d1 # d0 = K
5624: 	cmp.l %d1,&28
5625: 	ble.b SLASTLOOP
5626: SCONTLOOP:
5627: 	sub.l &27,%d1 # d0 = L := K-27
5628: 	mov.b &0,ENDFLAG(%a6)
5629: 	bra.b SWORK
5630: SLASTLOOP:
5631: 	clr.l %d1 # d0 = L := 0
5632: 	mov.b &1,ENDFLAG(%a6)
5633: 
5634: SWORK:
5635: #--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
5636: #--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
5637: 
5638: #--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
5639: #--2**L * (PIby2_1), 2**L * (PIby2_2)
5640: 
5641: 	mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
5642: 	sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
5643: 
5644: 	mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
5645: 	mov.l &0x4E44152A,FP_SCR0_LO(%a6)
5646: 	mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
5647: 
5648: 	fmov.x %fp0,%fp2
5649: 	fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
5650: 
5651: #--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
5652: #--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
5653: #--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
5654: #--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
5655: #--US THE DESIRED VALUE IN FLOATING POINT.
5656: 	mov.l %a1,%d2
5657: 	swap %d2
5658: 	and.l &0x80000000,%d2
5659: 	or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
5660: 	mov.l %d2,TWOTO63(%a6)
5661: 	fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP1 IS ROUNDED
5662: 	fsub.s TWOTO63(%a6),%fp2 # fp2 = N
5663: # 	fint.x %fp2
5664: 
5665: #--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
5666: 	mov.l %d1,%d2 # d2 = L
5667: 
5668: 	add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
5669: 	mov.w %d2,FP_SCR0_EX(%a6)
5670: 	mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
5671: 	clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
5672: 
5673: 	add.l &0x00003FDD,%d1
5674: 	mov.w %d1,FP_SCR1_EX(%a6)
5675: 	mov.l &0x85A308D3,FP_SCR1_HI(%a6)
5676: 	clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
5677: 
5678: 	mov.b ENDFLAG(%a6),%d1
5679: 
5680: #--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
5681: #--P2 = 2**(L) * Piby2_2
5682: 	fmov.x %fp2,%fp4 # fp4 = N
5683: 	fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
5684: 	fmov.x %fp2,%fp5 # fp5 = N
5685: 	fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
5686: 	fmov.x %fp4,%fp3 # fp3 = W = N*P1
5687: 
5688: #--we want P+p = W+w but |p| <= half ulp of P
5689: #--Then, we need to compute A := R-P and a := r-p
5690: 	fadd.x %fp5,%fp3 # fp3 = P
5691: 	fsub.x %fp3,%fp4 # fp4 = W-P
5692: 
5693: 	fsub.x %fp3,%fp0 # fp0 = A := R - P
5694: 	fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
5695: 
5696: 	fmov.x %fp0,%fp3 # fp3 = A
5697: 	fsub.x %fp4,%fp1 # fp1 = a := r - p
5698: 
5699: #--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
5700: #--|r| <= half ulp of R.
5701: 	fadd.x %fp1,%fp0 # fp0 = R := A+a
5702: #--No need to calculate r if this is the last loop
5703: 	cmp.b %d1,&0
5704: 	bgt.w SRESTORE
5705: 
5706: #--Need to calculate r
5707: 	fsub.x %fp0,%fp3 # fp3 = A-R
5708: 	fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
5709: 	bra.w SLOOP
5710: 
5711: SRESTORE:
5712: 	fmov.l %fp2,INT(%a6)
5713: 	mov.l (%sp)+,%d2 # restore d2
5714: 	fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
5715: 
# Resume in the caller's polynomial code: ADJN < 4 -> sin/cos (SINCONT),
# ADJN = 4 -> sincos (SCCONT).
5716: 	mov.l ADJN(%a6),%d1
5717: 	cmp.l %d1,&4
5718: 
5719: 	blt.w SINCONT
5720: 	bra.w SCCONT
5721:
5722: #########################################################################
5723: # stan(): computes the tangent of a normalized input #
5724: # stand(): computes the tangent of a denormalized input #
5725: # #
5726: # INPUT *************************************************************** #
5727: # a0 = pointer to extended precision input #
5728: # d0 = round precision,mode #
5729: # #
5730: # OUTPUT ************************************************************** #
5731: # fp0 = tan(X) #
5732: # #
5733: # ACCURACY and MONOTONICITY ******************************************* #
5734: # The returned result is within 3 ulp in 64 significant bit, i.e. #
5735: # within 0.5001 ulp to 53 bits if the result is subsequently #
5736: # rounded to double precision. The result is provably monotonic #
5737: # in double precision. #
5738: # #
5739: # ALGORITHM *********************************************************** #
5740: # #
5741: # 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
5742: # #
5743: # 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
5744: # k = N mod 2, so in particular, k = 0 or 1. #
5745: # #
5746: # 3. If k is odd, go to 5. #
5747: # #
5748: # 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a #
5749: # rational function U/V where #
5750: # U = r + r*s*(P1 + s*(P2 + s*P3)), and #
5751: # V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r. #
5752: # Exit. #
5753: # #
5754: #	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by	#
5755: # a rational function U/V where #
5756: # U = r + r*s*(P1 + s*(P2 + s*P3)), and #
5757: # V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r, #
5758: # -Cot(r) = -V/U. Exit. #
5759: # #
5760: # 6. If |X| > 1, go to 8. #
5761: # #
5762: # 7. (|X|<2**(-40)) Tan(X) = X. Exit. #
5763: # #
5764: # 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back #
5765: # to 2. #
5766: # #
5767: #########################################################################
5768:
# Coefficients of the rational approximation tan(r) = U/V with
# U = r + r*s*(P1 + s*(P2 + s*P3)) and
# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r.
# P1/Q1/Q2 (and P2) are extended precision; the small terms are double.
5769: TANQ4:
5770: 	long 0x3EA0B759,0xF50F8688
5771: TANP3:
5772: 	long 0xBEF2BAA5,0xA8924F04
5773: 
5774: TANQ3:
5775: 	long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
5776: 
5777: TANP2:
5778: 	long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
5779: 
5780: TANQ2:
5781: 	long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
5782: 
5783: TANP1:
5784: 	long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
5785: 
5786: TANQ1:
5787: 	long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
5788: 
5789: INVTWOPI:
5790: 	long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
5791: 
5792: TWOPI1:
5793: 	long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
5794: TWOPI2:
5795: 	long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
5796:
5797: #--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
5798: #--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
5799: #--MOST 69 BITS LONG.
5800: # 	global PITBL
# Each entry is 16 bytes: 12-byte extended-precision leading term Y1
# followed by a 4-byte single-precision trailing term Y2.  Callers
# index with PITBL+0x200+N*16 so N = 0 lands on the all-zero middle
# entry; the reduction computes R = (X-Y1)-Y2.
5801: PITBL:
5802: 	long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
5803: 	long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
5804: 	long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
5805: 	long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
5806: 	long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
5807: 	long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
5808: 	long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
5809: 	long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
5810: 	long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
5811: 	long 0xC0040000,0x90836524,0x88034B96,0x20B00000
5812: 	long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
5813: 	long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
5814: 	long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
5815: 	long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
5816: 	long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
5817: 	long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
5818: 	long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
5819: 	long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
5820: 	long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
5821: 	long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
5822: 	long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
5823: 	long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
5824: 	long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
5825: 	long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
5826: 	long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
5827: 	long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
5828: 	long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
5829: 	long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
5830: 	long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
5831: 	long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
5832: 	long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
5833: 	long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
5834: 	long 0x00000000,0x00000000,0x00000000,0x00000000
5835: 	long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
5836: 	long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
5837: 	long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
5838: 	long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
5839: 	long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
5840: 	long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
5841: 	long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
5842: 	long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
5843: 	long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
5844: 	long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
5845: 	long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
5846: 	long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
5847: 	long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
5848: 	long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
5849: 	long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
5850: 	long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
5851: 	long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
5852: 	long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
5853: 	long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
5854: 	long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
5855: 	long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
5856: 	long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
5857: 	long 0x40040000,0x90836524,0x88034B96,0xA0B00000
5858: 	long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
5859: 	long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
5860: 	long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
5861: 	long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
5862: 	long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
5863: 	long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
5864: 	long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
5865: 	long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
5866: 	long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
5867:
# Scratch aliases for stan/REDUCEX (re-declared after the sincos set).
5868: 	set INARG,FP_SCR0
5869: 
5870: 	set TWOTO63,L_SCR1 # sign(arg)*2**63 in single precision
5871: 	set INT,L_SCR1
5872: 	set ENDFLAG,L_SCR2 # nonzero on the final reduction pass
5873:
5874: 	global stan
# stan(): tangent of a normalized input (a0 -> ext. prec. operand,
# d0 = round precision,mode).  Result in fp0.
5875: stan:
5876: 	fmov.x (%a0),%fp0 # LOAD INPUT
5877: 
# Pack exponent + hi mantissa into d1 (same "compactify" trick as
# the sin/cos entry points) for the magnitude classification.
5878: 	mov.l (%a0),%d1
5879: 	mov.w 4(%a0),%d1
5880: 	and.l &0x7FFFFFFF,%d1
5881: 
5882: 	cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
5883: 	bge.b TANOK1
5884: 	bra.w TANSM
5885: TANOK1:
5886: 	cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
5887: 	blt.b TANMAIN
5888: 	bra.w REDUCEX
5889: 
5890: TANMAIN:
5891: #--THIS IS THE USUAL CASE, |X| <= 15 PI.
5892: #--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
5893: 	fmov.x %fp0,%fp1
5894: 	fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
5895: 
5896: 	lea.l PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
5897: 
5898: 	fmov.l %fp1,%d1 # CONVERT TO INTEGER
5899: 
5900: 	asl.l &4,%d1 # 16 bytes per table entry
5901: 	add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2
5902: 
5903: 	fsub.x (%a1)+,%fp0 # X-Y1
5904: 
5905: 	fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
5906: 
# d1 = N*16, so N's parity sits in bit 4; ror by 5 moves it to bit 31.
5907: 	ror.l &5,%d1
5908: 	and.l &0x80000000,%d1 # D0 WAS ODD IFF D0 < 0
5909:
5910: TANCONT:
5911: 	fmovm.x &0x0c,-(%sp) # save fp2,fp3
5912: 
# N even: tan(X) = tan(r) = U/V (see the ALGORITHM header).
# N odd: tan(X) = -cot(r), handled at NODD.
5913: 	cmp.l %d1,&0
5914: 	blt.w NODD
5915: 
5916: 	fmov.x %fp0,%fp1
5917: 	fmul.x %fp1,%fp1 # S = R*R
5918: 
5919: 	fmov.d TANQ4(%pc),%fp3
5920: 	fmov.d TANP3(%pc),%fp2
5921: 
5922: 	fmul.x %fp1,%fp3 # SQ4
5923: 	fmul.x %fp1,%fp2 # SP3
5924: 
5925: 	fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
5926: 	fadd.x TANP2(%pc),%fp2 # P2+SP3
5927: 
5928: 	fmul.x %fp1,%fp3 # S(Q3+SQ4)
5929: 	fmul.x %fp1,%fp2 # S(P2+SP3)
5930: 
5931: 	fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
5932: 	fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
5933: 
5934: 	fmul.x %fp1,%fp3 # S(Q2+S(Q3+SQ4))
5935: 	fmul.x %fp1,%fp2 # S(P1+S(P2+SP3))
5936: 
5937: 	fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
5938: 	fmul.x %fp0,%fp2 # RS(P1+S(P2+SP3))
5939: 
5940: 	fmul.x %fp3,%fp1 # S(Q1+S(Q2+S(Q3+SQ4)))
5941: 
5942: 	fadd.x %fp2,%fp0 # R+RS(P1+S(P2+SP3)) = U
5943: 
5944: 	fadd.s &0x3F800000,%fp1 # 1+S(Q1+...) = V
5945: 
5946: 	fmovm.x (%sp)+,&0x30 # restore fp2,fp3
5947: 
5948: 	fmov.l %d0,%fpcr # restore users round mode,prec
5949: 	fdiv.x %fp1,%fp0 # last inst - possible exception set
5950: 	bra t_inx2
5951:
# N odd: tan(X) = -cot(r) = -V/U.  Build U and V with the same
# rational approximation, then divide V by U with U's sign flipped.
5952: NODD:
5953: 	fmov.x %fp0,%fp1
5954: 	fmul.x %fp0,%fp0 # S = R*R
5955: 
5956: 	fmov.d TANQ4(%pc),%fp3
5957: 	fmov.d TANP3(%pc),%fp2
5958: 
5959: 	fmul.x %fp0,%fp3 # SQ4
5960: 	fmul.x %fp0,%fp2 # SP3
5961: 
5962: 	fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
5963: 	fadd.x TANP2(%pc),%fp2 # P2+SP3
5964: 
5965: 	fmul.x %fp0,%fp3 # S(Q3+SQ4)
5966: 	fmul.x %fp0,%fp2 # S(P2+SP3)
5967: 
5968: 	fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
5969: 	fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
5970: 
5971: 	fmul.x %fp0,%fp3 # S(Q2+S(Q3+SQ4))
5972: 	fmul.x %fp0,%fp2 # S(P1+S(P2+SP3))
5973: 
5974: 	fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
5975: 	fmul.x %fp1,%fp2 # RS(P1+S(P2+SP3))
5976: 
5977: 	fmul.x %fp3,%fp0 # S(Q1+S(Q2+S(Q3+SQ4)))
5978: 
5979: 	fadd.x %fp2,%fp1 # R+RS(P1+S(P2+SP3)) = U
5980: 	fadd.s &0x3F800000,%fp0 # 1+S(Q1+...) = V
5981: 
5982: 	fmovm.x (%sp)+,&0x30 # restore fp2,fp3
5983: 
# Flip the sign bit of U on the stack so the divide yields V/(-U).
5984: 	fmov.x %fp1,-(%sp)
5985: 	eor.l &0x80000000,(%sp)
5986: 
5987: 	fmov.l %d0,%fpcr # restore users round mode,prec
5988: 	fdiv.x (%sp)+,%fp0 # last inst - possible exception set
5989: 	bra t_inx2
5990:
5991: TANBORS:
5992: #--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
5993: #--IF |X| < 2**(-40), RETURN X.
5994: 	cmp.l %d1,&0x3FFF8000
5995: 	bgt.b REDUCEX
5996: 
# tan(X) ~= X for |X| < 2**(-40); re-load X under the user's control
# word so the final fmove raises any enabled exceptions itself.
5997: TANSM:
5998: 	fmov.x %fp0,-(%sp)
5999: 	fmov.l %d0,%fpcr # restore users round mode,prec
6000: 	mov.b &FMOV_OP,%d1 # last inst is MOVE
6001: 	fmov.x (%sp)+,%fp0 # last inst - possible exception set
6002: 	bra t_catch
6003:
6004: global stand
6005: #--TAN(X) = X FOR DENORMALIZED X
# denormalized-input entry point; tail-branches to the shared
# extended-denorm handler
6006: stand:
6007: bra t_extdnrm
6008:
6009: #--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
6010: #--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
6011: #--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
6012: REDUCEX:
6013: fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
6014: mov.l %d2,-(%sp) # save d2
6015: fmov.s &0x00000000,%fp1 # fp1 = 0
6016:
#--If compact form of abs(arg) in d1=$7ffeffff, argument is so large that
6017: #--there is a danger of unwanted overflow in first LOOP iteration. In this
6018: #--case, reduce argument by one remainder step to make subsequent reduction
6019: #--safe.
6020: cmp.l %d1,&0x7ffeffff # is arg dangerously large?
6021: bne.b LOOP # no
6022:
6023: # yes; create 2**16383*PI/2
6024: mov.w &0x7ffe,FP_SCR0_EX(%a6)
6025: mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
6026: clr.l FP_SCR0_LO(%a6)
6027:
6028: # create low half of 2**16383*PI/2 at FP_SCR1
6029: mov.w &0x7fdc,FP_SCR1_EX(%a6)
6030: mov.l &0x85a308d3,FP_SCR1_HI(%a6)
6031: clr.l FP_SCR1_LO(%a6)
6032:
# give the pre-reduction constant the OPPOSITE sign of the argument
# (negative arg keeps the positive constant built above; positive arg
# sets the sign bits so the fadd below subtracts magnitude)
6033: ftest.x %fp0 # test sign of argument
6034: fblt.w red_neg
6035:
6036: or.b &0x80,FP_SCR0_EX(%a6) # positive arg
6037: or.b &0x80,FP_SCR1_EX(%a6)
6038: red_neg:
6039: fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
6040: fmov.x %fp0,%fp1 # save high result in fp1
6041: fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
6042: fsub.x %fp0,%fp1 # determine low component of result
6043: fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
6045:
6046: #--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
6047: #--integer quotient will be stored in N
#--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
6049: LOOP:
6050: fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
# extract sign+exponent word of the current remainder into d1
6051: mov.w INARG(%a6),%d1
6052: mov.l %d1,%a1 # save a copy of D1
6053: and.l &0x00007FFF,%d1
6054: sub.l &0x00003FFF,%d1 # d1 = K
6055: cmp.l %d1,&28
6056: ble.b LASTLOOP
6057: CONTLOOP:
6058: sub.l &27,%d1 # d1 = L := K-27
6059: mov.b &0,ENDFLAG(%a6)
6060: bra.b WORK
6061: LASTLOOP:
6062: clr.l %d1 # d1 = L := 0
6063: mov.b &1,ENDFLAG(%a6)
6064:
6065: WORK:
6066: #--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
6067: #--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
6068:
6069: #--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
6070: #--2**L * (PIby2_1), 2**L * (PIby2_2)
6071:
6072: mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
6073: sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
6074:
6075: mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
6076: mov.l &0x4E44152A,FP_SCR0_LO(%a6)
6077: mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
6078:
6079: fmov.x %fp0,%fp2
6080: fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
6081:
6082: #--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
6083: #--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
6084: #--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
6085: #--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
6086: #--US THE DESIRED VALUE IN FLOATING POINT.
# a1 still holds the sign+exponent word of INARG saved in LOOP
6087: mov.l %a1,%d2
6088: swap %d2
6089: and.l &0x80000000,%d2
6090: or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
6091: mov.l %d2,TWOTO63(%a6)
6092: fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
6093: fsub.s TWOTO63(%a6),%fp2 # fp2 = N
6094: # fintrz.x %fp2,%fp2
6095:
6096: #--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
6097: mov.l %d1,%d2 # d2 = L
6098:
6099: add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
6100: mov.w %d2,FP_SCR0_EX(%a6)
6101: mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
6102: clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
6103:
6104: add.l &0x00003FDD,%d1
6105: mov.w %d1,FP_SCR1_EX(%a6)
6106: mov.l &0x85A308D3,FP_SCR1_HI(%a6)
6107: clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
6108:
# d1 := ENDFLAG (1 on the final pass, 0 otherwise); tested below
6109: mov.b ENDFLAG(%a6),%d1
6110:
6111: #--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
6112: #--P2 = 2**(L) * Piby2_2
6113: fmov.x %fp2,%fp4 # fp4 = N
6114: fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
6115: fmov.x %fp2,%fp5 # fp5 = N
6116: fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
6117: fmov.x %fp4,%fp3 # fp3 = W = N*P1
6118:
6119: #--we want P+p = W+w but |p| <= half ulp of P
6120: #--Then, we need to compute A := R-P and a := r-p
6121: fadd.x %fp5,%fp3 # fp3 = P
6122: fsub.x %fp3,%fp4 # fp4 = W-P
6123:
6124: fsub.x %fp3,%fp0 # fp0 = A := R - P
6125: fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
6126:
6127: fmov.x %fp0,%fp3 # fp3 = A
6128: fsub.x %fp4,%fp1 # fp1 = a := r - p
6129:
6130: #--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
6131: #--|r| <= half ulp of R.
6132: fadd.x %fp1,%fp0 # fp0 = R := A+a
6133: #--No need to calculate r if this is the last loop
6134: cmp.b %d1,&0
6135: bgt.w RESTORE
6136:
6137: #--Need to calculate r
6138: fsub.x %fp0,%fp3 # fp3 = A-R
6139: fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
6140: bra.w LOOP
6141:
6142: RESTORE:
# store the integer quotient N, then restore caller-saved state
6143: fmov.l %fp2,INT(%a6)
6144: mov.l (%sp)+,%d2 # restore d2
6145: fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
6146:
# rotate bit 0 of N into the top of d1 so TANCONT can test the
# quotient's parity with a sign test
6147: mov.l INT(%a6),%d1
6148: ror.l &1,%d1
6149:
6150: bra.w TANCONT
6151:
6152: #########################################################################
6153: # satan(): computes the arctangent of a normalized number #
6154: # satand(): computes the arctangent of a denormalized number #
6155: # #
6156: # INPUT *************************************************************** #
6157: # a0 = pointer to extended precision input #
6158: # d0 = round precision,mode #
6159: # #
6160: # OUTPUT ************************************************************** #
6161: # fp0 = arctan(X) #
6162: # #
6163: # ACCURACY and MONOTONICITY ******************************************* #
6164: # The returned result is within 2 ulps in 64 significant bit, #
6165: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
6166: # rounded to double precision. The result is provably monotonic #
6167: # in double precision. #
6168: # #
6169: # ALGORITHM *********************************************************** #
6170: # Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5. #
6171: # #
6172: # Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. #
6173: # Note that k = -4, -3,..., or 3. #
6174: # Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 #
6175: # significant bits of X with a bit-1 attached at the 6-th #
6176: # bit position. Define u to be u = (X-F) / (1 + X*F). #
6177: # #
6178: # Step 3. Approximate arctan(u) by a polynomial poly. #
6179: # #
6180: # Step 4. Return arctan(F) + poly, arctan(F) is fetched from a #
6181: # table of values calculated beforehand. Exit. #
6182: # #
6183: # Step 5. If |X| >= 16, go to Step 7. #
6184: # #
6185: # Step 6. Approximate arctan(X) by an odd polynomial in X. Exit. #
6186: # #
6187: # Step 7. Define X' = -1/X. Approximate arctan(X') by an odd #
6188: # polynomial in X'. #
6189: # Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit. #
6190: # #
6191: #########################################################################
6192:
# double-precision coefficients for the atan(u) polynomial used in the
# table-lookup path (see ATANMAIN): u + A1*u*v*(A2 + v*(A3 + v)), v = u*u
6193: ATANA3: long 0xBFF6687E,0x314987D8
6194: ATANA2: long 0x4002AC69,0x34A26DB3
6195: ATANA1: long 0xBFC2476F,0x4E1DA28E
6196:
# coefficients for the small-|X| odd polynomial (see ATANSM)
6197: ATANB6: long 0x3FB34444,0x7F876989
6198: ATANB5: long 0xBFB744EE,0x7FAF45DB
6199: ATANB4: long 0x3FBC71C6,0x46940220
6200: ATANB3: long 0xBFC24924,0x921872F9
6201: ATANB2: long 0x3FC99999,0x99998FA9
6202: ATANB1: long 0xBFD55555,0x55555555
6203:
# coefficients for the large-|X| odd polynomial in X' = -1/X (see ATANBIG)
6204: ATANC5: long 0xBFB70BF3,0x98539E6A
6205: ATANC4: long 0x3FBC7187,0x962D1D7D
6206: ATANC3: long 0xBFC24924,0x827107B8
6207: ATANC2: long 0x3FC99999,0x9996263E
6208: ATANC1: long 0xBFD55555,0x55555536
6209:
# extended-precision +pi/2 and -pi/2
6210: PPIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
6211: NPIBY2: long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
6212:
# +/- 2**(-16382): tiny values used to force inexact in ATANHUGE
6213: PTINY: long 0x00010000,0x80000000,0x00000000,0x00000000
6214: NTINY: long 0x80010000,0x80000000,0x00000000,0x00000000
6215:
6216: ATANTBL:
# 128 extended-precision entries of atan(|F|), indexed by the 7 bits
# built in ATANMAIN from K+4 (3 bits) and the top 4 varying fraction
# bits of F; each entry is 16 bytes (4th long is padding)
6217: long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
6218: long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
6219: long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
6220: long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
6221: long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
6222: long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
6223: long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
6224: long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
6225: long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
6226: long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
6227: long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
6228: long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
6229: long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
6230: long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
6231: long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
6232: long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
6233: long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
6234: long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
6235: long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
6236: long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
6237: long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
6238: long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
6239: long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
6240: long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
6241: long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
6242: long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
6243: long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
6244: long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
6245: long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
6246: long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
6247: long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
6248: long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
6249: long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
6250: long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
6251: long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
6252: long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
6253: long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
6254: long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
6255: long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
6256: long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
6257: long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
6258: long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
6259: long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
6260: long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
6261: long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
6262: long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
6263: long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
6264: long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
6265: long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
6266: long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
6267: long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
6268: long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
6269: long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
6270: long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
6271: long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
6272: long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
6273: long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
6274: long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
6275: long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
6276: long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
6277: long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
6278: long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
6279: long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
6280: long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
6281: long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
6282: long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
6283: long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
6284: long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
6285: long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
6286: long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
6287: long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
6288: long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
6289: long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
6290: long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
6291: long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
6292: long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
6293: long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
6294: long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
6295: long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
6296: long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
6297: long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
6298: long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
6299: long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
6300: long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
6301: long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
6302: long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
6303: long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
6304: long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
6305: long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
6306: long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
6307: long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
6308: long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
6309: long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
6310: long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
6311: long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
6312: long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
6313: long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
6314: long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
6315: long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
6316: long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
6317: long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
6318: long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
6319: long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
6320: long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
6321: long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
6322: long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
6323: long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
6324: long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
6325: long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
6326: long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
6327: long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
6328: long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
6329: long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
6330: long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
6331: long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
6332: long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
6333: long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
6334: long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
6335: long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
6336: long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
6337: long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
6338: long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
6339: long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
6340: long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
6341: long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
6342: long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
6343: long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
6344: long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
6345:
# aliases into the exception frame's scratch areas:
# X = working copy of the argument; ATANF = sign(F)*atan(|F|)
6346: set X,FP_SCR0
6347: set XDCARE,X+2
6348: set XFRAC,X+4
6349: set XFRACLO,X+8
6350:
6351: set ATANF,FP_SCR1
6352: set ATANFHI,ATANF+4
6353: set ATANFLO,ATANF+8
6354:
6355: global satan
6356: #--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
6357: satan:
6358: fmov.x (%a0),%fp0 # LOAD INPUT
6359:
# build the compact form of |X| in d1: sign+exponent word in the upper
# half, 16 most significant fraction bits in the lower half
6360: mov.l (%a0),%d1
6361: mov.w 4(%a0),%d1
6362: fmov.x %fp0,X(%a6)
6363: and.l &0x7FFFFFFF,%d1
6364:
6365: cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
6366: bge.b ATANOK1
6367: bra.w ATANSM
6368:
6369: ATANOK1:
6370: cmp.l %d1,&0x4002FFFF # |X| < 16 ?
6371: ble.b ATANMAIN
6372: bra.w ATANBIG
6373:
6374: #--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
6375: #--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
6376: #--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
6377: #--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
6378: #--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
6379: #--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
6380: #--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
#--FETCH F AND SAVING OF REGISTERS CAN BE ALL HIDDEN UNDER THE
6382: #--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
6383: #--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATE
6384: #--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
6385: #--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
6386: #--WILL INVOLVE A VERY LONG POLYNOMIAL.
6387:
6388: #--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
6389: #--WE CHOSE F TO BE +-2^K * 1.BBBB1
6390: #--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
#--SIXTH BIT IS SET TO BE 1. SINCE K = -4, -3, ..., 3, THERE
6392: #--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
6393: #-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
6394:
6395: ATANMAIN:
6396:
6397: and.l &0xF8000000,XFRAC(%a6) # FIRST 5 BITS
6398: or.l &0x04000000,XFRAC(%a6) # SET 6-TH BIT TO 1
6399: mov.l &0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
6400:
6401: fmov.x %fp0,%fp1 # FP1 IS X
6402: fmul.x X(%a6),%fp1 # FP1 IS X*F, NOTE THAT X*F > 0
6403: fsub.x X(%a6),%fp0 # FP0 IS X-F
6404: fadd.s &0x3F800000,%fp1 # FP1 IS 1 + X*F
6405: fdiv.x %fp1,%fp0 # FP0 IS U = (X-F)/(1+X*F)
6406:
6407: #--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
6408: #--CREATE ATAN(F) AND STORE IT IN ATANF, AND
6409: #--SAVE REGISTERS FP2.
6410:
6411: mov.l %d2,-(%sp) # SAVE d2 TEMPORARILY
6412: mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
6413: and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
6414: and.l &0x7FFF0000,%d2 # EXPONENT OF F
6415: sub.l &0x3FFB0000,%d2 # K+4
6416: asr.l &1,%d2
6417: add.l %d2,%d1 # THE 7 BITS IDENTIFYING F
6418: asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|)
6419: lea ATANTBL(%pc),%a1
6420: add.l %d1,%a1 # ADDRESS OF ATAN(|F|)
6421: mov.l (%a1)+,ATANF(%a6)
6422: mov.l (%a1)+,ATANFHI(%a6)
6423: mov.l (%a1)+,ATANFLO(%a6) # ATANF IS NOW ATAN(|F|)
6424: mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN
6425: and.l &0x80000000,%d1 # SIGN(F)
6426: or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|)
6427: mov.l (%sp)+,%d2 # RESTORE d2
6428:
6429: #--THAT'S ALL I HAVE TO DO FOR NOW,
6430: #--BUT ALAS, THE DIVIDE IS STILL CRANKING!
6431:
6432: #--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
6433: #--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
6434: #--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
6435: #--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
6436: #--WHAT WE HAVE HERE IS MERELY A1 = A3, A2 = A1/A3, A3 = A2/A3.
6437: #--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
6438: #--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
6439:
6440: fmovm.x &0x04,-(%sp) # save fp2
6441:
6442: fmov.x %fp0,%fp1
6443: fmul.x %fp1,%fp1
6444: fmov.d ATANA3(%pc),%fp2
6445: fadd.x %fp1,%fp2 # A3+V
6446: fmul.x %fp1,%fp2 # V*(A3+V)
6447: fmul.x %fp0,%fp1 # U*V
6448: fadd.d ATANA2(%pc),%fp2 # A2+V*(A3+V)
6449: fmul.d ATANA1(%pc),%fp1 # A1*U*V
6450: fmul.x %fp2,%fp1 # A1*U*V*(A2+V*(A3+V))
6451: fadd.x %fp1,%fp0 # ATAN(U), FP1 RELEASED
6452:
6453: fmovm.x (%sp)+,&0x20 # restore fp2
6454:
6455: fmov.l %d0,%fpcr # restore users rnd mode,prec
6456: fadd.x ATANF(%a6),%fp0 # ATAN(X)
6457: bra t_inx2
6458:
6459: ATANBORS:
#--|X| IS IN d1 IN COMPACT FORM. FP1, d0 SAVED.
6461: #--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
6462: cmp.l %d1,&0x3FFF8000
6463: bgt.w ATANBIG # I.E. |X| >= 16
6464:
6465: ATANSM:
6466: #--|X| <= 1/16
6467: #--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
6468: #--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
6470: #--WHERE Y = X*X, AND Z = Y*Y.
6471:
6472: cmp.l %d1,&0x3FD78000
6473: blt.w ATANTINY
6474:
6475: #--COMPUTE POLYNOMIAL
6476: fmovm.x &0x0c,-(%sp) # save fp2/fp3
6477:
# FP0 IS Y = X*X
6478: fmul.x %fp0,%fp0
6479:
6480: fmov.x %fp0,%fp1
6481: fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
6482:
6483: fmov.d ATANB6(%pc),%fp2
6484: fmov.d ATANB5(%pc),%fp3
6485:
6486: fmul.x %fp1,%fp2 # Z*B6
6487: fmul.x %fp1,%fp3 # Z*B5
6488:
6489: fadd.d ATANB4(%pc),%fp2 # B4+Z*B6
6490: fadd.d ATANB3(%pc),%fp3 # B3+Z*B5
6491:
6492: fmul.x %fp1,%fp2 # Z*(B4+Z*B6)
6493: fmul.x %fp3,%fp1 # Z*(B3+Z*B5)
6494:
6495: fadd.d ATANB2(%pc),%fp2 # B2+Z*(B4+Z*B6)
6496: fadd.d ATANB1(%pc),%fp1 # B1+Z*(B3+Z*B5)
6497:
6498: fmul.x %fp0,%fp2 # Y*(B2+Z*(B4+Z*B6))
6499: fmul.x X(%a6),%fp0 # X*Y
6500:
6501: fadd.x %fp2,%fp1 # [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
6502:
6503: fmul.x %fp1,%fp0 # X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
6504:
6505: fmovm.x (%sp)+,&0x30 # restore fp2/fp3
6506:
6507: fmov.l %d0,%fpcr # restore users rnd mode,prec
6508: fadd.x X(%a6),%fp0
6509: bra t_inx2
6510:
6511: ATANTINY:
6512: #--|X| < 2^(-40), ATAN(X) = X
6513:
6514: fmov.l %d0,%fpcr # restore users rnd mode,prec
6515: mov.b &FMOV_OP,%d1 # last inst is MOVE
6516: fmov.x X(%a6),%fp0 # last inst - possible exception set
6517:
6518: bra t_catch
6519:
6520: ATANBIG:
6521: #--IF |X| > 2^(100), RETURN SIGN(X)*(PI/2 - TINY). OTHERWISE,
6522: #--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
6523: cmp.l %d1,&0x40638000
6524: bgt.w ATANHUGE
6525:
6526: #--APPROXIMATE ATAN(-1/X) BY
6527: #--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
6528: #--THIS CAN BE RE-WRITTEN AS
6529: #--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
6530:
6531: fmovm.x &0x0c,-(%sp) # save fp2/fp3
6532:
6533: fmov.s &0xBF800000,%fp1 # LOAD -1
6534: fdiv.x %fp0,%fp1 # FP1 IS -1/X
6535:
6536: #--DIVIDE IS STILL CRANKING
6537:
6538: fmov.x %fp1,%fp0 # FP0 IS X'
6539: fmul.x %fp0,%fp0 # FP0 IS Y = X'*X'
6540: fmov.x %fp1,X(%a6) # X IS REALLY X'
6541:
6542: fmov.x %fp0,%fp1
6543: fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
6544:
6545: fmov.d ATANC5(%pc),%fp3
6546: fmov.d ATANC4(%pc),%fp2
6547:
6548: fmul.x %fp1,%fp3 # Z*C5
6549: fmul.x %fp1,%fp2 # Z*C4
6550:
6551: fadd.d ATANC3(%pc),%fp3 # C3+Z*C5
6552: fadd.d ATANC2(%pc),%fp2 # C2+Z*C4
6553:
6554: fmul.x %fp3,%fp1 # Z*(C3+Z*C5), FP3 RELEASED
6555: fmul.x %fp0,%fp2 # Y*(C2+Z*C4)
6556:
6557: fadd.d ATANC1(%pc),%fp1 # C1+Z*(C3+Z*C5)
6558: fmul.x X(%a6),%fp0 # X'*Y
6559:
6560: fadd.x %fp2,%fp1 # [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
6561:
6562: fmul.x %fp1,%fp0 # X'*Y*([C1+Z*(C3+Z*C5)]
# ... +[Y*(C2+Z*C4)])
6564: fadd.x X(%a6),%fp0
6565:
6566: fmovm.x (%sp)+,&0x30 # restore fp2/fp3
6567:
6568: fmov.l %d0,%fpcr # restore users rnd mode,prec
# sign of X selects which pi/2 constant to add
6569: tst.b (%a0)
6570: bpl.b pos_big
6571:
6572: neg_big:
6573: fadd.x NPIBY2(%pc),%fp0
6574: bra t_minx2
6575:
6576: pos_big:
6577: fadd.x PPIBY2(%pc),%fp0
6578: bra t_pinx2
6579:
6580: ATANHUGE:
6581: #--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
6582: tst.b (%a0)
6583: bpl.b pos_huge
6584:
6585: neg_huge:
6586: fmov.x NPIBY2(%pc),%fp0
6587: fmov.l %d0,%fpcr
6588: fadd.x PTINY(%pc),%fp0
6589: bra t_minx2
6590:
6591: pos_huge:
6592: fmov.x PPIBY2(%pc),%fp0
6593: fmov.l %d0,%fpcr
6594: fadd.x NTINY(%pc),%fp0
6595: bra t_pinx2
6596:
6597: global satand
6598: #--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
6599: satand:
6600: bra t_extdnrm
6601:
6602: #########################################################################
6603: # sasin(): computes the inverse sine of a normalized input #
6604: # sasind(): computes the inverse sine of a denormalized input #
6605: # #
6606: # INPUT *************************************************************** #
6607: # a0 = pointer to extended precision input #
6608: # d0 = round precision,mode #
6609: # #
6610: # OUTPUT ************************************************************** #
6611: # fp0 = arcsin(X) #
6612: # #
6613: # ACCURACY and MONOTONICITY ******************************************* #
6614: # The returned result is within 3 ulps in 64 significant bit, #
6615: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
6616: # rounded to double precision. The result is provably monotonic #
6617: # in double precision. #
6618: # #
6619: # ALGORITHM *********************************************************** #
6620: # #
6621: # ASIN #
6622: # 1. If |X| >= 1, go to 3. #
6623: # #
6624: # 2. (|X| < 1) Calculate asin(X) by #
6625: # z := sqrt( [1-X][1+X] ) #
6626: # asin(X) = atan( x / z ). #
6627: # Exit. #
6628: # #
6629: # 3. If |X| > 1, go to 5. #
6630: # #
6631: # 4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
6632: # #
6633: # 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
6634: # Exit. #
6635: # #
6636: #########################################################################
6637:
6638: global sasin
6639: sasin:
6640: fmov.x (%a0),%fp0 # LOAD INPUT
6641:
# compact form of |X| in d1: exponent + top 16 fraction bits
6642: mov.l (%a0),%d1
6643: mov.w 4(%a0),%d1
6644: and.l &0x7FFFFFFF,%d1
6645: cmp.l %d1,&0x3FFF8000
6646: bge.b ASINBIG
6647:
6648: # This catch is added here for the '060 QSP. Originally, the call to
6649: # satan() would handle this case by causing the exception which would
6650: # not be caught until gen_except(). Now, with the exceptions being
6651: # detected inside of satan(), the exception would have been handled there
6652: # instead of inside sasin() as expected.
6653: cmp.l %d1,&0x3FD78000
6654: blt.w ASINTINY
6655:
6656: #--THIS IS THE USUAL CASE, |X| < 1
6657: #--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
6658:
6659: ASINMAIN:
6660: fmov.s &0x3F800000,%fp1
6661: fsub.x %fp0,%fp1 # 1-X
6662: fmovm.x &0x4,-(%sp) # {fp2}
6663: fmov.s &0x3F800000,%fp2
6664: fadd.x %fp0,%fp2 # 1+X
6665: fmul.x %fp2,%fp1 # (1+X)(1-X)
6666: fmovm.x (%sp)+,&0x20 # {fp2}
6667: fsqrt.x %fp1 # SQRT([1-X][1+X])
6668: fdiv.x %fp1,%fp0 # X/SQRT([1-X][1+X])
6669: fmovm.x &0x01,-(%sp) # save X/SQRT(...)
6670: lea (%sp),%a0 # pass ptr to X/SQRT(...)
6671: bsr satan
6672: add.l &0xc,%sp # clear X/SQRT(...) from stack
6673: bra t_inx2
6674:
6675: ASINBIG:
6676: fabs.x %fp0 # |X|
6677: fcmp.s %fp0,&0x3F800000
6678: fbgt t_operr # cause an operr exception
6679:
6680: #--|X| = 1, ASIN(X) = +- PI/2.
6681: ASINONE:
6682: fmov.x PIBY2(%pc),%fp0
6683: mov.l (%a0),%d1
6684: and.l &0x80000000,%d1 # SIGN BIT OF X
6685: or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
6686: mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
6687: fmov.l %d0,%fpcr
6688: fmul.s (%sp)+,%fp0
6689: bra t_inx2
6690:
#--|X| < 2^(-40), ASIN(X) = X
6692: ASINTINY:
6693: fmov.l %d0,%fpcr # restore users rnd mode,prec
6694: mov.b &FMOV_OP,%d1 # last inst is MOVE
6695: fmov.x (%a0),%fp0 # last inst - possible exception
6696: bra t_catch
6697:
6698: global sasind
6699: #--ASIN(X) = X FOR DENORMALIZED X
# denormalized-input entry point; tail-branches to the shared handler
6700: sasind:
6701: bra t_extdnrm
6702:
6703: #########################################################################
6704: # sacos(): computes the inverse cosine of a normalized input #
6705: # sacosd(): computes the inverse cosine of a denormalized input #
6706: # #
6707: # INPUT *************************************************************** #
6708: # a0 = pointer to extended precision input #
6709: # d0 = round precision,mode #
6710: # #
6711: # OUTPUT ************************************************************** #
6712: # fp0 = arccos(X) #
6713: # #
6714: # ACCURACY and MONOTONICITY ******************************************* #
6715: # The returned result is within 3 ulps in 64 significant bit, #
6716: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
6717: # rounded to double precision. The result is provably monotonic #
6718: # in double precision. #
6719: # #
6720: # ALGORITHM *********************************************************** #
6721: # #
6722: # ACOS #
6723: # 1. If |X| >= 1, go to 3. #
6724: # #
6725: # 2. (|X| < 1) Calculate acos(X) by #
6726: # z := (1-X) / (1+X) #
6727: # acos(X) = 2 * atan( sqrt(z) ). #
6728: # Exit. #
6729: # #
6730: # 3. If |X| > 1, go to 5. #
6731: # #
6732: # 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit. #
6733: # #
6734: # 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
6735: # Exit. #
6736: # #
6737: #########################################################################
6738:
6739: global sacos
6740: sacos:
6741: fmov.x (%a0),%fp0 # LOAD INPUT
6742:
6743: mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
6744: mov.w 4(%a0),%d1
6745: and.l &0x7FFFFFFF,%d1
6746: cmp.l %d1,&0x3FFF8000
6747: bge.b ACOSBIG
6748:
6749: #--THIS IS THE USUAL CASE, |X| < 1
6750: #--ACOS(X) = 2 * ATAN( SQRT( (1-X)/(1+X) ) )
6751:
6752: ACOSMAIN:
6753: fmov.s &0x3F800000,%fp1
6754: fadd.x %fp0,%fp1 # 1+X
6755: fneg.x %fp0 # -X
6756: fadd.s &0x3F800000,%fp0 # 1-X
6757: fdiv.x %fp1,%fp0 # (1-X)/(1+X)
6758: fsqrt.x %fp0 # SQRT((1-X)/(1+X))
# call satan with d0 = 0 (extended precision, round-to-nearest) so the
# intermediate atan is full precision; user's fpcr is re-applied below
6759: mov.l %d0,-(%sp) # save original users fpcr
6760: clr.l %d0
6761: fmovm.x &0x01,-(%sp) # save SQRT(...) to stack
6762: lea (%sp),%a0 # pass ptr to sqrt
6763: bsr satan # ATAN(SQRT([1-X]/[1+X]))
6764: add.l &0xc,%sp # clear SQRT(...) from stack
6765:
6766: fmov.l (%sp)+,%fpcr # restore users round prec,mode
6767: fadd.x %fp0,%fp0 # 2 * ATAN( STUFF )
6768: bra t_pinx2
6769:
6770: ACOSBIG:
6771: fabs.x %fp0
6772: fcmp.s %fp0,&0x3F800000
6773: fbgt t_operr # cause an operr exception
6774:
6775: #--|X| = 1, ACOS(X) = 0 OR PI
6776: tst.b (%a0) # is X positive or negative?
6777: bpl.b ACOSP1
6778:
6779: #--X = -1
6780: #Returns PI and inexact exception
6781: ACOSM1:
6782: fmov.x PI(%pc),%fp0 # load PI
6783: fmov.l %d0,%fpcr # load round mode,prec
6784: fadd.s &0x00800000,%fp0 # add a small value
6785: bra t_pinx2
6786:
6787: ACOSP1:
6788: bra ld_pzero # answer is positive zero
6789:
6790: global sacosd
6791: #--ACOS(X) = PI/2 FOR DENORMALIZED X
6792: sacosd:
6793: fmov.l %d0,%fpcr # load user's rnd mode/prec
6794: fmov.x PIBY2(%pc),%fp0
6795: bra t_pinx2
6796:
6797: #########################################################################
6798: # setox(): computes the exponential for a normalized input #
6799: # setoxd(): computes the exponential for a denormalized input #
6800: # setoxm1(): computes the exponential minus 1 for a normalized input #
6801: # setoxm1d(): computes the exponential minus 1 for a denormalized input #
6802: # #
6803: # INPUT *************************************************************** #
6804: # a0 = pointer to extended precision input #
6805: # d0 = round precision,mode #
6806: # #
6807: # OUTPUT ************************************************************** #
6808: # fp0 = exp(X) or exp(X)-1 #
6809: # #
6810: # ACCURACY and MONOTONICITY ******************************************* #
6811: # The returned result is within 0.85 ulps in 64 significant bit, #
6812: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
6813: # rounded to double precision. The result is provably monotonic #
6814: # in double precision. #
6815: # #
6816: # ALGORITHM and IMPLEMENTATION **************************************** #
6817: # #
6818: # setoxd #
6819: # ------ #
6820: # Step 1. Set ans := 1.0 #
6821: # #
6822: # Step 2. Return ans := ans + sign(X)*2^(-126). Exit. #
6823: # Notes: This will always generate one exception -- inexact. #
6824: # #
6825: # #
6826: # setox #
6827: # ----- #
6828: # #
6829: # Step 1. Filter out extreme cases of input argument. #
6830: # 1.1 If |X| >= 2^(-65), go to Step 1.3. #
6831: # 1.2 Go to Step 7. #
6832: # 1.3 If |X| < 16380 log(2), go to Step 2. #
6833: # 1.4 Go to Step 8. #
6834: # Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
6835: # To avoid the use of floating-point comparisons, a #
6836: # compact representation of |X| is used. This format is a #
6837: # 32-bit integer, the upper (more significant) 16 bits #
6838: # are the sign and biased exponent field of |X|; the #
6839: # lower 16 bits are the 16 most significant fraction #
6840: # (including the explicit bit) bits of |X|. Consequently, #
6841: # the comparisons in Steps 1.1 and 1.3 can be performed #
6842: # by integer comparison. Note also that the constant #
6843: # 16380 log(2) used in Step 1.3 is also in the compact #
6844: # form. Thus taking the branch to Step 2 guarantees #
6845: # |X| < 16380 log(2). There is no harm to have a small #
6846: # number of cases where |X| is less than, but close to, #
6847: # 16380 log(2) and the branch to Step 9 is taken. #
6848: # #
6849: # Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
6850: # 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
6851: # was taken) #
6852: # 2.2 N := round-to-nearest-integer( X * 64/log2 ). #
6853: # 2.3 Calculate J = N mod 64; so J = 0,1,2,..., #
6854: # or 63. #
6855: # 2.4 Calculate M = (N - J)/64; so N = 64M + J. #
6856: # 2.5 Calculate the address of the stored value of #
6857: # 2^(J/64). #
6858: # 2.6 Create the value Scale = 2^M. #
6859: # Notes: The calculation in 2.2 is really performed by #
6860: # Z := X * constant #
6861: # N := round-to-nearest-integer(Z) #
6862: # where #
6863: # constant := single-precision( 64/log 2 ). #
6864: # #
6865: # Using a single-precision constant avoids memory #
6866: # access. Another effect of using a single-precision #
6867: # "constant" is that the calculated value Z is #
6868: # #
6869: # Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24). #
6870: # #
6871: # This error has to be considered later in Steps 3 and 4. #
6872: # #
6873: # Step 3. Calculate X - N*log2/64. #
6874: # 3.1 R := X + N*L1, #
6875: # where L1 := single-precision(-log2/64). #
6876: # 3.2 R := R + N*L2, #
6877: # L2 := extended-precision(-log2/64 - L1).#
6878: # Notes: a) The way L1 and L2 are chosen ensures L1+L2 #
6879: # approximate the value -log2/64 to 88 bits of accuracy. #
6880: # b) N*L1 is exact because N is no longer than 22 bits #
6881: # and L1 is no longer than 24 bits. #
6882: # c) The calculation X+N*L1 is also exact due to #
6883: # cancellation. Thus, R is practically X+N(L1+L2) to full #
6884: # 64 bits. #
6885: # d) It is important to estimate how large can |R| be #
6886: # after Step 3.2. #
6887: # #
6888: # N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24) #
6889: # X*64/log2 (1+eps) = N + f, |f| <= 0.5 #
6890: # X*64/log2 - N = f - eps*X 64/log2 #
6891: # X - N*log2/64 = f*log2/64 - eps*X #
6892: # #
6893: # #
6894: # Now |X| <= 16446 log2, thus #
6895: # #
6896: # |X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64 #
6897: # <= 0.57 log2/64. #
6898: # This bound will be used in Step 4. #
6899: # #
6900: # Step 4. Approximate exp(R)-1 by a polynomial #
6901: # p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5)))) #
6902: # Notes: a) In order to reduce memory access, the coefficients #
6903: # are made as "short" as possible: A1 (which is 1/2), A4 #
6904: # and A5 are single precision; A2 and A3 are double #
6905: # precision. #
6906: # b) Even with the restrictions above, #
6907: # |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062. #
6908: # Note that 0.0062 is slightly bigger than 0.57 log2/64. #
6909: # c) To fully utilize the pipeline, p is separated into #
6910: # two independent pieces of roughly equal complexities #
6911: # p = [ R + R*S*(A2 + S*A4) ] + #
6912: # [ S*(A1 + S*(A3 + S*A5)) ] #
6913: # where S = R*R. #
6914: # #
6915: # Step 5. Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by #
6916: # ans := T + ( T*p + t) #
6917: # where T and t are the stored values for 2^(J/64). #
6918: # Notes: 2^(J/64) is stored as T and t where T+t approximates #
6919: # 2^(J/64) to roughly 85 bits; T is in extended precision #
6920: # and t is in single precision. Note also that T is #
6921: # rounded to 62 bits so that the last two bits of T are #
6922: # zero. The reason for such a special form is that T-1, #
6923: # T-2, and T-8 will all be exact --- a property that will #
6924: # give much more accurate computation of the function #
6925: # EXPM1. #
6926: # #
6927: # Step 6. Reconstruction of exp(X) #
6928: # exp(X) = 2^M * 2^(J/64) * exp(R). #
6929: # 6.1 If AdjFlag = 0, go to 6.3 #
6930: # 6.2 ans := ans * AdjScale #
6931: # 6.3 Restore the user FPCR #
6932: # 6.4 Return ans := ans * Scale. Exit. #
6933: # Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R, #
6934: # |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will #
6935: # neither overflow nor underflow. If AdjFlag = 1, that #
6936: # means that #
6937: # X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380. #
6938: # Hence, exp(X) may overflow or underflow or neither. #
6939: # When that is the case, AdjScale = 2^(M1) where M1 is #
6940: # approximately M. Thus 6.2 will never cause #
6941: # over/underflow. Possible exception in 6.4 is overflow #
6942: # or underflow. The inexact exception is not generated in #
6943: # 6.4. Although one can argue that the inexact flag #
6944: # should always be raised, to simulate that exception #
6945: #	costs more than the flag is worth in practical uses.	#
6946: # #
6947: # Step 7. Return 1 + X. #
6948: # 7.1 ans := X #
6949: # 7.2 Restore user FPCR. #
6950: # 7.3 Return ans := 1 + ans. Exit #
6951: # Notes: For non-zero X, the inexact exception will always be #
6952: # raised by 7.3. That is the only exception raised by 7.3.#
6953: # Note also that we use the FMOVEM instruction to move X #
6954: # in Step 7.1 to avoid unnecessary trapping. (Although #
6955: # the FMOVEM may not seem relevant since X is normalized, #
6956: # the precaution will be useful in the library version of #
6957: # this code where the separate entry for denormalized #
6958: # inputs will be done away with.) #
6959: # #
6960: # Step 8. Handle exp(X) where |X| >= 16380log2. #
6961: # 8.1 If |X| > 16480 log2, go to Step 9. #
6962: # (mimic 2.2 - 2.6) #
6963: # 8.2 N := round-to-integer( X * 64/log2 ) #
6964: # 8.3 Calculate J = N mod 64, J = 0,1,...,63 #
6965: # 8.4 K := (N-J)/64, M1 := truncate(K/2), M = K-M1, #
6966: # AdjFlag := 1. #
6967: # 8.5 Calculate the address of the stored value #
6968: # 2^(J/64). #
6969: # 8.6 Create the values Scale = 2^M, AdjScale = 2^M1. #
6970: # 8.7 Go to Step 3. #
6971: # Notes: Refer to notes for 2.2 - 2.6. #
6972: # #
6973: # Step 9. Handle exp(X), |X| > 16480 log2. #
6974: # 9.1 If X < 0, go to 9.3 #
6975: # 9.2 ans := Huge, go to 9.4 #
6976: # 9.3 ans := Tiny. #
6977: # 9.4 Restore user FPCR. #
6978: # 9.5 Return ans := ans * ans. Exit. #
6979: # Notes: Exp(X) will surely overflow or underflow, depending on #
6980: # X's sign. "Huge" and "Tiny" are respectively large/tiny #
6981: # extended-precision numbers whose square over/underflow #
6982: # with an inexact result. Thus, 9.5 always raises the #
6983: # inexact together with either overflow or underflow. #
6984: # #
6985: # setoxm1d #
6986: # -------- #
6987: # #
6988: # Step 1. Set ans := 0 #
6989: # #
6990: # Step 2. Return ans := X + ans. Exit. #
6991: # Notes: This will return X with the appropriate rounding #
6992: # precision prescribed by the user FPCR. #
6993: # #
6994: # setoxm1 #
6995: # ------- #
6996: # #
6997: # Step 1. Check |X| #
6998: # 1.1 If |X| >= 1/4, go to Step 1.3. #
6999: # 1.2 Go to Step 7. #
7000: # 1.3 If |X| < 70 log(2), go to Step 2. #
7001: # 1.4 Go to Step 10. #
7002: # Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
7003: # However, it is conceivable |X| can be small very often #
7004: # because EXPM1 is intended to evaluate exp(X)-1 #
7005: # accurately when |X| is small. For further details on #
7006: # the comparisons, see the notes on Step 1 of setox. #
7007: # #
7008: # Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
7009: # 2.1 N := round-to-nearest-integer( X * 64/log2 ). #
7010: # 2.2 Calculate J = N mod 64; so J = 0,1,2,..., #
7011: # or 63. #
7012: # 2.3 Calculate M = (N - J)/64; so N = 64M + J. #
7013: # 2.4 Calculate the address of the stored value of #
7014: # 2^(J/64). #
7015: # 2.5 Create the values Sc = 2^M and #
7016: # OnebySc := -2^(-M). #
7017: # Notes: See the notes on Step 2 of setox. #
7018: # #
7019: # Step 3. Calculate X - N*log2/64. #
7020: # 3.1 R := X + N*L1, #
7021: # where L1 := single-precision(-log2/64). #
7022: # 3.2 R := R + N*L2, #
7023: # L2 := extended-precision(-log2/64 - L1).#
7024: # Notes: Applying the analysis of Step 3 of setox in this case #
7025: # shows that |R| <= 0.0055 (note that |X| <= 70 log2 in #
7026: # this case). #
7027: # #
7028: # Step 4. Approximate exp(R)-1 by a polynomial #
7029: # p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6))))) #
7030: # Notes: a) In order to reduce memory access, the coefficients #
7031: # are made as "short" as possible: A1 (which is 1/2), A5 #
7032: # and A6 are single precision; A2, A3 and A4 are double #
7033: # precision. #
7034: # b) Even with the restriction above, #
7035: # |p - (exp(R)-1)| < |R| * 2^(-72.7) #
7036: # for all |R| <= 0.0055. #
7037: # c) To fully utilize the pipeline, p is separated into #
7038: # two independent pieces of roughly equal complexity #
7039: # p = [ R*S*(A2 + S*(A4 + S*A6)) ] + #
7040: # [ R + S*(A1 + S*(A3 + S*A5)) ] #
7041: # where S = R*R. #
7042: # #
7043: # Step 5. Compute 2^(J/64)*p by #
7044: # p := T*p #
7045: # where T and t are the stored values for 2^(J/64). #
7046: # Notes: 2^(J/64) is stored as T and t where T+t approximates #
7047: # 2^(J/64) to roughly 85 bits; T is in extended precision #
7048: # and t is in single precision. Note also that T is #
7049: # rounded to 62 bits so that the last two bits of T are #
7050: # zero. The reason for such a special form is that T-1, #
7051: # T-2, and T-8 will all be exact --- a property that will #
7052: # be exploited in Step 6 below. The total relative error #
7053: # in p is no bigger than 2^(-67.7) compared to the final #
7054: # result. #
7055: # #
7056: # Step 6. Reconstruction of exp(X)-1 #
7057: # exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ). #
7058: # 6.1 If M <= 63, go to Step 6.3. #
7059: # 6.2 ans := T + (p + (t + OnebySc)). Go to 6.6 #
7060: # 6.3 If M >= -3, go to 6.5. #
7061: # 6.4 ans := (T + (p + t)) + OnebySc. Go to 6.6 #
7062: # 6.5 ans := (T + OnebySc) + (p + t). #
7063: # 6.6 Restore user FPCR. #
7064: # 6.7 Return ans := Sc * ans. Exit. #
7065: # Notes: The various arrangements of the expressions give #
7066: # accurate evaluations. #
7067: # #
7068: # Step 7. exp(X)-1 for |X| < 1/4. #
7069: # 7.1 If |X| >= 2^(-65), go to Step 9. #
7070: # 7.2 Go to Step 8. #
7071: # #
7072: # Step 8. Calculate exp(X)-1, |X| < 2^(-65). #
7073: # 8.1 If |X| < 2^(-16312), goto 8.3 #
7074: # 8.2 Restore FPCR; return ans := X - 2^(-16382). #
7075: # Exit. #
7076: # 8.3 X := X * 2^(140). #
7077: # 8.4 Restore FPCR; ans := ans - 2^(-16382). #
7078: # Return ans := ans*2^(140). Exit #
7079: # Notes: The idea is to return "X - tiny" under the user #
7080: # precision and rounding modes. To avoid unnecessary #
7081: # inefficiency, we stay away from denormalized numbers #
7082: # the best we can. For |X| >= 2^(-16312), the #
7083: # straightforward 8.2 generates the inexact exception as #
7084: # the case warrants. #
7085: # #
7086: # Step 9. Calculate exp(X)-1, |X| < 1/4, by a polynomial #
7087: # p = X + X*X*(B1 + X*(B2 + ... + X*B12)) #
7088: # Notes: a) In order to reduce memory access, the coefficients #
7089: # are made as "short" as possible: B1 (which is 1/2), B9 #
7090: # to B12 are single precision; B3 to B8 are double #
7091: # precision; and B2 is double extended. #
7092: # b) Even with the restriction above, #
7093: # |p - (exp(X)-1)| < |X| 2^(-70.6) #
7094: # for all |X| <= 0.251. #
7095: # Note that 0.251 is slightly bigger than 1/4. #
7096: # c) To fully preserve accuracy, the polynomial is #
7097: # computed as #
7098: # X + ( S*B1 + Q ) where S = X*X and #
7099: # Q = X*S*(B2 + X*(B3 + ... + X*B12)) #
7100: # d) To fully utilize the pipeline, Q is separated into #
7101: # two independent pieces of roughly equal complexity #
7102: # Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] + #
7103: # [ S*S*(B3 + S*(B5 + ... + S*B11)) ] #
7104: # #
7105: # Step 10. Calculate exp(X)-1 for |X| >= 70 log 2. #
7106: # 10.1 If X >= 70log2 , exp(X) - 1 = exp(X) for all #
7107: # practical purposes. Therefore, go to Step 1 of setox. #
7108: # 10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical #
7109: # purposes. #
7110: # ans := -1 #
7111: # Restore user FPCR #
7112: # Return ans := ans + 2^(-126). Exit. #
7113: # Notes: 10.2 will always create an inexact and return -1 + tiny #
7114: # in the user rounding precision and mode. #
7115: # #
7116: #########################################################################
7117:
#--Constants for the EXP (setox) and EXPM1 (setoxm1) kernels below.
#--L2 is chosen so that L1 + L2 approximates -log2/64 to about 88 bits,
#--where L1 is the single-precision immediate 0xBC317218 used in Step 3
#--of the argument reduction (see fmul.s/fmul.x pairs in the code).
7118: L2:	long	0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
7119:
#--Double-precision polynomial coefficients A3, A2 for EXP (Step 4);
#--A1 = 1/2 and A4, A5 appear as single-precision immediates in the code.
7120: EEXPA3:	long	0x3FA55555,0x55554CC1
7121: EEXPA2:	long	0x3FC55555,0x55554A54
7122:
#--Double-precision coefficients A4, A3 for the EXPM1 polynomial (Step 4);
#--A1 = 1/2 and A5, A6 appear as single-precision immediates in the code.
7123: EM1A4:	long	0x3F811111,0x11174385
7124: EM1A3:	long	0x3FA55555,0x55554F5A
7125:
#--Double-precision coefficient A2 for EXPM1, padded to 16 bytes.
7126: EM1A2:	long	0x3FC55555,0x55555555,0x00000000,0x00000000
7127:
#--Double-precision coefficients B8..B3 of the |X| < 1/4 EXPM1
#--polynomial (Step 9); B9..B12 are single-precision immediates.
7128: EM1B8:	long	0x3EC71DE3,0xA5774682
7129: EM1B7:	long	0x3EFA01A0,0x19D7CB68
7130:
7131: EM1B6:	long	0x3F2A01A0,0x1A019DF3
7132: EM1B5:	long	0x3F56C16C,0x16C170E2
7133:
7134: EM1B4:	long	0x3F811111,0x11111111
7135: EM1B3:	long	0x3FA55555,0x55555555
7136:
#--Extended-precision coefficient B2 (1/6), padded to 16 bytes.
7137: EM1B2:	long	0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
7138: 	long	0x00000000
7139:
#--TWO140 = 2^(140) and TWON140 = 2^(-140) in double precision; used in
#--Step 8 of EXPM1 to scale tiny arguments away from the denormal range.
7140: TWO140:	long	0x48B00000,0x00000000
7141: TWON140:
7142: 	long	0x37300000,0x00000000
7143:
#--Table of 2^(J/64) for J = 0,1,...,63 (64 entries, 16 bytes each).
#--Each entry is the 12-byte extended-precision leading part T (rounded
#--to 62 bits so its last two bits are zero, making T-1, T-2, T-8 exact)
#--followed by a 4-byte single-precision tail t; T + t approximates
#--2^(J/64) to roughly 85 bits (see the Step 5 notes above).  The code
#--indexes this table with J*16 (lsl.l &4).
7144: EEXPTBL:
7145: 	long	0x3FFF0000,0x80000000,0x00000000,0x00000000
7146: 	long	0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
7147: 	long	0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
7148: 	long	0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
7149: 	long	0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
7150: 	long	0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
7151: 	long	0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
7152: 	long	0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
7153: 	long	0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
7154: 	long	0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
7155: 	long	0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
7156: 	long	0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
7157: 	long	0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
7158: 	long	0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
7159: 	long	0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
7160: 	long	0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
7161: 	long	0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
7162: 	long	0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
7163: 	long	0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
7164: 	long	0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
7165: 	long	0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
7166: 	long	0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
7167: 	long	0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
7168: 	long	0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
7169: 	long	0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
7170: 	long	0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
7171: 	long	0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
7172: 	long	0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
7173: 	long	0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
7174: 	long	0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
7175: 	long	0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
7176: 	long	0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
7177: 	long	0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
7178: 	long	0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
7179: 	long	0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
7180: 	long	0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
7181: 	long	0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
7182: 	long	0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
7183: 	long	0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
7184: 	long	0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
7185: 	long	0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
7186: 	long	0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
7187: 	long	0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
7188: 	long	0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
7189: 	long	0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
7190: 	long	0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
7191: 	long	0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
7192: 	long	0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
7193: 	long	0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
7194: 	long	0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
7195: 	long	0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
7196: 	long	0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
7197: 	long	0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
7198: 	long	0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
7199: 	long	0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
7200: 	long	0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
7201: 	long	0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
7202: 	long	0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
7203: 	long	0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
7204: 	long	0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
7205: 	long	0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
7206: 	long	0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
7207: 	long	0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
7208: 	long	0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
7209:
#--Scratch-frame aliases (L_SCR2/FP_SCR0/FP_SCR1 are frame offsets
#--defined elsewhere in the package).  Note that SC/ONEBYSC (used by
#--EXPM1) overlay the same storage as SCALE/ADJSCALE (used by EXP):
#--  ADJFLAG   nonzero when the huge-|X| path (Step 8) was taken
#--  SCALE     extended-precision 2^(M)
#--  ADJSCALE  extended-precision 2^(M1)
#--  SC        extended-precision 2^(M)    (EXPM1)
#--  ONEBYSC   extended-precision -2^(-M)  (EXPM1)
7210: 	set		ADJFLAG,L_SCR2
7211: 	set		SCALE,FP_SCR0
7212: 	set		ADJSCALE,FP_SCR1
7213: 	set		SC,FP_SCR0
7214: 	set		ONEBYSC,FP_SCR1
7215:
7216: 	global		setox
7217: setox:
#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
#--In:  a0 = pointer to extended-precision X, d0 = user FPCR (rnd prec/mode)
#--Out: fp0 = exp(X); exits through the t_* tail handlers.
7218: #--entry point for EXP(X), here X is finite, non-zero, and not NaN's
7219:
7220: #--Step 1.
#--Compare the biased exponent alone against that of 2^(-65); tiny
#--arguments are handled by the 1+X shortcut at EXPSM (Step 7).
7221: 	mov.l		(%a0),%d1		# load part of input X
7222: 	and.l		&0x7FFF0000,%d1	# biased expo. of X
7223: 	cmp.l		%d1,&0x3FBE0000	# 2^(-65)
7224: 	bge.b		EXPC1		# normal case
7225: 	bra		EXPSM
7226:
7227: EXPC1:
7228: #--The case |X| >= 2^(-65)
#--d1's upper word already holds the sign/biased exponent; inserting the
#--top 16 fraction bits builds the 32-bit "compact form" of |X| described
#--in the Step 1 notes, so the range test is a plain integer compare.
7229: 	mov.w		4(%a0),%d1	# expo. and partial sig. of |X|
7230: 	cmp.l		%d1,&0x400CB167	# 16380 log2 trunc. 16 bits
7231: 	blt.b		EXPMAIN	# normal case
7232: 	bra		EEXPBIG
7233:
7234: EXPMAIN:
7235: #--Step 2.
7236: #--This is the normal branch: 2^(-65) <= |X| < 16380 log2.
7237: 	fmov.x		(%a0),%fp0	# load input from (a0)
7238:
7239: 	fmov.x		%fp0,%fp1
7240: 	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
7241: 	fmovm.x	&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
7242: 	mov.l		&0,ADJFLAG(%a6)
7243: 	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
7244: 	lea		EEXPTBL(%pc),%a1
7245: 	fmov.l		%d1,%fp0		# convert to floating-format
7246:
7247: 	mov.l		%d1,L_SCR1(%a6)	# save N temporarily
7248: 	and.l		&0x3F,%d1		# D0 is J = N mod 64
#--J*16: each EEXPTBL entry (T plus single-precision t) is 16 bytes.
7249: 	lsl.l		&4,%d1
7250: 	add.l		%d1,%a1		# address of 2^(J/64)
7251: 	mov.l		L_SCR1(%a6),%d1
7252: 	asr.l		&6,%d1		# D0 is M
7253: 	add.w		&0x3FFF,%d1	# biased expo. of 2^(M)
7254: 	mov.w		L2(%pc),L_SCR1(%a6)	# prefetch L2, no need in CB
7255:
7256: EXPCONT1:
7257: #--Step 3.
7258: #--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
7259: #--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
#--Also the re-entry point from EEXPBIG (Step 8.7) with ADJFLAG = 1.
#--R := X + N*L1 + N*L2; N*L1 is exact (N <= 22 bits, L1 <= 24 bits)
#--so R carries X - N*log2/64 to nearly full 64-bit precision.
7260: 	fmov.x		%fp0,%fp2
7261: 	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
7262: 	fmul.x		L2(%pc),%fp2	# N * L2, L1+L2 = -log2/64
7263: 	fadd.x		%fp1,%fp0	# X + N*L1
7264: 	fadd.x		%fp2,%fp0	# fp0 is R, reduced arg.
7265:
7266: #--Step 4.
7267: #--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
7268: #-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
7269: #--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
7270: #--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
7271:
7272: 	fmov.x		%fp0,%fp1
7273: 	fmul.x		%fp1,%fp1	# fp1 IS S = R*R
7274:
7275: 	fmov.s		&0x3AB60B70,%fp2	# fp2 IS A5
7276:
7277: 	fmul.x		%fp1,%fp2	# fp2 IS S*A5
7278: 	fmov.x		%fp1,%fp3
7279: 	fmul.s		&0x3C088895,%fp3	# fp3 IS S*A4
7280:
7281: 	fadd.d		EEXPA3(%pc),%fp2	# fp2 IS A3+S*A5
7282: 	fadd.d		EEXPA2(%pc),%fp3	# fp3 IS A2+S*A4
7283:
7284: 	fmul.x		%fp1,%fp2	# fp2 IS S*(A3+S*A5)
#--The integer stores below assemble SCALE = 2^(M) in extended format
#--(biased exponent in d1, mantissa 0x80000000) in between FP operations.
7285: 	mov.w		%d1,SCALE(%a6)	# SCALE is 2^(M) in extended
7286: 	mov.l		&0x80000000,SCALE+4(%a6)
7287: 	clr.l		SCALE+8(%a6)
7288:
7289: 	fmul.x		%fp1,%fp3	# fp3 IS S*(A2+S*A4)
7290:
7291: 	fadd.s		&0x3F000000,%fp2	# fp2 IS A1+S*(A3+S*A5)
7292: 	fmul.x		%fp0,%fp3	# fp3 IS R*S*(A2+S*A4)
7293:
7294: 	fmul.x		%fp1,%fp2	# fp2 IS S*(A1+S*(A3+S*A5))
7295: 	fadd.x		%fp3,%fp0		# fp0 IS R+R*S*(A2+S*A4),
7296:
7297: 	fmov.x		(%a1)+,%fp1	# fp1 is lead. pt. of 2^(J/64)
7298: 	fadd.x		%fp2,%fp0	# fp0 is EXP(R) - 1
7299:
7300: #--Step 5
7301: #--final reconstruction process
7302: #--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
7303:
7304: 	fmul.x		%fp1,%fp0	# 2^(J/64)*(Exp(R)-1)
7305: 	fmovm.x	(%sp)+,&0x30	# fp2 restored {%fp2/%fp3}
#--(%a1) now addresses t, the 4-byte tail of the table entry, since the
#--fmov.x (%a1)+ above advanced a1 past the 12-byte leading part T.
7306: 	fadd.s		(%a1),%fp0	# accurate 2^(J/64)
7307:
7308: 	fadd.x		%fp1,%fp0	# 2^(J/64) + 2^(J/64)*...
7309: 	mov.l		ADJFLAG(%a6),%d1
7310:
7311: #--Step 6
#--If the Step 8 path set ADJFLAG, pre-scale by ADJSCALE = 2^(M1) first;
#--this never over/underflows (see Step 6 notes above).  The final fmul
#--by SCALE runs under the restored user FPCR and may over/underflow;
#--t_catch (defined elsewhere in the package) handles that exception.
7312: 	tst.l		%d1
7313: 	beq.b		NORMAL
7314: ADJUST:
7315: 	fmul.x		ADJSCALE(%a6),%fp0
7316: NORMAL:
7317: 	fmov.l		%d0,%fpcr	# restore user FPCR
7318: 	mov.b		&FMUL_OP,%d1	# last inst is MUL
7319: 	fmul.x		SCALE(%a6),%fp0	# multiply 2^(M)
7320: 	bra		t_catch
7321:
7322: EXPSM:
7323: #--Step 7
#--|X| < 2^(-65): exp(X) rounds to 1+X.  The add is performed under the
#--user FPCR so it raises inexact (X is non-zero); t_pinx2 finishes up.
7324: 	fmovm.x	(%a0),&0x80	# load X
7325: 	fmov.l		%d0,%fpcr
7326: 	fadd.s		&0x3F800000,%fp0	# 1+X in user mode
7327: 	bra		t_pinx2
7328:
7329: EEXPBIG:
7330: #--Step 8
#--16380 log2 <= |X|: exp(X) may over/underflow.  Split the scaling into
#--Scale = 2^(M) and AdjScale = 2^(M1), M1 ~= K/2, so neither factor
#--over/underflows on its own, then rejoin the common path at EXPCONT1.
7331: 	cmp.l		%d1,&0x400CB27C	# 16480 log2
7332: 	bgt.b		EXP2BIG
7333: #--Steps 8.2 -- 8.6
7334: 	fmov.x		(%a0),%fp0	# load input from (a0)
7335:
7336: 	fmov.x		%fp0,%fp1
7337: 	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
7338: 	fmovm.x	&0xc,-(%sp)	# save fp2 {%fp2/%fp3}
7339: 	mov.l		&1,ADJFLAG(%a6)
7340: 	fmov.l		%fp0,%d1	# N = int( X * 64/log2 )
7341: 	lea		EEXPTBL(%pc),%a1
7342: 	fmov.l		%d1,%fp0	# convert to floating-format
7343: 	mov.l		%d1,L_SCR1(%a6)	# save N temporarily
7344: 	and.l		&0x3F,%d1	# D0 is J = N mod 64
#--J*16: each EEXPTBL entry is 16 bytes.
7345: 	lsl.l		&4,%d1
7346: 	add.l		%d1,%a1	# address of 2^(J/64)
7347: 	mov.l		L_SCR1(%a6),%d1
7348: 	asr.l		&6,%d1	# D0 is K
7349: 	mov.l		%d1,L_SCR1(%a6)	# save K temporarily
7350: 	asr.l		&1,%d1	# D0 is M1
7351: 	sub.l		%d1,L_SCR1(%a6)	# a1 is M
7352: 	add.w		&0x3FFF,%d1	# biased expo. of 2^(M1)
7353: 	mov.w		%d1,ADJSCALE(%a6)	# ADJSCALE := 2^(M1)
7354: 	mov.l		&0x80000000,ADJSCALE+4(%a6)
7355: 	clr.l		ADJSCALE+8(%a6)
7356: 	mov.l		L_SCR1(%a6),%d1	# D0 is M
7357: 	add.w		&0x3FFF,%d1	# biased expo. of 2^(M)
7358: 	bra.w		EXPCONT1	# go back to Step 3
7359:
7360: EXP2BIG:
7361: #--Step 9
#--|X| > 16480 log2: exp(X) surely underflows (X < 0) or overflows
#--(X > 0).  The sign byte of X picks the t_unfl2/t_ovfl2 tail handler
#--(defined elsewhere), which raises the exception with inexact.
7362: 	tst.b		(%a0)		# is X positive or negative?
7363: 	bmi		t_unfl2
7364: 	bra		t_ovfl2
7365:
7366: 	global		setoxd
7367: setoxd:
7368: #--entry point for EXP(X), X is denormalized
#--Return 1.0 + sign(X)*2^(-126) under the user FPCR: the correctly
#--rounded exp(X) with the inexact exception always raised (see the
#--setoxd algorithm notes above).  The single-precision addend is built
#--in place on the stack from the sign of X.
7369: 	mov.l		(%a0),-(%sp)
7370: 	andi.l		&0x80000000,(%sp)
7371: 	ori.l		&0x00800000,(%sp)	# sign(X)*2^(-126)
7372:
7373: 	fmov.s		&0x3F800000,%fp0
7374:
7375: 	fmov.l		%d0,%fpcr
7376: 	fadd.s		(%sp)+,%fp0
7377: 	bra		t_pinx2
7378:
7379: 	global		setoxm1
7380: setoxm1:
7381: #--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
#--In:  a0 = pointer to extended-precision X, d0 = user FPCR.
#--Out: fp0 = exp(X)-1; exits through the t_* tail handlers.
7382:
7383: #--Step 1.
7384: #--Step 1.1
#--Exponent-only compare against 1/4; small arguments go to the
#--polynomial/tiny paths at EM1SM (Steps 7-9).
7385: 	mov.l		(%a0),%d1	# load part of input X
7386: 	and.l		&0x7FFF0000,%d1	# biased expo. of X
7387: 	cmp.l		%d1,&0x3FFD0000	# 1/4
7388: 	bge.b		EM1CON1	# |X| >= 1/4
7389: 	bra		EM1SM
7390:
7391: EM1CON1:
7392: #--Step 1.3
7393: #--The case |X| >= 1/4
#--Build the 32-bit compact form of |X| (see Step 1 notes of setox) and
#--compare against 70 log2; larger arguments go to EM1BIG (Step 10).
7394: 	mov.w		4(%a0),%d1	# expo. and partial sig. of |X|
7395: 	cmp.l		%d1,&0x4004C215	# 70log2 rounded up to 16 bits
7396: 	ble.b		EM1MAIN	# 1/4 <= |X| <= 70log2
7397: 	bra		EM1BIG
7398:
7399: EM1MAIN:
7400: #--Step 2.
7401: #--This is the case: 1/4 <= |X| <= 70 log2.
7402: 	fmov.x		(%a0),%fp0	# load input from (a0)
7403:
7404: 	fmov.x		%fp0,%fp1
7405: 	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
7406: 	fmovm.x	&0xc,-(%sp)	# save fp2 {%fp2/%fp3}
7407: 	fmov.l		%fp0,%d1	# N = int( X * 64/log2 )
7408: 	lea		EEXPTBL(%pc),%a1
7409: 	fmov.l		%d1,%fp0	# convert to floating-format
7410:
7411: 	mov.l		%d1,L_SCR1(%a6)	# save N temporarily
7412: 	and.l		&0x3F,%d1	# D0 is J = N mod 64
#--J*16: each EEXPTBL entry is 16 bytes.
7413: 	lsl.l		&4,%d1
7414: 	add.l		%d1,%a1	# address of 2^(J/64)
7415: 	mov.l		L_SCR1(%a6),%d1
7416: 	asr.l		&6,%d1	# D0 is M
7417: 	mov.l		%d1,L_SCR1(%a6)	# save a copy of M
7418:
7419: #--Step 3.
7420: #--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
7421: #--a0 points to 2^(J/64), D0 and a1 both contain M
#--Same reduction as setox: R := X + N*L1 + N*L2 with L1+L2 = -log2/64,
#--giving |R| <= 0.0055 here (|X| <= 70 log2; see Step 3 notes above).
7422: 	fmov.x		%fp0,%fp2
7423: 	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
7424: 	fmul.x		L2(%pc),%fp2	# N * L2, L1+L2 = -log2/64
7425: 	fadd.x		%fp1,%fp0	# X + N*L1
7426: 	fadd.x		%fp2,%fp0	# fp0 is R, reduced arg.
7427: 	add.w		&0x3FFF,%d1	# D0 is biased expo. of 2^M
7428:
7429: #--Step 4.
7430: #--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
7431: #-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
7432: #--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
7433: #--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
7434:
7435: 	fmov.x		%fp0,%fp1
7436: 	fmul.x		%fp1,%fp1	# fp1 IS S = R*R
7437:
7438: 	fmov.s		&0x3950097B,%fp2	# fp2 IS a6
7439:
7440: 	fmul.x		%fp1,%fp2	# fp2 IS S*A6
7441: 	fmov.x		%fp1,%fp3
7442: 	fmul.s		&0x3AB60B6A,%fp3	# fp3 IS S*A5
7443:
7444: 	fadd.d		EM1A4(%pc),%fp2	# fp2 IS A4+S*A6
7445: 	fadd.d		EM1A3(%pc),%fp3	# fp3 IS A3+S*A5
#--Integer stores interleaved with the FP ops build SC = 2^(M) ...
7446: 	mov.w		%d1,SC(%a6)	# SC is 2^(M) in extended
7447: 	mov.l		&0x80000000,SC+4(%a6)
7448: 	clr.l		SC+8(%a6)
7449:
7450: 	fmul.x		%fp1,%fp2	# fp2 IS S*(A4+S*A6)
7451: 	mov.l		L_SCR1(%a6),%d1	# D0 is M
7452: 	neg.w		%d1	# D0 is -M
7453: 	fmul.x		%fp1,%fp3	# fp3 IS S*(A3+S*A5)
7454: 	add.w		&0x3FFF,%d1	# biased expo. of 2^(-M)
7455: 	fadd.d		EM1A2(%pc),%fp2	# fp2 IS A2+S*(A4+S*A6)
7456: 	fadd.s		&0x3F000000,%fp3	# fp3 IS A1+S*(A3+S*A5)
7457:
7458: 	fmul.x		%fp1,%fp2	# fp2 IS S*(A2+S*(A4+S*A6))
#--... and ONEBYSC = -2^(-M) (sign bit OR'd into the exponent word).
7459: 	or.w		&0x8000,%d1	# signed/expo. of -2^(-M)
7460: 	mov.w		%d1,ONEBYSC(%a6)	# OnebySc is -2^(-M)
7461: 	mov.l		&0x80000000,ONEBYSC+4(%a6)
7462: 	clr.l		ONEBYSC+8(%a6)
7463: 	fmul.x		%fp3,%fp1	# fp1 IS S*(A1+S*(A3+S*A5))
7464:
7465: 	fmul.x		%fp0,%fp2	# fp2 IS R*S*(A2+S*(A4+S*A6))
7466: 	fadd.x		%fp1,%fp0	# fp0 IS R+S*(A1+S*(A3+S*A5))
7467:
7468: 	fadd.x		%fp2,%fp0	# fp0 IS EXP(R)-1
7469:
7470: 	fmovm.x	(%sp)+,&0x30	# fp2 restored {%fp2/%fp3}
7471:
7472: #--Step 5
7473: #--Compute 2^(J/64)*p
7474:
7475: 	fmul.x		(%a1),%fp0	# 2^(J/64)*(Exp(R)-1)
7476:
7477: #--Step 6
7478: #--Step 6.1
#--exp(X)-1 = 2^M * ( T + p + t - 2^(-M) ).  The three summation orders
#--below (chosen by M) keep the evaluation accurate; 12(%a1) is t, the
#--single-precision tail of the 16-byte table entry (see Step 6 notes).
7479: 	mov.l		L_SCR1(%a6),%d1	# retrieve M
7480: 	cmp.l		%d1,&63
7481: 	ble.b		MLE63
7482: #--Step 6.2	M >= 64
7483: 	fmov.s		12(%a1),%fp1	# fp1 is t
7484: 	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is t+OnebySc
7485: 	fadd.x		%fp1,%fp0	# p+(t+OnebySc), fp1 released
7486: 	fadd.x		(%a1),%fp0	# T+(p+(t+OnebySc))
7487: 	bra		EM1SCALE
7488: MLE63:
7489: #--Step 6.3	M <= 63
7490: 	cmp.l		%d1,&-3
7491: 	bge.b		MGEN3
7492: MLTN3:
7493: #--Step 6.4	M <= -4
7494: 	fadd.s		12(%a1),%fp0	# p+t
7495: 	fadd.x		(%a1),%fp0	# T+(p+t)
7496: 	fadd.x		ONEBYSC(%a6),%fp0	# OnebySc + (T+(p+t))
7497: 	bra		EM1SCALE
7498: MGEN3:
7499: #--Step 6.5	-3 <= M <= 63
7500: 	fmov.x		(%a1)+,%fp1	# fp1 is T
7501: 	fadd.s		(%a1),%fp0	# fp0 is p+t
7502: 	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is T+OnebySc
7503: 	fadd.x		%fp1,%fp0	# (T+OnebySc)+(p+t)
7504:
7505: EM1SCALE:
7506: #--Step 6.6
#--Final scaling by SC = 2^(M) under the restored user FPCR; t_inx2
#--(defined elsewhere in the package) completes the return.
7507: 	fmov.l		%d0,%fpcr
7508: 	fmul.x		SC(%a6),%fp0
7509: 	bra		t_inx2
7510:
7511: EM1SM:
7512: #--Step 7	|X| < 1/4.
#--d1 still holds the biased exponent of X from Step 1.1.
7513: 	cmp.l		%d1,&0x3FBE0000	# 2^(-65)
7514: 	bge.b		EM1POLY
7515:
7516: EM1TINY:
7517: #--Step 8	|X| < 2^(-65)
#--exp(X)-1 rounds to X - tiny.  For |X| >= 2^(-16312) add -2^(-16382)
#--directly under the user FPCR (raises inexact as warranted); t_catch
#--handles any exception from the final add.
7518: 	cmp.l		%d1,&0x00330000	# 2^(-16312)
7519: 	blt.b		EM12TINY
7520: #--Step 8.2
7521: 	mov.l		&0x80010000,SC(%a6)	# SC is -2^(-16382)
7522: 	mov.l		&0x80000000,SC+4(%a6)
7523: 	clr.l		SC+8(%a6)
7524: 	fmov.x		(%a0),%fp0
7525: 	fmov.l		%d0,%fpcr
7526: 	mov.b		&FADD_OP,%d1	# last inst is ADD
7527: 	fadd.x		SC(%a6),%fp0
7528: 	bra		t_catch
7529:
7530: EM12TINY:
7531: #--Step 8.3
#--|X| < 2^(-16312): pre-scale by 2^(140) so the X - 2^(-16382) subtract
#--stays clear of the denormal range, then undo the scaling with
#--2^(-140) under the user FPCR (see Step 8 notes above).
7532: 	fmov.x		(%a0),%fp0
7533: 	fmul.d		TWO140(%pc),%fp0
7534: 	mov.l		&0x80010000,SC(%a6)
7535: 	mov.l		&0x80000000,SC+4(%a6)
7536: 	clr.l		SC+8(%a6)
7537: 	fadd.x		SC(%a6),%fp0
7538: 	fmov.l		%d0,%fpcr
7539: 	mov.b		&FMUL_OP,%d1	# last inst is MUL
7540: 	fmul.d		TWON140(%pc),%fp0
7541: 	bra		t_catch
7542:
7543: EM1POLY:
7544: #--Step 9	exp(X)-1 by a simple polynomial
#--p = X + X*X*(B1 + X*(B2 + ... + X*B12)), evaluated as
#--X + (S*B1 + Q) with S = X*X and Q split into two interleaved chains
#--(even coefficients in fp1, odd in fp2) to fill the FP pipeline.
7545: 	fmov.x		(%a0),%fp0	# fp0 is X
7546: 	fmul.x		%fp0,%fp0	# fp0 is S := X*X
7547: 	fmovm.x	&0xc,-(%sp)	# save fp2 {%fp2/%fp3}
7548: 	fmov.s		&0x2F30CAA8,%fp1	# fp1 is B12
7549: 	fmul.x		%fp0,%fp1	# fp1 is S*B12
7550: 	fmov.s		&0x310F8290,%fp2	# fp2 is B11
7551: 	fadd.s		&0x32D73220,%fp1	# fp1 is B10+S*B12
7552:
7553: 	fmul.x		%fp0,%fp2	# fp2 is S*B11
7554: 	fmul.x		%fp0,%fp1	# fp1 is S*(B10 + ...
7555:
7556: 	fadd.s		&0x3493F281,%fp2	# fp2 is B9+S*...
7557: 	fadd.d		EM1B8(%pc),%fp1	# fp1 is B8+S*...
7558:
7559: 	fmul.x		%fp0,%fp2	# fp2 is S*(B9+...
7560: 	fmul.x		%fp0,%fp1	# fp1 is S*(B8+...
7561:
7562: 	fadd.d		EM1B7(%pc),%fp2	# fp2 is B7+S*...
7563: 	fadd.d		EM1B6(%pc),%fp1	# fp1 is B6+S*...
7564:
7565: 	fmul.x		%fp0,%fp2	# fp2 is S*(B7+...
7566: 	fmul.x		%fp0,%fp1	# fp1 is S*(B6+...
7567:
7568: 	fadd.d		EM1B5(%pc),%fp2	# fp2 is B5+S*...
7569: 	fadd.d		EM1B4(%pc),%fp1	# fp1 is B4+S*...
7570:
7571: 	fmul.x		%fp0,%fp2	# fp2 is S*(B5+...
7572: 	fmul.x		%fp0,%fp1	# fp1 is S*(B4+...
7573:
7574: 	fadd.d		EM1B3(%pc),%fp2	# fp2 is B3+S*...
7575: 	fadd.x		EM1B2(%pc),%fp1	# fp1 is B2+S*...
7576:
7577: 	fmul.x		%fp0,%fp2	# fp2 is S*(B3+...
7578: 	fmul.x		%fp0,%fp1	# fp1 is S*(B2+...
7579:
7580: 	fmul.x		%fp0,%fp2	# fp2 is S*S*(B3+...)
7581: 	fmul.x		(%a0),%fp1	# fp1 is X*S*(B2...
7582:
7583: 	fmul.s		&0x3F000000,%fp0	# fp0 is S*B1
7584: 	fadd.x		%fp2,%fp1	# fp1 is Q
7585:
7586: 	fmovm.x	(%sp)+,&0x30	# fp2 restored {%fp2/%fp3}
7587:
7588: 	fadd.x		%fp1,%fp0	# fp0 is S*B1+Q
7589:
#--Final add of X under the user FPCR produces the rounded result.
7590: 	fmov.l		%d0,%fpcr
7591: 	fadd.x		(%a0),%fp0
7592: 	bra		t_inx2
7593:
7594: EM1BIG:
7595: #--Step 10	|X| > 70 log2
#--X > 0: exp(X)-1 == exp(X) for all practical purposes, so reuse the
#--EXP path at EXPC1.  X < 0: the answer is -1 + tiny; the fadd under
#--the user FPCR raises inexact and t_minx2 finishes the return.
7596: 	mov.l		(%a0),%d1
7597: 	cmp.l		%d1,&0
7598: 	bgt.w		EXPC1
7599: #--Step 10.2
7600: 	fmov.s		&0xBF800000,%fp0	# fp0 is -1
7601: 	fmov.l		%d0,%fpcr
7602: 	fadd.s		&0x00800000,%fp0	# -1 + 2^(-126)
7603: 	bra		t_minx2
7604:
7605: 	global		setoxm1d
7606: setoxm1d:
7607: #--entry point for EXPM1(X), here X is denormalized
7608: #--Step 0.
#--exp(X)-1 rounds to X itself; t_extdnrm (defined elsewhere in the
#--package) returns the denormalized input under the user FPCR.
7609: 	bra		t_extdnrm
7610:
7611: #########################################################################
7612: # sgetexp(): returns the exponent portion of the input argument. #
7613: # The exponent bias is removed and the exponent value is #
7614: # returned as an extended precision number in fp0. #
7615: # sgetexpd(): handles denormalized numbers. #
7616: # #
7617: # sgetman(): extracts the mantissa of the input argument. The #
7618: # mantissa is converted to an extended precision number w/ #
7619: # an exponent of $3fff and is returned in fp0. The range of #
7620: # the result is [1.0 - 2.0). #
7621: # sgetmand(): handles denormalized numbers. #
7622: # #
7623: # INPUT *************************************************************** #
7624: # a0 = pointer to extended precision input #
7625: # #
7626: # OUTPUT ************************************************************** #
7627: # fp0 = exponent(X) or mantissa(X) #
7628: # #
7629: #########################################################################
7630:
7631: 	global		sgetexp
7632: sgetexp:
#--Return the unbiased exponent of the extended-precision source as an
#--extended-precision value in fp0.  The blt tests the integer ccodes
#--set by the subi.w (fmov.w to an FP register leaves the CCR alone),
#--so a negative exponent also sets the 'N' bit in the emulated FPSR.
7633: 	mov.w		SRC_EX(%a0),%d0	# get the exponent
7634: 	bclr		&0xf,%d0	# clear the sign bit
7635: 	subi.w		&0x3fff,%d0	# subtract off the bias
7636: 	fmov.w		%d0,%fp0	# return exp in fp0
7637: 	blt.b		sgetexpn	# it's negative
7638: 	rts
7639:
7640: sgetexpn:
7641: 	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
7642: 	rts
7643:
7644: 	global		sgetexpd
7645: sgetexpd:
#--Denormalized input: normalize first (norm, defined elsewhere, returns
#--the shift count in d0), so the true exponent is -(shift) - bias.
#--The result is always negative, hence 'N' is set unconditionally.
7646: 	bsr.l		norm		# normalize
7647: 	neg.w		%d0		# new exp = -(shft amt)
7648: 	subi.w		&0x3fff,%d0	# subtract off the bias
7649: 	fmov.w		%d0,%fp0	# return exp in fp0
7650: 	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
7651: 	rts
7652:
7653: 	global		sgetman
7654: sgetman:
#--Return the mantissa of the source scaled into [1.0, 2.0): force the
#--biased exponent to $3fff while keeping the sign.  ori.w sets all 15
#--exponent bits; bclr of bit 14 then leaves exactly $3fff.
7655: 	mov.w		SRC_EX(%a0),%d0	# get the exp
7656: 	ori.w		&0x7fff,%d0	# clear old exp
7657: 	bclr		&0xe,%d0	# make it the new exp +-3fff
7658:
7659: # here, we build the result in a tmp location so as not to disturb the input
7660: 	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)	# copy to tmp loc
7661: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)	# copy to tmp loc
7662: 	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
7663: 	fmov.x		FP_SCR0(%a6),%fp0	# put new value back in fp0
#--bmi tests the integer 'N' flag from the mov.w of the sign/exp word
#--above; the fmov.x to an FP register does not change the CCR.
7664: 	bmi.b		sgetmann	# it's negative
7665: 	rts
7666:
7667: sgetmann:
7668: 	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
7669: 	rts
7670:
7671: #
7672: # For denormalized numbers, shift the mantissa until the j-bit = 1,
7673: # then load the exponent with +/- $3fff.
7674: #
7675: 	global		sgetmand
7676: sgetmand:
#--Denormalized input: normalize so the j-bit is set, then fall into
#--sgetman which rewrites the exponent and returns the mantissa.
7677: 	bsr.l		norm		# normalize exponent
7678: 	bra.b		sgetman
7679:
7680: #########################################################################
7681: # scosh(): computes the hyperbolic cosine of a normalized input #
7682: # scoshd(): computes the hyperbolic cosine of a denormalized input #
7683: # #
7684: # INPUT *************************************************************** #
7685: # a0 = pointer to extended precision input #
7686: # d0 = round precision,mode #
7687: # #
7688: # OUTPUT ************************************************************** #
7689: # fp0 = cosh(X) #
7690: # #
7691: # ACCURACY and MONOTONICITY ******************************************* #
7692: # The returned result is within 3 ulps in 64 significant bit, #
7693: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
7694: # rounded to double precision. The result is provably monotonic #
7695: # in double precision. #
7696: # #
7697: # ALGORITHM *********************************************************** #
7698: # #
7699: # COSH #
7700: # 1. If |X| > 16380 log2, go to 3. #
7701: # #
7702: # 2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae #
7703: # y = |X|, z = exp(Y), and #
7704: # cosh(X) = (1/2)*( z + 1/z ). #
7705: # Exit. #
7706: # #
7707: # 3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5. #
7708: # #
7709: # 4. (16380 log2 < |X| <= 16480 log2) #
7710: # cosh(X) = exp(|X|)/2. #
7711: # However, invoking exp(|X|) may cause premature #
7712: # overflow. Thus, we calculate cosh(X) as follows: #
7713: # Y := |X| #
7714: # Fact := 2**(16380) #
7715: # Y' := Y - 16381 log2 #
7716: # cosh(X) := Fact * exp(Y'). #
7717: # Exit. #
7718: # #
7719: # 5. (|X| > 16480 log2) cosh(X) must overflow. Return #
7720: # Huge*Huge to generate overflow and an infinity with #
7721: # the appropriate sign. Huge is the largest finite number #
7722: # in extended format. Exit. #
7723: # #
7724: #########################################################################
7725:
# 2^16380 in extended precision: biased exp $7FFB = $3FFF + 16380
7726: TWO16380:
7727: long 0x7FFB0000,0x80000000,0x00000000,0x00000000
7728:
7729: global scosh
7730: scosh:
7731: fmov.x (%a0),%fp0 # LOAD INPUT
7732:
# build the "compacted" operand in d1: sign/exp word in the upper half,
# top 16 mantissa bits in the lower half; used for magnitude tests only
7733: mov.l (%a0),%d1
7734: mov.w 4(%a0),%d1
7735: and.l &0x7FFFFFFF,%d1 # strip sign: compare on |X|
7736: cmp.l %d1,&0x400CB167 # 16380 log2, compacted
7737: bgt.b COSHBIG
7738:
7739: #--THIS IS THE USUAL CASE, |X| < 16380 LOG2
7740: #--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
7741:
7742: fabs.x %fp0 # |X|
7743:
7744: mov.l %d0,-(%sp) # save round prec,mode across setox
7745: clr.l %d0 # setox runs in extended, round-nearest
7746: fmovm.x &0x01,-(%sp) # save |X| to stack
7747: lea (%sp),%a0 # pass ptr to |X|
7748: bsr setox # FP0 IS EXP(|X|)
7749: add.l &0xc,%sp # erase |X| from stack
7750: fmul.s &0x3F000000,%fp0 # (1/2)EXP(|X|)
7751: mov.l (%sp)+,%d0
7752:
7753: fmov.s &0x3E800000,%fp1 # (1/4)
7754: fdiv.x %fp0,%fp1 # 1/(2 EXP(|X|))
7755:
7756: fmov.l %d0,%fpcr # restore users round prec,mode
7757: mov.b &FADD_OP,%d1 # last inst is ADD
7758: fadd.x %fp1,%fp0
7759: bra t_catch
7760:
# 16380 log2 < |X|: exp(|X|) alone would overflow prematurely
7761: COSHBIG:
7762: cmp.l %d1,&0x400CB2B3 # 16480 log2, compacted
7763: bgt.b COSHHUGE
7764:
# cosh(X) ~= 2^16380 * exp(|X| - 16381 log2) in this range
7765: fabs.x %fp0
7766: fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
7767: fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
7768:
7769: mov.l %d0,-(%sp) # save round prec,mode across setox
7770: clr.l %d0
7771: fmovm.x &0x01,-(%sp) # save fp0 to stack
7772: lea (%sp),%a0 # pass ptr to fp0
7773: bsr setox
7774: add.l &0xc,%sp # clear fp0 from stack
7775: mov.l (%sp)+,%d0
7776:
7777: fmov.l %d0,%fpcr
7778: mov.b &FMUL_OP,%d1 # last inst is MUL
7779: fmul.x TWO16380(%pc),%fp0 # scale back up; may raise OVFL
7780: bra t_catch
7781:
# |X| > 16480 log2: result is guaranteed to overflow
7782: COSHHUGE:
7783: bra t_ovfl2
7784:
7785: global scoshd
7786: #--COSH(X) = 1 FOR DENORMALIZED X
7787: scoshd:
7788: fmov.s &0x3F800000,%fp0 # single 1.0
7789:
7790: fmov.l %d0,%fpcr # user's round prec,mode
7791: fadd.s &0x00800000,%fp0 # + 2^(-126): forces INEX
7792: bra t_pinx2
7793:
7794: #########################################################################
7795: # ssinh(): computes the hyperbolic sine of a normalized input #
7796: # ssinhd(): computes the hyperbolic sine of a denormalized input #
7797: # #
7798: # INPUT *************************************************************** #
7799: # a0 = pointer to extended precision input #
7800: # d0 = round precision,mode #
7801: # #
7802: # OUTPUT ************************************************************** #
7803: # fp0 = sinh(X) #
7804: # #
7805: # ACCURACY and MONOTONICITY ******************************************* #
7806: # The returned result is within 3 ulps in 64 significant bit, #
7807: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
7808: # rounded to double precision. The result is provably monotonic #
7809: # in double precision. #
7810: # #
7811: # ALGORITHM *********************************************************** #
7812: # #
7813: # SINH #
7814: # 1. If |X| > 16380 log2, go to 3. #
7815: # #
7816: # 2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula #
7817: # y = |X|, sgn = sign(X), and z = expm1(Y), #
7818: # sinh(X) = sgn*(1/2)*( z + z/(1+z) ). #
7819: # Exit. #
7820: # #
7821: # 3. If |X| > 16480 log2, go to 5. #
7822: # #
7823: # 4. (16380 log2 < |X| <= 16480 log2) #
7824: # sinh(X) = sign(X) * exp(|X|)/2. #
7825: # However, invoking exp(|X|) may cause premature overflow. #
7826: # Thus, we calculate sinh(X) as follows: #
7827: # Y := |X| #
7828: # sgn := sign(X) #
7829: # sgnFact := sgn * 2**(16380) #
7830: # Y' := Y - 16381 log2 #
7831: # sinh(X) := sgnFact * exp(Y'). #
7832: # Exit. #
7833: # #
7834: # 5. (|X| > 16480 log2) sinh(X) must overflow. Return #
7835: # sign(X)*Huge*Huge to generate overflow and an infinity with #
7836: # the appropriate sign. Huge is the largest finite number in #
7837: # extended format. Exit. #
7838: # #
7839: #########################################################################
7840:
7841: global ssinh
7842: ssinh:
7843: fmov.x (%a0),%fp0 # LOAD INPUT
7844:
# compacted operand in d1: sign/exp word : top 16 mantissa bits
7845: mov.l (%a0),%d1
7846: mov.w 4(%a0),%d1
7847: mov.l %d1,%a1 # save (compacted) operand
7848: and.l &0x7FFFFFFF,%d1 # |X| for the magnitude test
7849: cmp.l %d1,&0x400CB167 # 16380 log2, compacted
7850: bgt.b SINHBIG
7851:
7852: #--THIS IS THE USUAL CASE, |X| < 16380 LOG2
7853: #--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
7854:
7855: fabs.x %fp0 # Y = |X|
7856:
7857: movm.l &0x8040,-(%sp) # {a1/d0}
7858: fmovm.x &0x01,-(%sp) # save Y on stack
7859: lea (%sp),%a0 # pass ptr to Y
7860: clr.l %d0 # setoxm1 in extended, round-nearest
7861: bsr setoxm1 # FP0 IS Z = EXPM1(Y)
7862: add.l &0xc,%sp # clear Y from stack
7863: fmov.l &0,%fpcr
7864: movm.l (%sp)+,&0x0201 # {a1/d0}
7865:
7866: fmov.x %fp0,%fp1
7867: fadd.s &0x3F800000,%fp1 # 1+Z
7868: fmov.x %fp0,-(%sp) # save Z
7869: fdiv.x %fp1,%fp0 # Z/(1+Z)
# build sign(X)*0.5 as a single: sign bit of X or'ed onto 0x3F000000
7870: mov.l %a1,%d1
7871: and.l &0x80000000,%d1
7872: or.l &0x3F000000,%d1
7873: fadd.x (%sp)+,%fp0 # Z + Z/(1+Z)
7874: mov.l %d1,-(%sp)
7875:
7876: fmov.l %d0,%fpcr # restore users round prec,mode
7877: mov.b &FMUL_OP,%d1 # last inst is MUL
7878: fmul.s (%sp)+,%fp0 # last fp inst - possible exceptions set
7879: bra t_catch
7880:
# 16380 log2 < |X|: exp(|X|) alone would overflow prematurely
7881: SINHBIG:
7882: cmp.l %d1,&0x400CB2B3 # 16480 log2: beyond this we must overflow
7883: bgt t_ovfl
7884: fabs.x %fp0
7885: fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
# build sign(X)*2^16380 in extended format on the stack (low to high)
7886: mov.l &0,-(%sp)
7887: mov.l &0x80000000,-(%sp)
7888: mov.l %a1,%d1
7889: and.l &0x80000000,%d1
7890: or.l &0x7FFB0000,%d1 # $7FFB = biased exp of 2^16380
7891: mov.l %d1,-(%sp) # EXTENDED FMT
7892: fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
7893:
7894: mov.l %d0,-(%sp) # save round prec,mode across setox
7895: clr.l %d0
7896: fmovm.x &0x01,-(%sp) # save fp0 on stack
7897: lea (%sp),%a0 # pass ptr to fp0
7898: bsr setox
7899: add.l &0xc,%sp # clear fp0 from stack
7900:
7901: mov.l (%sp)+,%d0
7902: fmov.l %d0,%fpcr # restore users round prec,mode
7903: mov.b &FMUL_OP,%d1 # last inst is MUL
7904: fmul.x (%sp)+,%fp0 # possible exception
7905: bra t_catch
7906:
7907: global ssinhd
7908: #--SINH(X) = X FOR DENORMALIZED X
7909: ssinhd:
7910: bra t_extdnrm
7911:
7912: #########################################################################
7913: # stanh(): computes the hyperbolic tangent of a normalized input #
7914: # stanhd(): computes the hyperbolic tangent of a denormalized input #
7915: # #
7916: # INPUT *************************************************************** #
7917: # a0 = pointer to extended precision input #
7918: # d0 = round precision,mode #
7919: # #
7920: # OUTPUT ************************************************************** #
7921: # fp0 = tanh(X) #
7922: # #
7923: # ACCURACY and MONOTONICITY ******************************************* #
7924: # The returned result is within 3 ulps in 64 significant bit, #
7925: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
7926: # rounded to double precision. The result is provably monotonic #
7927: # in double precision. #
7928: # #
7929: # ALGORITHM *********************************************************** #
7930: # #
7931: # TANH #
7932: # 1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3. #
7933: # #
7934: # 2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by #
7935: # sgn := sign(X), y := 2|X|, z := expm1(Y), and #
7936: # tanh(X) = sgn*( z/(2+z) ). #
7937: # Exit. #
7938: # #
7939: # 3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1, #
7940: # go to 7. #
7941: # #
7942: # 4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6. #
7943: # #
7944: # 5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by #
7945: # sgn := sign(X), y := 2|X|, z := exp(Y), #
7946: # tanh(X) = sgn - [ sgn*2/(1+z) ]. #
7947: # Exit. #
7948: # #
7949: # 6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we #
7950: # calculate Tanh(X) by #
7951: # sgn := sign(X), Tiny := 2**(-126), #
7952: # tanh(X) := sgn - sgn*Tiny. #
7953: # Exit. #
7954: # #
7955: # 7. (|X| < 2**(-40)). Tanh(X) = X. Exit. #
7956: # #
7957: #########################################################################
7958:
# scratch-area aliases used by stanh (FP_SCR0/L_SCR3 are frame scratch):
# X/XFRAC hold the working operand, SGN its sign bit, V a temp divisor
7959: set X,FP_SCR0
7960: set XFRAC,X+4
7961:
7962: set SGN,L_SCR3
7963:
7964: set V,FP_SCR0
7965:
7966: global stanh
7967: stanh:
7968: fmov.x (%a0),%fp0 # LOAD INPUT
7969:
7970: fmov.x %fp0,X(%a6)
# compacted operand in d1: sign/exp word : top 16 mantissa bits
7971: mov.l (%a0),%d1
7972: mov.w 4(%a0),%d1
7973: mov.l %d1,X(%a6)
7974: and.l &0x7FFFFFFF,%d1
7975: cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
7976: blt.w TANHBORS # yes
7977: cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
7978: bgt.w TANHBORS # yes
7979:
7980: #--THIS IS THE USUAL CASE
7981: #--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
7982:
# double |X| by bumping the exponent; remember the sign in SGN
7983: mov.l X(%a6),%d1
7984: mov.l %d1,SGN(%a6)
7985: and.l &0x7FFF0000,%d1
7986: add.l &0x00010000,%d1 # EXPONENT OF 2|X|
7987: mov.l %d1,X(%a6)
7988: and.l &0x80000000,SGN(%a6)
7989: fmov.x X(%a6),%fp0 # FP0 IS Y = 2|X|
7990:
7991: mov.l %d0,-(%sp) # save round prec,mode across setoxm1
7992: clr.l %d0
7993: fmovm.x &0x1,-(%sp) # save Y on stack
7994: lea (%sp),%a0 # pass ptr to Y
7995: bsr setoxm1 # FP0 IS Z = EXPM1(Y)
7996: add.l &0xc,%sp # clear Y from stack
7997: mov.l (%sp)+,%d0
7998:
7999: fmov.x %fp0,%fp1
8000: fadd.s &0x40000000,%fp1 # Z+2
# V = sign(X) XOR (Z+2), so Z/V carries the sign of X
8001: mov.l SGN(%a6),%d1
8002: fmov.x %fp1,V(%a6)
8003: eor.l %d1,V(%a6)
8004:
8005: fmov.l %d0,%fpcr # restore users round prec,mode
8006: fdiv.x V(%a6),%fp0
8007: bra t_inx2
8008:
# |X| outside (2^-40, (5/2)log2): tiny, large, or huge argument
8009: TANHBORS:
8010: cmp.l %d1,&0x3FFF8000 # |X| < 1 (so |X| < 2^(-40) here)?
8011: blt.w TANHSM
8012:
8013: cmp.l %d1,&0x40048AA1 # |X| >= 50 log2?
8014: bgt.w TANHHUGE
8015:
8016: #-- (5/2) LOG2 < |X| < 50 LOG2,
8017: #--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
8018: #--TANH(X) = SGN - SGN*2/[EXP(Y)+1].
8019:
8020: mov.l X(%a6),%d1
8021: mov.l %d1,SGN(%a6)
8022: and.l &0x7FFF0000,%d1
8023: add.l &0x00010000,%d1 # EXPO OF 2|X|
8024: mov.l %d1,X(%a6) # Y = 2|X|
8025: and.l &0x80000000,SGN(%a6)
8026: mov.l SGN(%a6),%d1
8027: fmov.x X(%a6),%fp0 # Y = 2|X|
8028:
8029: mov.l %d0,-(%sp) # save round prec,mode across setox
8030: clr.l %d0
8031: fmovm.x &0x01,-(%sp) # save Y on stack
8032: lea (%sp),%a0 # pass ptr to Y
8033: bsr setox # FP0 IS EXP(Y)
8034: add.l &0xc,%sp # clear Y from stack
8035: mov.l (%sp)+,%d0
8036: mov.l SGN(%a6),%d1
8037: fadd.s &0x3F800000,%fp0 # EXP(Y)+1
8038:
8039: eor.l &0xC0000000,%d1 # -SIGN(X)*2
8040: fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT
8041: fdiv.x %fp0,%fp1 # -SIGN(X)2 / [EXP(Y)+1 ]
8042:
8043: mov.l SGN(%a6),%d1
8044: or.l &0x3F800000,%d1 # SGN
8045: fmov.s %d1,%fp0 # SGN IN SGL FMT
8046:
8047: fmov.l %d0,%fpcr # restore users round prec,mode
8048: mov.b &FADD_OP,%d1 # last inst is ADD
8049: fadd.x %fp1,%fp0
8050: bra t_inx2
8051:
# |X| <= 2^(-40): tanh(X) = X to working precision
8052: TANHSM:
8053: fmov.l %d0,%fpcr # restore users round prec,mode
8054: mov.b &FMOV_OP,%d1 # last inst is MOVE
8055: fmov.x X(%a6),%fp0 # last inst - possible exception set
8056: bra t_catch
8057:
8058: #---RETURN SGN(X) - SGN(X)EPS
8059: TANHHUGE:
8060: mov.l X(%a6),%d1
8061: and.l &0x80000000,%d1
8062: or.l &0x3F800000,%d1 # sign(X)*1.0 in single format
8063: fmov.s %d1,%fp0
8064: and.l &0x80000000,%d1
8065: eor.l &0x80800000,%d1 # -SIGN(X)*EPS
8066:
8067: fmov.l %d0,%fpcr # restore users round prec,mode
8068: fadd.s %d1,%fp0
8069: bra t_inx2
8070:
8071: global stanhd
8072: #--TANH(X) = X FOR DENORMALIZED X
8073: stanhd:
8074: bra t_extdnrm
8075:
8076: #########################################################################
8077: # slogn(): computes the natural logarithm of a normalized input #
8078: # slognd(): computes the natural logarithm of a denormalized input #
8079: # slognp1(): computes the log(1+X) of a normalized input #
8080: # slognp1d(): computes the log(1+X) of a denormalized input #
8081: # #
8082: # INPUT *************************************************************** #
8083: # a0 = pointer to extended precision input #
8084: # d0 = round precision,mode #
8085: # #
8086: # OUTPUT ************************************************************** #
8087: # fp0 = log(X) or log(1+X) #
8088: # #
8089: # ACCURACY and MONOTONICITY ******************************************* #
8090: # The returned result is within 2 ulps in 64 significant bit, #
8091: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
8092: # rounded to double precision. The result is provably monotonic #
8093: # in double precision. #
8094: # #
8095: # ALGORITHM *********************************************************** #
8096: # LOGN: #
8097: # Step 1. If |X-1| < 1/16, approximate log(X) by an odd #
8098: # polynomial in u, where u = 2(X-1)/(X+1). Otherwise, #
8099: # move on to Step 2. #
8100: # #
8101: # Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first #
8102: # seven significant bits of Y plus 2**(-7), i.e. #
8103: # F = 1.xxxxxx1 in base 2 where the six "x" match those #
8104: # of Y. Note that |Y-F| <= 2**(-7). #
8105: # #
8106: # Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a #
8107: # polynomial in u, log(1+u) = poly. #
8108: # #
8109: # Step 4. Reconstruct #
8110: # log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u) #
8111: # by k*log(2) + (log(F) + poly). The values of log(F) are #
8112: # calculated beforehand and stored in the program. #
8113: # #
8114: # lognp1: #
8115: # Step 1: If |X| < 1/16, approximate log(1+X) by an odd #
8116: # polynomial in u where u = 2X/(2+X). Otherwise, move on #
8117: # to Step 2. #
8118: # #
8119: # Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done #
8120: # in Step 2 of the algorithm for LOGN and compute #
8121: # log(1+X) as k*log(2) + log(F) + poly where poly #
8122: # approximates log(1+u), u = (Y-F)/F. #
8123: # #
8124: # Implementation Notes: #
8125: # Note 1. There are 64 different possible values for F, thus 64 #
8126: # log(F)'s need to be tabulated. Moreover, the values of #
8127: # 1/F are also tabulated so that the division in (Y-F)/F #
8128: # can be performed by a multiplication. #
8129: # #
8130: # Note 2. In Step 2 of lognp1, in order to preserved accuracy, #
8131: # the value Y-F has to be calculated carefully when #
8132: # 1/2 <= X < 3/2. #
8133: # #
8134: # Note 3. To fully exploit the pipeline, polynomials are usually #
8135: # separated into two parts evaluated independently before #
8136: # being added up. #
8137: # #
8138: #########################################################################
# log(2) in extended precision
8139: LOGOF2:
8140: long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
8141:
# handy single-precision constants
8142: one:
8143: long 0x3F800000
8144: zero:
8145: long 0x00000000
8146: infty:
8147: long 0x7F800000
8148: negone:
8149: long 0xBF800000
8150:
# LOGA1..LOGA6: double-precision coefficients of the log(1+u)
# polynomial used on the table-driven (LOGMAIN/LP1CONT1) path
8151: LOGA6:
8152: long 0x3FC2499A,0xB5E4040B
8153: LOGA5:
8154: long 0xBFC555B5,0x848CB7DB
8155:
8156: LOGA4:
8157: long 0x3FC99999,0x987D8730
8158: LOGA3:
8159: long 0xBFCFFFFF,0xFF6F7E97
8160:
8161: LOGA2:
8162: long 0x3FD55555,0x555555A4
8163: LOGA1:
8164: long 0xBFE00000,0x00000008
8165:
# LOGB1..LOGB5: double-precision coefficients of the odd polynomial
# used on the X-near-1 (LP1CONT2) path
8166: LOGB5:
8167: long 0x3F175496,0xADD7DAD6
8168: LOGB4:
8169: long 0x3F3C71C2,0xFE80C7E0
8170:
8171: LOGB3:
8172: long 0x3F624924,0x928BCCFF
8173: LOGB2:
8174: long 0x3F899999,0x999995EC
8175:
8176: LOGB1:
8177: long 0x3FB55555,0x55555555
# 2.0 in double precision (first longword also reads as single 2.0)
8178: TWO:
8179: long 0x40000000,0x00000000
8180:
# 2^(-102) extended: |X| threshold below which slognp1 returns X
8181: LTHOLD:
8182: long 0x3f990000,0x80000000,0x00000000,0x00000000
8183:
# Table of (1/F, log(F)) pairs, each value in extended precision
# (16 bytes; 32 bytes per pair). One pair for each of the 64 possible
# F = 1.xxxxxx1 values (the six "x" bits of Y select the entry); the
# code indexes it with (FFRAC & 0x7E000000) >> 20. The 1/F entries let
# U = (Y-F)/F be computed with a multiply instead of a divide.
8184: LOGTBL:
8185: long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
8186: long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
8187: long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
8188: long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
8189: long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
8190: long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
8191: long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
8192: long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
8193: long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
8194: long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
8195: long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
8196: long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
8197: long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
8198: long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
8199: long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
8200: long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
8201: long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
8202: long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
8203: long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
8204: long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
8205: long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
8206: long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
8207: long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
8208: long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
8209: long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
8210: long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
8211: long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
8212: long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
8213: long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
8214: long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
8215: long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
8216: long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
8217: long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
8218: long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
8219: long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
8220: long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
8221: long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
8222: long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
8223: long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
8224: long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
8225: long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
8226: long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
8227: long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
8228: long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
8229: long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
8230: long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
8231: long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
8232: long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
8233: long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
8234: long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
8235: long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
8236: long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
8237: long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
8238: long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
8239: long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
8240: long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
8241: long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
8242: long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
8243: long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
8244: long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
8245: long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
8246: long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
8247: long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
8248: long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
8249: long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
8250: long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
8251: long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
8252: long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
8253: long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
8254: long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
8255: long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
8256: long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
8257: long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
8258: long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
8259: long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
8260: long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
8261: long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
8262: long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
8263: long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
8264: long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
8265: long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
8266: long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
8267: long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
8268: long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
8269: long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
8270: long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
8271: long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
8272: long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
8273: long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
8274: long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
8275: long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
8276: long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
8277: long 0x3FFE0000,0x94458094,0x45809446,0x00000000
8278: long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
8279: long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
8280: long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
8281: long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
8282: long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
8283: long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
8284: long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
8285: long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
8286: long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
8287: long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
8288: long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
8289: long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
8290: long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
8291: long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
8292: long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
8293: long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
8294: long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
8295: long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
8296: long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
8297: long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
8298: long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
8299: long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
8300: long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
8301: long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
8302: long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
8303: long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
8304: long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
8305: long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
8306: long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
8307: long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
8308: long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
8309: long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
8310: long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
8311: long 0x3FFE0000,0x80808080,0x80808081,0x00000000
8312: long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
8313:
# scratch-area aliases used by the log routines: ADJK is the exponent
# adjustment for denorm inputs; X/XDCARE/XFRAC hold the working operand;
# F/FFRAC hold the table value F; KLOG2 and SAVEU reuse FP_SCR0 once X
# is no longer needed
8314: set ADJK,L_SCR1
8315:
8316: set X,FP_SCR0
8317: set XDCARE,X+2
8318: set XFRAC,X+4
8319:
8320: set F,FP_SCR1
8321: set FFRAC,F+4
8322:
8323: set KLOG2,FP_SCR0
8324:
8325: set SAVEU,FP_SCR0
8326:
8327: global slogn
8328: #--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
8329: slogn:
8330: fmov.x (%a0),%fp0 # LOAD INPUT
8331: mov.l &0x00000000,ADJK(%a6) # normal input: no exponent adjust
8332:
8333: LOGBGN:
8334: #--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
8335: #--A FINITE, NON-ZERO, NORMALIZED NUMBER.
8336:
# compacted operand in d1: sign/exp word : top 16 mantissa bits
8337: mov.l (%a0),%d1
8338: mov.w 4(%a0),%d1
8339:
8340: mov.l (%a0),X(%a6)
8341: mov.l 4(%a0),X+4(%a6)
8342: mov.l 8(%a0),X+8(%a6)
8343:
8344: cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
8345: blt.w LOGNEG # LOG OF NEGATIVE ARGUMENT IS INVALID
8346: # X IS POSITIVE, CHECK IF X IS NEAR 1
8347: cmp.l %d1,&0x3ffef07d # IS X < 15/16?
8348: blt.b LOGMAIN # YES
8349: cmp.l %d1,&0x3fff8841 # IS X > 17/16?
8350: ble.w LOGNEAR1 # NO
8351:
8352: LOGMAIN:
8353: #--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
8354:
8355: #--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
8356: #--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
8357: #--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
8358: #-- = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
8359: #--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
8360: #--LOG(1+U) CAN BE VERY EFFICIENT.
8361: #--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
8362: #--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
8363:
8364: #--GET K, Y, F, AND ADDRESS OF 1/F.
8365: asr.l &8,%d1
8366: asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X
8367: sub.l &0x3FFF,%d1 # THIS IS K
8368: add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM.
8369: lea LOGTBL(%pc),%a0 # BASE ADDRESS OF 1/F AND LOG(F)
8370: fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT
8371:
8372: #--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
8373: mov.l &0x3FFF0000,X(%a6) # X IS NOW Y, I.E. 2^(-K)*X
8374: mov.l XFRAC(%a6),FFRAC(%a6)
8375: and.l &0xFE000000,FFRAC(%a6) # FIRST 7 BITS OF Y
8376: or.l &0x01000000,FFRAC(%a6) # GET F: ATTACH A 1 AT THE EIGHTH BIT
8377: mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F
# index = (6 F-bits) * 32: each table entry is a 32-byte (1/F,logF) pair
8378: and.l &0x7E000000,%d1
8379: asr.l &8,%d1
8380: asr.l &8,%d1
8381: asr.l &4,%d1 # SHIFTED 20, D0 IS THE DISPLACEMENT
8382: add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F
8383:
8384: fmov.x X(%a6),%fp0
8385: mov.l &0x3fff0000,F(%a6) # F exponent word (FFRAC already set)
8386: clr.l F+8(%a6)
8387: fsub.x F(%a6),%fp0 # Y-F
8388: fmovm.x &0xc,-(%sp) # SAVE FP2-3 WHILE FP0 IS NOT READY
8389: #--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
8390: #--REGISTERS SAVED: FPCR, FP1, FP2
8391:
8392: LP1CONT1:
8393: #--AN RE-ENTRY POINT FOR LOGNP1
8394: fmul.x (%a0),%fp0 # FP0 IS U = (Y-F)/F
8395: fmul.x LOGOF2(%pc),%fp1 # GET K*LOG2 WHILE FP0 IS NOT READY
8396: fmov.x %fp0,%fp2
8397: fmul.x %fp2,%fp2 # FP2 IS V=U*U
8398: fmov.x %fp1,KLOG2(%a6) # PUT K*LOG2 IN MEMORY, FREE FP1
8399:
8400: #--LOG(1+U) IS APPROXIMATED BY
8401: #--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
8402: #--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
8403:
8404: fmov.x %fp2,%fp3
8405: fmov.x %fp2,%fp1
8406:
8407: fmul.d LOGA6(%pc),%fp1 # V*A6
8408: fmul.d LOGA5(%pc),%fp2 # V*A5
8409:
8410: fadd.d LOGA4(%pc),%fp1 # A4+V*A6
8411: fadd.d LOGA3(%pc),%fp2 # A3+V*A5
8412:
8413: fmul.x %fp3,%fp1 # V*(A4+V*A6)
8414: fmul.x %fp3,%fp2 # V*(A3+V*A5)
8415:
8416: fadd.d LOGA2(%pc),%fp1 # A2+V*(A4+V*A6)
8417: fadd.d LOGA1(%pc),%fp2 # A1+V*(A3+V*A5)
8418:
8419: fmul.x %fp3,%fp1 # V*(A2+V*(A4+V*A6))
8420: add.l &16,%a0 # ADDRESS OF LOG(F)
8421: fmul.x %fp3,%fp2 # V*(A1+V*(A3+V*A5))
8422:
8423: fmul.x %fp0,%fp1 # U*V*(A2+V*(A4+V*A6))
8424: fadd.x %fp2,%fp0 # U+V*(A1+V*(A3+V*A5))
8425:
8426: fadd.x (%a0),%fp1 # LOG(F)+U*V*(A2+V*(A4+V*A6))
8427: fmovm.x (%sp)+,&0x30 # RESTORE FP2-3
8428: fadd.x %fp1,%fp0 # FP0 IS LOG(F) + LOG(1+U)
8429:
8430: fmov.l %d0,%fpcr # restore users round prec,mode
8431: fadd.x KLOG2(%a6),%fp0 # FINAL ADD
8432: bra t_inx2
8433:
8434:
# X in (15/16, 17/16): use the odd-polynomial expansion in
# U = 2(X-1)/(X+1) instead of the table-driven path
8435: LOGNEAR1:
8436:
8437: # if the input is exactly equal to one, then exit through ld_pzero.
8438: # if these 2 lines weren't here, the correct answer would be returned
8439: # but the INEX2 bit would be set.
8440: fcmp.b %fp0,&0x1 # is it equal to one?
8441: fbeq.l ld_pzero # yes
8442:
8443: #--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
8444: fmov.x %fp0,%fp1
8445: fsub.s one(%pc),%fp1 # FP1 IS X-1
8446: fadd.s one(%pc),%fp0 # FP0 IS X+1
8447: fadd.x %fp1,%fp1 # FP1 IS 2(X-1)
8448: #--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
8449: #--IN U, U = 2(X-1)/(X+1) = FP1/FP0
8450:
8451: LP1CONT2:
8452: #--THIS IS AN RE-ENTRY POINT FOR LOGNP1
8453: fdiv.x %fp0,%fp1 # FP1 IS U
8454: fmovm.x &0xc,-(%sp) # SAVE FP2-3
8455: #--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
8456: #--LET V=U*U, W=V*V, CALCULATE
8457: #--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
8458: #--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
8459: fmov.x %fp1,%fp0
8460: fmul.x %fp0,%fp0 # FP0 IS V
8461: fmov.x %fp1,SAVEU(%a6) # STORE U IN MEMORY, FREE FP1
8462: fmov.x %fp0,%fp1
8463: fmul.x %fp1,%fp1 # FP1 IS W
8464:
8465: fmov.d LOGB5(%pc),%fp3
8466: fmov.d LOGB4(%pc),%fp2
8467:
8468: fmul.x %fp1,%fp3 # W*B5
8469: fmul.x %fp1,%fp2 # W*B4
8470:
8471: fadd.d LOGB3(%pc),%fp3 # B3+W*B5
8472: fadd.d LOGB2(%pc),%fp2 # B2+W*B4
8473:
8474: fmul.x %fp3,%fp1 # W*(B3+W*B5), FP3 RELEASED
8475:
8476: fmul.x %fp0,%fp2 # V*(B2+W*B4)
8477:
8478: fadd.d LOGB1(%pc),%fp1 # B1+W*(B3+W*B5)
8479: fmul.x SAVEU(%a6),%fp0 # FP0 IS U*V
8480:
8481: fadd.x %fp2,%fp1 # B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
8482: fmovm.x (%sp)+,&0x30 # FP2-3 RESTORED
8483:
8484: fmul.x %fp1,%fp0 # U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
8485:
8486: fmov.l %d0,%fpcr # restore users round prec,mode
8487: fadd.x SAVEU(%a6),%fp0 # final add: U + poly
8488: bra t_inx2
8489:
8490: #--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
8491: LOGNEG:
8492: bra t_operr
8493:
8494: global slognd
8495: slognd:
8496: #--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
8497:
8498: mov.l &-100,ADJK(%a6) # INPUT = 2^(ADJK) * FP0
8499:
8500: #----normalize the input value by left shifting k bits (k to be determined
8501: #----below), adjusting exponent and storing -k to ADJK
8502: #----the value TWOTO100 is no longer needed.
8503: #----Note that this code assumes the denormalized input is NON-ZERO.
8504:
8505: movm.l &0x3f00,-(%sp) # save some registers {d2-d7}
8506: mov.l (%a0),%d3 # D3 is exponent of smallest norm. #
8507: mov.l 4(%a0),%d4
8508: mov.l 8(%a0),%d5 # (D4,D5) is (Hi_X,Lo_X)
8509: clr.l %d2 # D2 used for holding K
8510:
8511: tst.l %d4
8512: bne.b Hi_not0
8513:
# high mantissa longword is zero: shift the low longword up by 32,
# then finish normalizing with bfffo on what was Lo_X
8514: Hi_0:
8515: mov.l %d5,%d4
8516: clr.l %d5
8517: mov.l &32,%d2
8518: clr.l %d6
8519: bfffo %d4{&0:&32},%d6
8520: lsl.l %d6,%d4
8521: add.l %d6,%d2 # (D3,D4,D5) is normalized
8522:
8523: mov.l %d3,X(%a6)
8524: mov.l %d4,XFRAC(%a6)
8525: mov.l %d5,XFRAC+4(%a6)
8526: neg.l %d2 # ADJK = -K
8527: mov.l %d2,ADJK(%a6)
8528: fmov.x X(%a6),%fp0
8529: movm.l (%sp)+,&0xfc # restore registers {d2-d7}
8530: lea X(%a6),%a0
8531: bra.w LOGBGN # begin regular log(X)
8532:
# high mantissa longword non-zero: shift (Hi_X,Lo_X) left as a 64-bit
# quantity by k = number of leading zeroes of Hi_X
8533: Hi_not0:
8534: clr.l %d6
8535: bfffo %d4{&0:&32},%d6 # find first 1
8536: mov.l %d6,%d2 # get k
8537: lsl.l %d6,%d4
8538: mov.l %d5,%d7 # a copy of D5
8539: lsl.l %d6,%d5
8540: neg.l %d6
8541: add.l &32,%d6
8542: lsr.l %d6,%d7 # bits of Lo_X that move into Hi_X
8543: or.l %d7,%d4 # (D3,D4,D5) normalized
8544:
8545: mov.l %d3,X(%a6)
8546: mov.l %d4,XFRAC(%a6)
8547: mov.l %d5,XFRAC+4(%a6)
8548: neg.l %d2 # ADJK = -K
8549: mov.l %d2,ADJK(%a6)
8550: fmov.x X(%a6),%fp0
8551: movm.l (%sp)+,&0xfc # restore registers {d2-d7}
8552: lea X(%a6),%a0
8553: bra.w LOGBGN # begin regular log(X)
8554:
8555: global slognp1
8556: #--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
8557: slognp1:
8558: fmov.x (%a0),%fp0 # LOAD INPUT
8559: fabs.x %fp0 # test magnitude
8560: fcmp.x %fp0,LTHOLD(%pc) # compare with min threshold
8561: fbgt.w LP1REAL # if greater, continue
# |X| <= 2^(-102): log(1+X) = X to working precision
8562: fmov.l %d0,%fpcr
8563: mov.b &FMOV_OP,%d1 # last inst is MOVE
8564: fmov.x (%a0),%fp0 # return signed argument
8565: bra t_catch
8566:
8567: LP1REAL:
8568: fmov.x (%a0),%fp0 # LOAD INPUT
8569: mov.l &0x00000000,ADJK(%a6) # no exponent adjust for this path
8570: fmov.x %fp0,%fp1 # FP1 IS INPUT Z
8571: fadd.s one(%pc),%fp0 # X := ROUND(1+Z)
8572: fmov.x %fp0,X(%a6)
# NOTE(review): fills the pad word XDCARE from XFRAC — presumably a
# don't-care word; confirm against the original FPSP sources
8573: mov.w XFRAC(%a6),XDCARE(%a6)
8574: mov.l X(%a6),%d1
8575: cmp.l %d1,&0
8576: ble.w LP1NEG0 # LOG OF ZERO OR -VE
8577: cmp.l %d1,&0x3ffe8000 # IS BOUNDS [1/2,3/2]?
8578: blt.w LOGMAIN
8579: cmp.l %d1,&0x3fffc000
8580: bgt.w LOGMAIN
8581: #--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS ROUNDING 1+Z,
8582: #--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
8583: #--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
8584:
8585: LP1NEAR1:
8586: #--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
8587: cmp.l %d1,&0x3ffef07d
8588: blt.w LP1CARE
8589: cmp.l %d1,&0x3fff8841
8590: bgt.w LP1CARE
8591:
8592: LP1ONE16:
8593: #--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
8594: #--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
8595: fadd.x %fp1,%fp1 # FP1 IS 2Z
8596: fadd.s one(%pc),%fp0 # FP0 IS 1+X
8597: #--U = FP1/FP0
8598: bra.w LP1CONT2
8599:
8600: LP1CARE:
8601: #--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
8602: #--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
8603: #--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
8604: #--THERE ARE ONLY TWO CASES.
8605: #--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
8606: #--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
8607: #--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
8608: #--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
8609:
8610: mov.l XFRAC(%a6),FFRAC(%a6)
8611: and.l &0xFE000000,FFRAC(%a6) # first 7 bits of Y
8612: or.l &0x01000000,FFRAC(%a6) # F OBTAINED
8613: cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
8614: bge.b KISZERO
8615:
8616: KISNEG1:
8617: fmov.s TWO(%pc),%fp0
8618: mov.l &0x3fff0000,F(%a6) # F exponent word (FFRAC already set)
8619: clr.l F+8(%a6)
8620: fsub.x F(%a6),%fp0 # 2-F
# table displacement = (FFRAC & 0x7E000000) >> 20 (32 bytes per entry)
8621: mov.l FFRAC(%a6),%d1
8622: and.l &0x7E000000,%d1
8623: asr.l &8,%d1
8624: asr.l &8,%d1
8625: asr.l &4,%d1 # D0 CONTAINS DISPLACEMENT FOR 1/F
8626: fadd.x %fp1,%fp1 # GET 2Z
8627: fmovm.x &0xc,-(%sp) # SAVE FP2 {%fp2/%fp3}
8628: fadd.x %fp1,%fp0 # FP0 IS Y-F = (2-F)+2Z
8629: lea LOGTBL(%pc),%a0 # A0 IS ADDRESS OF 1/F
8630: add.l %d1,%a0
8631: fmov.s negone(%pc),%fp1 # FP1 IS K = -1
8632: bra.w LP1CONT1
8633:
8634: KISZERO:
8635: fmov.s one(%pc),%fp0
8636: mov.l &0x3fff0000,F(%a6) # F exponent word (FFRAC already set)
8637: clr.l F+8(%a6)
8638: fsub.x F(%a6),%fp0 # 1-F
# table displacement = (FFRAC & 0x7E000000) >> 20 (32 bytes per entry)
8639: mov.l FFRAC(%a6),%d1
8640: and.l &0x7E000000,%d1
8641: asr.l &8,%d1
8642: asr.l &8,%d1
8643: asr.l &4,%d1
8644: fadd.x %fp1,%fp0 # FP0 IS Y-F
8645: fmovm.x &0xc,-(%sp) # FP2 SAVED {%fp2/%fp3}
8646: lea LOGTBL(%pc),%a0
8647: add.l %d1,%a0 # A0 IS ADDRESS OF 1/F
8648: fmov.s zero(%pc),%fp1 # FP1 IS K = 0
8649: bra.w LP1CONT1
8650:
8651: LP1NEG0:
8652: #--FPCR SAVED. D0 IS X IN COMPACT FORM.
8653: cmp.l %d1,&0
8654: blt.b LP1NEG
# 1+Z = 0: log(0) = -infinity, signal divide-by-zero
8655: LP1ZERO:
8656: fmov.s negone(%pc),%fp0
8657:
8658: fmov.l %d0,%fpcr
8659: bra t_dz
8660:
# 1+Z < 0: log of a negative number is an operand error
8661: LP1NEG:
8662: fmov.s zero(%pc),%fp0
8663:
8664: fmov.l %d0,%fpcr
8665: bra t_operr
8666:
8667: 	global		slognp1d
8668: #--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
8669: # Simply return the denorm
# For |Z| denormalized, log(1+Z) ~= Z to working precision, so the
# common denorm tail handler returns the (signed) input and raises
# the appropriate underflow/inexact status.
8670: slognp1d:
8671: 	bra		t_extdnrm
8672: 
8672:
8673: #########################################################################
8674: # satanh(): computes the inverse hyperbolic tangent of a norm input #
8675: # satanhd(): computes the inverse hyperbolic tangent of a denorm input #
8676: # #
8677: # INPUT *************************************************************** #
8678: # a0 = pointer to extended precision input #
8679: # d0 = round precision,mode #
8680: # #
8681: # OUTPUT ************************************************************** #
8682: # fp0 = arctanh(X) #
8683: # #
8684: # ACCURACY and MONOTONICITY ******************************************* #
8685: # The returned result is within 3 ulps in 64 significant bit, #
8686: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
8687: # rounded to double precision. The result is provably monotonic #
8688: # in double precision. #
8689: # #
8690: # ALGORITHM *********************************************************** #
8691: # #
8692: # ATANH #
8693: # 1. If |X| >= 1, go to 3. #
8694: # #
8695: # 2. (|X| < 1) Calculate atanh(X) by #
8696: # sgn := sign(X) #
8697: # y := |X| #
8698: # z := 2y/(1-y) #
8699: # atanh(X) := sgn * (1/2) * logp1(z) #
8700: # Exit. #
8701: # #
8702: # 3. If |X| > 1, go to 5. #
8703: # #
8704: # 4. (|X| = 1) Generate infinity with an appropriate sign and #
8705: # divide-by-zero by #
8706: # sgn := sign(X) #
8707: #	           atanh(X) := sgn / (+0).			#
8708: # Exit. #
8709: # #
8710: # 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
8711: # Exit. #
8712: # #
8713: #########################################################################
8714:
8715: 	global		satanh
# satanh(): arctanh of a normalized extended-precision operand.
#   In:  a0 = pointer to extended-precision X, d0 = round prec/mode
#   Out: fp0 = atanh(X)
# Builds the "compact" form of X in d1: upper word = sign/exponent,
# lower word = top 16 mantissa bits; |compact| >= 0x3FFF8000 means |X| >= 1.
8716: satanh:
8717: 	mov.l		(%a0),%d1
8718: 	mov.w		4(%a0),%d1
8719: 	and.l		&0x7FFFFFFF,%d1
8720: 	cmp.l		%d1,&0x3FFF8000
8721: 	bge.b		ATANHBIG
8722: 
8723: #--THIS IS THE USUAL CASE, |X| < 1
8724: #--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
8725: 
8726: 	fabs.x		(%a0),%fp0	# Y = |X|
8727: 	fmov.x		%fp0,%fp1
8728: 	fneg.x		%fp1		# -Y
8729: 	fadd.x		%fp0,%fp0	# 2Y
8730: 	fadd.s		&0x3F800000,%fp1	# 1-Y
8731: 	fdiv.x		%fp1,%fp0	# 2Y/(1-Y)
# Build SIGN(X)*0.5 as a single-precision constant on the stack; it is
# multiplied in at the end to finish sgn * (1/2) * log1p(Z).
8732: 	mov.l		(%a0),%d1
8733: 	and.l		&0x80000000,%d1
8734: 	or.l		&0x3F000000,%d1	# SIGN(X)*HALF
8735: 	mov.l		%d1,-(%sp)
8736: 
# Call slognp1 in extended precision, round-to-nearest (d0 = 0),
# restoring the caller's precision/mode only for the final multiply.
8737: 	mov.l		%d0,-(%sp)	# save rnd prec,mode
8738: 	clr.l		%d0		# pass ext prec,RN
8739: 	fmovm.x		&0x01,-(%sp)	# save Z on stack
8740: 	lea		(%sp),%a0	# pass ptr to Z
8741: 	bsr		slognp1		# LOG1P(Z)
8742: 	add.l		&0xc,%sp	# clear Z from stack
8743: 
8744: 	mov.l		(%sp)+,%d0	# fetch old prec,mode
8745: 	fmov.l		%d0,%fpcr	# load it
8746: 	mov.b		&FMUL_OP,%d1	# last inst is MUL
8747: 	fmul.s		(%sp)+,%fp0
8748: 	bra		t_catch
8749: 
# |X| >= 1: atanh(1) = +/-inf with divide-by-zero; |X| > 1 is invalid.
8750: ATANHBIG:
8751: 	fabs.x		(%a0),%fp0	# |X|
8752: 	fcmp.s		%fp0,&0x3F800000
8753: 	fbgt		t_operr
8754: 	bra		t_dz
8755: 
8755:
8756: 	global		satanhd
8757: #--ATANH(X) = X FOR DENORMALIZED X
# For denormalized X, atanh(X) ~= X to working precision; the common
# denorm tail handler returns the signed input with underflow status.
8758: satanhd:
8759: 	bra		t_extdnrm
8760: 
8760:
8761: #########################################################################
8762: # slog10(): computes the base-10 logarithm of a normalized input #
8763: # slog10d(): computes the base-10 logarithm of a denormalized input #
8764: # slog2(): computes the base-2 logarithm of a normalized input #
8765: # slog2d(): computes the base-2 logarithm of a denormalized input #
8766: # #
8767: # INPUT *************************************************************** #
8768: # a0 = pointer to extended precision input #
8769: # d0 = round precision,mode #
8770: # #
8771: # OUTPUT ************************************************************** #
8772: # fp0 = log_10(X) or log_2(X) #
8773: # #
8774: # ACCURACY and MONOTONICITY ******************************************* #
8775: # The returned result is within 1.7 ulps in 64 significant bit, #
8776: # i.e. within 0.5003 ulp to 53 bits if the result is subsequently #
8777: # rounded to double precision. The result is provably monotonic #
8778: # in double precision. #
8779: # #
8780: # ALGORITHM *********************************************************** #
8781: # #
8782: # slog10d: #
8783: # #
8784: # Step 0. If X < 0, create a NaN and raise the invalid operation #
8785: # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8786: # Notes: Default means round-to-nearest mode, no floating-point #
8787: # traps, and precision control = double extended. #
8788: # #
8789: # Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
8790: # Notes: Even if X is denormalized, log(X) is always normalized. #
8791: # #
8792: # Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
8793: # 2.1 Restore the user FPCR #
8794: # 2.2 Return ans := Y * INV_L10. #
8795: # #
8796: # slog10: #
8797: # #
8798: # Step 0. If X < 0, create a NaN and raise the invalid operation #
8799: # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8800: # Notes: Default means round-to-nearest mode, no floating-point #
8801: # traps, and precision control = double extended. #
8802: # #
8803: # Step 1. Call sLogN to obtain Y = log(X), the natural log of X. #
8804: # #
8805: # Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
8806: # 2.1 Restore the user FPCR #
8807: # 2.2 Return ans := Y * INV_L10. #
8808: # #
8809: # sLog2d: #
8810: # #
8811: # Step 0. If X < 0, create a NaN and raise the invalid operation #
8812: # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8813: # Notes: Default means round-to-nearest mode, no floating-point #
8814: # traps, and precision control = double extended. #
8815: # #
8816: # Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
8817: # Notes: Even if X is denormalized, log(X) is always normalized. #
8818: # #
8819: #	Step 2.   Compute log_2(X) = log(X) * (1/log(2)).	#
8820: # 2.1 Restore the user FPCR #
8821: # 2.2 Return ans := Y * INV_L2. #
8822: # #
8823: # sLog2: #
8824: # #
8825: # Step 0. If X < 0, create a NaN and raise the invalid operation #
8826: # flag. Otherwise, save FPCR in D1; set FpCR to default. #
8827: # Notes: Default means round-to-nearest mode, no floating-point #
8828: # traps, and precision control = double extended. #
8829: # #
8830: # Step 1. If X is not an integer power of two, i.e., X != 2^k, #
8831: # go to Step 3. #
8832: # #
8833: # Step 2. Return k. #
8834: # 2.1 Get integer k, X = 2^k. #
8835: # 2.2 Restore the user FPCR. #
8836: # 2.3 Return ans := convert-to-double-extended(k). #
8837: # #
8838: # Step 3. Call sLogN to obtain Y = log(X), the natural log of X. #
8839: # #
8840: # Step 4. Compute log_2(X) = log(X) * (1/log(2)). #
8841: # 4.1 Restore the user FPCR #
8842: # 4.2 Return ans := Y * INV_L2. #
8843: # #
8844: #########################################################################
8845:
# Extended-precision conversion constants (same mantissas appear in the
# SMALRN table below as log10(e) and log2(e)):
#   INV_L10 = 1/ln(10) = log10(e), used for log10(X) = ln(X) * INV_L10
#   INV_L2  = 1/ln(2)  = log2(e),  used for log2(X)  = ln(X) * INV_L2
8846: INV_L10:
8847: 	long		0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
8848: 
8849: INV_L2:
8850: 	long		0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
8851: 
8851:
8852: 	global		slog10
8853: #--entry point for Log10(X), X is normalized
#   In:  a0 = pointer to extended-precision X, d0 = round prec/mode
#   Out: fp0 = log10(X) = ln(X) * (1/ln(10))
8854: slog10:
8855: 	fmov.b		&0x1,%fp0
8856: 	fcmp.x		%fp0,(%a0)	# if operand == 1,
8857: 	fbeq.l		ld_pzero	# return an EXACT zero
8858: 
# Sign/exponent longword negative => X < 0 => invalid operation.
8859: 	mov.l		(%a0),%d1
8860: 	blt.w		invalid
# Compute ln(X) in extended precision / RN (d0 = 0), then apply the
# caller's rounding control only on the final scaling multiply.
8861: 	mov.l		%d0,-(%sp)
8862: 	clr.l		%d0
8863: 	bsr		slogn		# log(X), X normal.
8864: 	fmov.l		(%sp)+,%fpcr
8865: 	fmul.x		INV_L10(%pc),%fp0
8866: 	bra		t_inx2
8867: 
8867:
8868: 	global		slog10d
8869: #--entry point for Log10(X), X is denormalized
# Same as slog10 but uses slognd; ln(X) of a denorm is still normalized.
8870: slog10d:
8871: 	mov.l		(%a0),%d1
8872: 	blt.w		invalid
8873: 	mov.l		%d0,-(%sp)
8874: 	clr.l		%d0
8875: 	bsr		slognd		# log(X), X denorm.
8876: 	fmov.l		(%sp)+,%fpcr
8877: 	fmul.x		INV_L10(%pc),%fp0
8878: 	bra		t_minx2
8879: 
8879:
8880: 	global		slog2
8881: #--entry point for Log2(X), X is normalized
#   In:  a0 = pointer to extended-precision X, d0 = round prec/mode
#   Out: fp0 = log2(X)
# Special-cases exact powers of two (mantissa == 1.0) so that
# log2(2^k) = k is returned exactly as a converted integer.
8882: slog2:
8883: 	mov.l		(%a0),%d1
8884: 	blt.w		invalid
8885: 
# Mantissa check: low long must be 0 and high long must be exactly the
# explicit integer bit (0x80000000) for X to be 2^k.
8886: 	mov.l		8(%a0),%d1
8887: 	bne.b		continue	# X is not 2^k
8888: 
8889: 	mov.l		4(%a0),%d1
8890: 	and.l		&0x7FFFFFFF,%d1
8891: 	bne.b		continue
8892: 
8893: #--X = 2^k.
# k = biased exponent - 0x3FFF; k == 0 means X == 1, answer exact +0.
8894: 	mov.w		(%a0),%d1
8895: 	and.l		&0x00007FFF,%d1
8896: 	sub.l		&0x3FFF,%d1
8897: 	beq.l		ld_pzero
8898: 	fmov.l		%d0,%fpcr
8899: 	fmov.l		%d1,%fp0
8900: 	bra		t_inx2
8901: 
# General case: log2(X) = ln(X) * (1/ln(2)), ln computed in ext/RN.
8902: continue:
8903: 	mov.l		%d0,-(%sp)
8904: 	clr.l		%d0
8905: 	bsr		slogn		# log(X), X normal.
8906: 	fmov.l		(%sp)+,%fpcr
8907: 	fmul.x		INV_L2(%pc),%fp0
8908: 	bra		t_inx2
8909: 
# Shared negative-operand exit for slog10/slog10d/slog2/slog2d.
8910: invalid:
8911: 	bra		t_operr
8912: 
8912:
8913: 	global		slog2d
8914: #--entry point for Log2(X), X is denormalized
# Same as the general slog2 path but uses slognd for denorm input.
8915: slog2d:
8916: 	mov.l		(%a0),%d1
8917: 	blt.w		invalid
8918: 	mov.l		%d0,-(%sp)
8919: 	clr.l		%d0
8920: 	bsr		slognd		# log(X), X denorm.
8921: 	fmov.l		(%sp)+,%fpcr
8922: 	fmul.x		INV_L2(%pc),%fp0
8923: 	bra		t_minx2
8924: 
8924:
8925: #########################################################################
8926: # stwotox(): computes 2**X for a normalized input #
8927: # stwotoxd(): computes 2**X for a denormalized input #
8928: # stentox(): computes 10**X for a normalized input #
8929: # stentoxd(): computes 10**X for a denormalized input #
8930: # #
8931: # INPUT *************************************************************** #
8932: # a0 = pointer to extended precision input #
8933: # d0 = round precision,mode #
8934: # #
8935: # OUTPUT ************************************************************** #
8936: # fp0 = 2**X or 10**X #
8937: # #
8938: # ACCURACY and MONOTONICITY ******************************************* #
8939: # The returned result is within 2 ulps in 64 significant bit, #
8940: # i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
8941: # rounded to double precision. The result is provably monotonic #
8942: # in double precision. #
8943: # #
8944: # ALGORITHM *********************************************************** #
8945: # #
8946: # twotox #
8947: # 1. If |X| > 16480, go to ExpBig. #
8948: # #
8949: # 2. If |X| < 2**(-70), go to ExpSm. #
8950: # #
8951: # 3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore #
8952: # decompose N as #
8953: # N = 64(M + M') + j, j = 0,1,2,...,63. #
8954: # #
8955: # 4. Overwrite r := r * log2. Then #
8956: # 2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
8957: # Go to expr to compute that expression. #
8958: # #
8959: # tentox #
8960: # 1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig. #
8961: # #
8962: # 2. If |X| < 2**(-70), go to ExpSm. #
8963: # #
8964: # 3. Set y := X*log_2(10)*64 (base 2 log of 10). Set #
8965: # N := round-to-int(y). Decompose N as #
8966: # N = 64(M + M') + j, j = 0,1,2,...,63. #
8967: # #
8968: # 4. Define r as #
8969: # r := ((X - N*L1)-N*L2) * L10 #
8970: # where L1, L2 are the leading and trailing parts of #
8971: # log_10(2)/64 and L10 is the natural log of 10. Then #
8972: # 10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
8973: # Go to expr to compute that expression. #
8974: # #
8975: # expr #
8976: # 1. Fetch 2**(j/64) from table as Fact1 and Fact2. #
8977: # #
8978: # 2. Overwrite Fact1 and Fact2 by #
8979: # Fact1 := 2**(M) * Fact1 #
8980: # Fact2 := 2**(M) * Fact2 #
8981: # Thus Fact1 + Fact2 = 2**(M) * 2**(j/64). #
8982: # #
8983: # 3. Calculate P where 1 + P approximates exp(r): #
8984: # P = r + r*r*(A1+r*(A2+...+r*A5)). #
8985: # #
8986: # 4. Let AdjFact := 2**(M'). Return #
8987: # AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ). #
8988: # Exit. #
8989: # #
8990: # ExpBig #
8991: # 1. Generate overflow by Huge * Huge if X > 0; otherwise, #
8992: # generate underflow by Tiny * Tiny. #
8993: # #
8994: # ExpSm #
8995: # 1. Return 1 + X. #
8996: # #
8997: #########################################################################
8998:
# Constants and coefficient table for stwotox/stentox (2**X and 10**X).
# L2TEN64/L10TWO1/L10TWO2 implement the leading/trailing split of
# log10(2)/64 used in argument reduction; LOG10/LOG2 are ln(10), ln(2);
# EXPA1..EXPA5 are the polynomial coefficients for exp(r)-1.
8999: L2TEN64:
9000: 	long		0x406A934F,0x0979A371	# 64LOG10/LOG2
9001: L10TWO1:
9002: 	long		0x3F734413,0x509F8000	# LOG2/64LOG10
9003: 
9004: L10TWO2:
9005: 	long		0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
9006: 
9007: LOG10:	long		0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
9008: 
9009: LOG2:	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
9010: 
9011: EXPA5:	long		0x3F56C16D,0x6F7BD0B2
9012: EXPA4:	long		0x3F811112,0x302C712C
9013: EXPA3:	long		0x3FA55555,0x55554CC1
9014: EXPA2:	long		0x3FC55555,0x55554A54
9015: EXPA1:	long		0x3FE00000,0x00000000,0x00000000,0x00000000
9016: 
# TEXPTBL: 64 entries of 16 bytes each, indexed by J (0..63).  The first
# 12 bytes are 2^(J/64) in extended precision (exponent word, two
# mantissa longs); the trailing long holds the single-precision
# correction term picked up as FACT2 by the loaders in stwotox/stentox.
9017: TEXPTBL:
9018: 	long		0x3FFF0000,0x80000000,0x00000000,0x3F738000
9019: 	long		0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
9020: 	long		0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
9021: 	long		0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
9022: 	long		0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
9023: 	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
9024: 	long		0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
9025: 	long		0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
9026: 	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
9027: 	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
9028: 	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
9029: 	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
9030: 	long		0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
9031: 	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
9032: 	long		0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
9033: 	long		0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
9034: 	long		0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
9035: 	long		0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
9036: 	long		0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
9037: 	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
9038: 	long		0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
9039: 	long		0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
9040: 	long		0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
9041: 	long		0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
9042: 	long		0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
9043: 	long		0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
9044: 	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
9045: 	long		0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
9046: 	long		0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
9047: 	long		0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
9048: 	long		0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
9049: 	long		0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
9050: 	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
9051: 	long		0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
9052: 	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
9053: 	long		0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
9054: 	long		0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
9055: 	long		0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
9056: 	long		0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
9057: 	long		0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
9058: 	long		0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
9059: 	long		0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
9060: 	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
9061: 	long		0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
9062: 	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
9063: 	long		0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
9064: 	long		0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
9065: 	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
9066: 	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
9067: 	long		0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
9068: 	long		0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
9069: 	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
9070: 	long		0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
9071: 	long		0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
9072: 	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
9073: 	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
9074: 	long		0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
9075: 	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
9076: 	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
9077: 	long		0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
9078: 	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
9079: 	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
9080: 	long		0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
9081: 	long		0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
9082: 
9082:
# Scratch-frame layout for stwotox/stentox: symbolic offsets into the
# per-exception stack frame (a6-relative).  Note ADJFACT and FACT1 alias
# FP_SCR0 — ADJFACT is only built (in expr) after FACT1 has been consumed.
9083: 	set		INT,L_SCR1
9084: 
9085: 	set		X,FP_SCR0
9086: 	set		XDCARE,X+2
9087: 	set		XFRAC,X+4
9088: 
9089: 	set		ADJFACT,FP_SCR0
9090: 
9091: 	set		FACT1,FP_SCR0
9092: 	set		FACT1HI,FACT1+4
9093: 	set		FACT1LOW,FACT1+8
9094: 
9095: 	set		FACT2,FP_SCR1
9096: 	set		FACT2HI,FACT2+4
9097: 	set		FACT2LOW,FACT2+8
9098: 
9098:
9099: 	global		stwotox
9100: #--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
#   In:  a0 = pointer to extended-precision X, d0 = round prec/mode
#   Out: fp0 = 2**X (falls through to expr for final reconstruction)
# NOTE: the legacy Motorola comments below say "D0" where this 060SP
# port actually uses %d1 for J and M; %d2 carries L and later M'.
9101: stwotox:
9102: 	fmovm.x		(%a0),&0x80	# LOAD INPUT
9103: 
# Compact form of X in d1 (sign/exp word + top mantissa word) for the
# magnitude screens below; X is also stashed in the frame for TEXPBIG.
9104: 	mov.l		(%a0),%d1
9105: 	mov.w		4(%a0),%d1
9106: 	fmov.x		%fp0,X(%a6)
9107: 	and.l		&0x7FFFFFFF,%d1
9108: 
9109: 	cmp.l		%d1,&0x3FB98000	# |X| >= 2**(-70)?
9110: 	bge.b		TWOOK1
9111: 	bra.w		EXPBORS
9112: 
9113: TWOOK1:
9114: 	cmp.l		%d1,&0x400D80C0	# |X| > 16480?
9115: 	ble.b		TWOMAIN
9116: 	bra.w		EXPBORS
9117: 
9118: TWOMAIN:
9119: #--USUAL CASE, 2^(-70) <= |X| <= 16480
9120: 
# Argument reduction: N = round(64*X), X = N/64 + r with |r| <= 1/128,
# then N = 64(M+M') + J with J in 0..63.
9121: 	fmov.x		%fp0,%fp1
9122: 	fmul.s		&0x42800000,%fp1	# 64 * X
9123: 	fmov.l		%fp1,INT(%a6)	# N = ROUND-TO-INT(64 X)
9124: 	mov.l		%d2,-(%sp)
9125: 	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
9126: 	fmov.l		INT(%a6),%fp1	# N --> FLOATING FMT
9127: 	mov.l		INT(%a6),%d1
9128: 	mov.l		%d1,%d2
9129: 	and.l		&0x3F,%d1	# D0 IS J
9130: 	asl.l		&4,%d1		# DISPLACEMENT FOR 2^(J/64)
9131: 	add.l		%d1,%a1		# ADDRESS FOR 2^(J/64)
9132: 	asr.l		&6,%d2		# d2 IS L, N = 64L + J
9133: 	mov.l		%d2,%d1
9134: 	asr.l		&1,%d1		# D0 IS M
9135: 	sub.l		%d1,%d2		# d2 IS M', N = 64(M+M') + J
9136: 	add.l		&0x3FFF,%d2	# bias M' for the ADJFACT exponent
9137: 
9138: #--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
9139: #--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
9140: #--ADJFACT = 2^(M').
9141: #--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
9142: 
9143: 	fmovm.x		&0x0c,-(%sp)	# save fp2/fp3
9144: 
# Load FACT1/FACT2 = 2^(J/64) from the table, scale both exponents by M
# (add.w onto the exponent word), and compute r = (X - N/64) * ln(2).
9145: 	fmul.s		&0x3C800000,%fp1	# (1/64)*N
9146: 	mov.l		(%a1)+,FACT1(%a6)
9147: 	mov.l		(%a1)+,FACT1HI(%a6)
9148: 	mov.l		(%a1)+,FACT1LOW(%a6)
9149: 	mov.w		(%a1)+,FACT2(%a6)
9150: 
9151: 	fsub.x		%fp1,%fp0	# X - (1/64)*INT(64 X)
9152: 
9153: 	mov.w		(%a1)+,FACT2HI(%a6)
9154: 	clr.w		FACT2HI+2(%a6)
9155: 	clr.l		FACT2LOW(%a6)
9156: 	add.w		%d1,FACT1(%a6)
9157: 	fmul.x		LOG2(%pc),%fp0	# FP0 IS R
9158: 	add.w		%d1,FACT2(%a6)
9159: 
9160: 	bra.w		expr
9161: 
9161:
# Common out-of-range tail for stwotox/stentox: either |X| was tiny
# (return 1+X) or huge (force overflow/underflow per the sign of X).
9162: EXPBORS:
9163: #--FPCR, D0 SAVED
9164: 	cmp.l		%d1,&0x3FFF8000
9165: 	bgt.b		TEXPBIG
9166: 
9167: #--|X| IS SMALL, RETURN 1 + X
9168: 
9169: 	fmov.l		%d0,%fpcr	# restore users round prec,mode
9170: 	fadd.s		&0x3F800000,%fp0	# RETURN 1 + X
9171: 	bra		t_pinx2
9172: 
9173: TEXPBIG:
9174: #--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
9175: #--REGISTERS SAVE SO FAR ARE FPCR AND D0
# Sign test uses the copy of X saved into the frame by the entry code.
9176: 	mov.l		X(%a6),%d1
9177: 	cmp.l		%d1,&0
9178: 	blt.b		EXPNEG
9179: 
9180: 	bra		t_ovfl2		# t_ovfl expects positive value
9181: 
9182: EXPNEG:
9183: 	bra		t_unfl2		# t_unfl expects positive value
9184: 
9184:
9185: 	global		stwotoxd
9186: stwotoxd:
9187: #--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
# 2**X ~= 1 + X for denormalized X.  The OR builds a tiny nonzero
# single-precision value with X's sign bit so the add sets the correct
# inexact/rounding behavior (presumably; pattern shared with stentoxd).
9188: 
9189: 	fmov.l		%d0,%fpcr	# set user's rounding mode/precision
9190: 	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
9191: 	mov.l		(%a0),%d1
9192: 	or.l		&0x00800001,%d1
9193: 	fadd.s		%d1,%fp0
9194: 	bra		t_pinx2
9195: 
9195:
9196: 	global		stentox
9197: #--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
#   In:  a0 = pointer to extended-precision X, d0 = round prec/mode
#   Out: fp0 = 10**X (falls through to expr for final reconstruction)
# Mirrors stwotox but reduces via y = X*64*log2(10); r is formed from
# leading/trailing parts of log10(2)/64 and scaled by ln(10).
# Legacy "D0" comments refer to %d1 in this 060SP port.
9198: stentox:
9199: 	fmovm.x		(%a0),&0x80	# LOAD INPUT
9200: 
9201: 	mov.l		(%a0),%d1
9202: 	mov.w		4(%a0),%d1
9203: 	fmov.x		%fp0,X(%a6)
9204: 	and.l		&0x7FFFFFFF,%d1
9205: 
9206: 	cmp.l		%d1,&0x3FB98000	# |X| >= 2**(-70)?
9207: 	bge.b		TENOK1
9208: 	bra.w		EXPBORS
9209: 
9210: TENOK1:
9211: 	cmp.l		%d1,&0x400B9B07	# |X| <= 16480*log2/log10 ?
9212: 	ble.b		TENMAIN
9213: 	bra.w		EXPBORS
9214: 
9215: TENMAIN:
9216: #--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
9217: 
# N = round(X*64*log2(10)); split N = 64(M+M') + J as in stwotox.
9218: 	fmov.x		%fp0,%fp1
9219: 	fmul.d		L2TEN64(%pc),%fp1	# X*64*LOG10/LOG2
9220: 	fmov.l		%fp1,INT(%a6)	# N=INT(X*64*LOG10/LOG2)
9221: 	mov.l		%d2,-(%sp)
9222: 	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
9223: 	fmov.l		INT(%a6),%fp1	# N --> FLOATING FMT
9224: 	mov.l		INT(%a6),%d1
9225: 	mov.l		%d1,%d2
9226: 	and.l		&0x3F,%d1	# D0 IS J
9227: 	asl.l		&4,%d1		# DISPLACEMENT FOR 2^(J/64)
9228: 	add.l		%d1,%a1		# ADDRESS FOR 2^(J/64)
9229: 	asr.l		&6,%d2		# d2 IS L, N = 64L + J
9230: 	mov.l		%d2,%d1
9231: 	asr.l		&1,%d1		# D0 IS M
9232: 	sub.l		%d1,%d2		# d2 IS M', N = 64(M+M') + J
9233: 	add.l		&0x3FFF,%d2	# bias M' for the ADJFACT exponent
9234: 
9235: #--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
9236: #--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
9237: #--ADJFACT = 2^(M').
9238: #--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
9239: 	fmovm.x		&0x0c,-(%sp)	# save fp2/fp3
9240: 
# r = ((X - N*L1) - N*L2) * ln(10), where L1/L2 are the leading and
# trailing parts of log10(2)/64 — the two-stage subtraction preserves
# the low-order bits of the reduced argument.
9241: 	fmov.x		%fp1,%fp2
9242: 
9243: 	fmul.d		L10TWO1(%pc),%fp1	# N*(LOG2/64LOG10)_LEAD
9244: 	mov.l		(%a1)+,FACT1(%a6)
9245: 
9246: 	fmul.x		L10TWO2(%pc),%fp2	# N*(LOG2/64LOG10)_TRAIL
9247: 
9248: 	mov.l		(%a1)+,FACT1HI(%a6)
9249: 	mov.l		(%a1)+,FACT1LOW(%a6)
9250: 	fsub.x		%fp1,%fp0	# X - N L_LEAD
9251: 	mov.w		(%a1)+,FACT2(%a6)
9252: 
9253: 	fsub.x		%fp2,%fp0	# X - N L_TRAIL
9254: 
9255: 	mov.w		(%a1)+,FACT2HI(%a6)
9256: 	clr.w		FACT2HI+2(%a6)
9257: 	clr.l		FACT2LOW(%a6)
9258: 
9259: 	fmul.x		LOG10(%pc),%fp0	# FP0 IS R
9260: 	add.w		%d1,FACT1(%a6)
9261: 	add.w		%d1,FACT2(%a6)
9262: 
9262:
# expr: shared tail of stwotox/stentox.  On entry fp0 = r, d1 = M,
# d2 = biased M', FACT1+FACT2 = 2^M * 2^(J/64), caller's d2 and fp2/fp3
# are on the stack.  Computes exp(r)-1 by polynomial, reconstructs
# 2^(M'+M) * 2^(J/64) * exp(r), and exits through t_catch.
9263: expr:
9264: #--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
9265: #--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
9266: #--FP0 IS R. THE FOLLOWING CODE COMPUTES
9267: #--	2**(M'+M) * 2**(J/64) * EXP(R)
9268: 
# Horner-style evaluation split over fp2/fp3 for pipeline overlap:
# P = r + r*s*(A2+s*A4) + s*(A1+s*(A3+s*A5)), with s = r*r.
9269: 	fmov.x		%fp0,%fp1
9270: 	fmul.x		%fp1,%fp1	# FP1 IS S = R*R
9271: 
9272: 	fmov.d		EXPA5(%pc),%fp2	# FP2 IS A5
9273: 	fmov.d		EXPA4(%pc),%fp3	# FP3 IS A4
9274: 
9275: 	fmul.x		%fp1,%fp2	# FP2 IS S*A5
9276: 	fmul.x		%fp1,%fp3	# FP3 IS S*A4
9277: 
9278: 	fadd.d		EXPA3(%pc),%fp2	# FP2 IS A3+S*A5
9279: 	fadd.d		EXPA2(%pc),%fp3	# FP3 IS A2+S*A4
9280: 
9281: 	fmul.x		%fp1,%fp2	# FP2 IS S*(A3+S*A5)
9282: 	fmul.x		%fp1,%fp3	# FP3 IS S*(A2+S*A4)
9283: 
9284: 	fadd.d		EXPA1(%pc),%fp2	# FP2 IS A1+S*(A3+S*A5)
9285: 	fmul.x		%fp0,%fp3	# FP3 IS R*S*(A2+S*A4)
9286: 
9287: 	fmul.x		%fp1,%fp2	# FP2 IS S*(A1+S*(A3+S*A5))
9288: 	fadd.x		%fp3,%fp0	# FP0 IS R+R*S*(A2+S*A4)
9289: 	fadd.x		%fp2,%fp0	# FP0 IS EXP(R) - 1
9290: 
9291: 	fmovm.x		(%sp)+,&0x30	# restore fp2/fp3
9292: 
9293: #--FINAL RECONSTRUCTION PROCESS
9294: #--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1) - (1 OR 0)
9295: 
9296: 	fmul.x		FACT1(%a6),%fp0
9297: 	fadd.x		FACT2(%a6),%fp0
9298: 	fadd.x		FACT1(%a6),%fp0
9299: 
# Build ADJFACT = 2^(M') in place (it aliases FACT1, which is now
# consumed): exponent word from d2, mantissa 1.0.  Final multiply is
# performed under the caller's rounding control and routed to t_catch.
9300: 	fmov.l		%d0,%fpcr	# restore users round prec,mode
9301: 	mov.w		%d2,ADJFACT(%a6)	# INSERT EXPONENT
9302: 	mov.l		(%sp)+,%d2
9303: 	mov.l		&0x80000000,ADJFACT+4(%a6)
9304: 	clr.l		ADJFACT+8(%a6)
9305: 	mov.b		&FMUL_OP,%d1	# last inst is MUL
9306: 	fmul.x		ADJFACT(%a6),%fp0	# FINAL ADJUSTMENT
9307: 	bra		t_catch
9308: 
9308:
9309: 	global		stentoxd
9310: stentoxd:
9311: #--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
# 10**X ~= 1 + X for denormalized X; identical strategy to stwotoxd.
9312: 
9313: 	fmov.l		%d0,%fpcr	# set user's rounding mode/precision
9314: 	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
9315: 	mov.l		(%a0),%d1
9316: 	or.l		&0x00800001,%d1
9317: 	fadd.s		%d1,%fp0
9318: 	bra		t_pinx2
9319: 
9319:
9320: #########################################################################
9321: # smovcr(): returns the ROM constant at the offset specified in d1 #
9322: # rounded to the mode and precision specified in d0. #
9323: # #
9324: # INPUT *************************************************************** #
9325: # d0 = rnd prec,mode #
9326: # d1 = ROM offset #
9327: # #
9328: # OUTPUT ************************************************************** #
9329: # fp0 = the ROM constant rounded to the user's rounding mode,prec #
9330: # #
9331: #########################################################################
9332:
9333: 	global		smovcr
# smovcr(): FMOVECR emulation — return the ROM constant selected by the
# offset in d1, rounded per the precision/mode in d0.
#   In:  d0 = rnd prec,mode (packed in upper byte nibble), d1 = ROM offset
#   Out: fp0 = constant rounded to the user's precision/mode
# Repacks d0 as: hi word = rnd prec (0 => extended), lo word = rnd mode.
9334: smovcr:
9335: 	mov.l		%d1,-(%sp)	# save rom offset for a sec
9336: 
9337: 	lsr.b		&0x4,%d0	# shift ctrl bits to lo
9338: 	mov.l		%d0,%d1	# make a copy
9339: 	andi.w		&0x3,%d1	# extract rnd mode
9340: 	andi.w		&0xc,%d0	# extract rnd prec
9341: 	swap		%d0	# put rnd prec in hi
9342: 	mov.w		%d1,%d0	# put rnd mode in lo
9343: 
9344: 	mov.l		(%sp)+,%d1	# get rom offset
9345: 
9346: #
9347: # check range of offset
9348: #
9349: 	tst.b		%d1	# if zero, offset is to pi
9350: 	beq.b		pi_tbl	# it is pi
9351: 	cmpi.b		%d1,&0x0a	# check range $01 - $0a
9352: 	ble.b		z_val	# if in this range, return zero
9353: 	cmpi.b		%d1,&0x0e	# check range $0b - $0e
9354: 	ble.b		sm_tbl	# valid constants in this range
9355: 	cmpi.b		%d1,&0x2f	# check range $10 - $2f
9356: 	ble.b		z_val	# if in this range, return zero
9357: 	cmpi.b		%d1,&0x3f	# check range $30 - $3f
9358: 	ble.b		bg_tbl	# valid constants in this range
9359: 
# Undefined offsets return +0, matching 881/882 FMOVECR behavior for
# unassigned ROM locations.
9360: z_val:
9361: 	bra.l		ld_pzero	# return a zero
9362: 
9363: #
9364: # the answer is PI rounded to the proper precision.
9365: #
9366: # fetch a pointer to the answer table relating to the proper rounding
9367: # precision.
9368: #
# Three pre-rounded images of pi exist (RN / RZ,RM / RP); pi is always
# inexact, so all paths go through set_finx.
9369: pi_tbl:
9370: 	tst.b		%d0	# is rmode RN?
9371: 	bne.b		pi_not_rn	# no
9372: pi_rn:
9373: 	lea.l		PIRN(%pc),%a0	# yes; load PI RN table addr
9374: 	bra.w		set_finx
9375: pi_not_rn:
9376: 	cmpi.b		%d0,&rp_mode	# is rmode RP?
9377: 	beq.b		pi_rp	# yes
9378: pi_rzrm:
9379: 	lea.l		PIRZRM(%pc),%a0	# no; load PI RZ,RM table addr
9380: 	bra.b		set_finx
9381: pi_rp:
9382: 	lea.l		PIRP(%pc),%a0	# load PI RP table addr
9383: 	bra.b		set_finx
9384: 
9385: #
9386: # the answer is one of:
9387: # $0B	log10(2)	(inexact)
9388: # $0C	e		(inexact)
9389: # $0D	log2(e)		(inexact)
9390: # $0E	log10(e)	(exact)
9391: #
9392: # fetch a pointer to the answer table relating to the proper rounding
9393: # precision.
9394: #
9395: sm_tbl:
9396: 	subi.b		&0xb,%d1	# make offset in 0-4 range
9397: 	tst.b		%d0	# is rmode RN?
9398: 	bne.b		sm_not_rn	# no
9399: sm_rn:
9400: 	lea.l		SMALRN(%pc),%a0	# yes; load RN table addr
9401: sm_tbl_cont:
# After the subtract, offset 3 ($0E, log10(e)) is the only exact entry.
9402: 	cmpi.b		%d1,&0x2	# is result log10(e)?
9403: 	ble.b		set_finx	# no; answer is inexact
9404: 	bra.b		no_finx	# yes; answer is exact
9405: sm_not_rn:
9406: 	cmpi.b		%d0,&rp_mode	# is rmode RP?
9407: 	beq.b		sm_rp	# yes
9408: sm_rzrm:
9409: 	lea.l		SMALRZRM(%pc),%a0	# no; load RZ,RM table addr
9410: 	bra.b		sm_tbl_cont
9411: sm_rp:
9412: 	lea.l		SMALRP(%pc),%a0	# load RP table addr
9413: 	bra.b		sm_tbl_cont
9414: 
9415: #
9416: # the answer is one of:
9417: # $30	ln(2)		(inexact)
9418: # $31	ln(10)		(inexact)
9419: # $32	10^0		(exact)
9420: # $33	10^1		(exact)
9421: # $34	10^2		(exact)
9422: # $35	10^4		(exact)
9423: # $36	10^8		(exact)
9424: # $37	10^16		(exact)
9425: # $38	10^32		(inexact)
9426: # $39	10^64		(inexact)
9427: # $3A	10^128		(inexact)
9428: # $3B	10^256		(inexact)
9429: # $3C	10^512		(inexact)
9430: # $3D	10^1024		(inexact)
9431: # $3E	10^2048		(inexact)
9432: # $3F	10^4096		(inexact)
9433: #
9434: # fetch a pointer to the answer table relating to the proper rounding
9435: # precision.
9436: #
9437: bg_tbl:
9438: 	subi.b		&0x30,%d1	# make offset in 0-f range
9439: 	tst.b		%d0	# is rmode RN?
9440: 	bne.b		bg_not_rn	# no
9441: bg_rn:
9442: 	lea.l		BIGRN(%pc),%a0	# yes; load RN table addr
9443: bg_tbl_cont:
9444: 	cmpi.b		%d1,&0x1	# is offset <= $31?
9445: 	ble.b		set_finx	# yes; answer is inexact
9446: 	cmpi.b		%d1,&0x7	# is $32 <= offset <= $37?
9447: 	ble.b		no_finx	# yes; answer is exact
9448: 	bra.b		set_finx	# no; answer is inexact
9449: bg_not_rn:
9450: 	cmpi.b		%d0,&rp_mode	# is rmode RP?
9451: 	beq.b		bg_rp	# yes
9452: bg_rzrm:
9453: 	lea.l		BIGRZRM(%pc),%a0	# no; load RZ,RM table addr
9454: 	bra.b		bg_tbl_cont
9455: bg_rp:
9456: 	lea.l		BIGRP(%pc),%a0	# load RP table addr
9457: 	bra.b		bg_tbl_cont
9458: 
9459: # answer is inexact, so set INEX2 and AINEX in the user's FPSR.
9460: set_finx:
9461: 	ori.l		&inx2a_mask,USER_FPSR(%a6)	# set INEX2/AINEX
9462: no_finx:
# a0 = table base, d1 = entry index; each table entry is 12 bytes
# (extended-precision value).  If precision is extended the table value
# is returned directly; otherwise it is rounded via _round.
9463: 	mulu.w		&0xc,%d1	# offset points into tables
9464: 	swap		%d0	# put rnd prec in lo word
9465: 	tst.b		%d0	# is precision extended?
9466: 
9467: 	bne.b		not_ext	# if xprec, do not call round
9468: 
9469: # Precision is extended
9470: 	fmovm.x		(%a0,%d1.w),&0x80	# return result in fp0
9471: 	rts
9472: 
9473: # Precision is single or double
9474: not_ext:
9475: 	swap		%d0	# rnd prec in upper word
9476: 
9477: # call round() to round the answer to the proper precision.
9478: # exponents out of range for single or double DO NOT cause underflow
9479: # or overflow.
9480: 	mov.w		0x0(%a0,%d1.w),FP_SCR1_EX(%a6)	# load first word
9481: 	mov.l		0x4(%a0,%d1.w),FP_SCR1_HI(%a6)	# load second word
9482: 	mov.l		0x8(%a0,%d1.w),FP_SCR1_LO(%a6)	# load third word
9483: 	mov.l		%d0,%d1
9484: 	clr.l		%d0	# clear g,r,s
9485: 	lea		FP_SCR1(%a6),%a0	# pass ptr to answer
9486: 	clr.w		LOCAL_SGN(%a0)	# sign always positive
9487: 	bsr.l		_round	# round the mantissa
9488: 
9489: 	fmovm.x		(%a0),&0x80	# return rounded result in fp0
9490: 	rts
9491: 
9491:
9492: 	align		0x4
9493: 
# FMOVECR constant ROM images, pre-rounded per rounding mode.  For each
# constant there are up to three variants: RN (to nearest), RZ/RM
# (toward zero / minus infinity — same image since all constants are
# positive), and RP (toward plus infinity); the variants differ only in
# the final mantissa bit where the value is inexact.
9494: PIRN:	long		0x40000000,0xc90fdaa2,0x2168c235	# pi
9495: PIRZRM:	long		0x40000000,0xc90fdaa2,0x2168c234	# pi
9496: PIRP:	long		0x40000000,0xc90fdaa2,0x2168c235	# pi
9497: 
# Small-offset constants ($0B-$0E); trailing zero entry pads the table.
9498: SMALRN:	long		0x3ffd0000,0x9a209a84,0xfbcff798	# log10(2)
9499: 	long		0x40000000,0xadf85458,0xa2bb4a9a	# e
9500: 	long		0x3fff0000,0xb8aa3b29,0x5c17f0bc	# log2(e)
9501: 	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
9502: 	long		0x00000000,0x00000000,0x00000000	# 0.0
9503: 
9504: SMALRZRM:
9505: 	long		0x3ffd0000,0x9a209a84,0xfbcff798	# log10(2)
9506: 	long		0x40000000,0xadf85458,0xa2bb4a9a	# e
9507: 	long		0x3fff0000,0xb8aa3b29,0x5c17f0bb	# log2(e)
9508: 	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
9509: 	long		0x00000000,0x00000000,0x00000000	# 0.0
9510: 
9511: SMALRP:	long		0x3ffd0000,0x9a209a84,0xfbcff799	# log10(2)
9512: 	long		0x40000000,0xadf85458,0xa2bb4a9b	# e
9513: 	long		0x3fff0000,0xb8aa3b29,0x5c17f0bc	# log2(e)
9514: 	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
9515: 	long		0x00000000,0x00000000,0x00000000	# 0.0
9516: 
# Big-offset constants ($30-$3F): ln(2), ln(10), then powers of ten.
9517: BIGRN:	long		0x3ffe0000,0xb17217f7,0xd1cf79ac	# ln(2)
9518: 	long		0x40000000,0x935d8ddd,0xaaa8ac17	# ln(10)
9519: 
9520: 	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
9521: 	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
9522: 	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
9523: 	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
9524: 	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
9525: 	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
9526: 	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
9527: 	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
9528: 	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
9529: 	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
9530: 	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
9531: 	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
9532: 	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
9533: 	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
9534: 
9535: BIGRZRM:
9536: 	long		0x3ffe0000,0xb17217f7,0xd1cf79ab	# ln(2)
9537: 	long		0x40000000,0x935d8ddd,0xaaa8ac16	# ln(10)
9538: 
9539: 	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
9540: 	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
9541: 	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
9542: 	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
9543: 	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
9544: 	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
9545: 	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
9546: 	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
9547: 	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
9548: 	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
9549: 	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
9550: 	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
9551: 	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
9552: 	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
9553: 
9554: BIGRP:
9555: 	long		0x3ffe0000,0xb17217f7,0xd1cf79ac	# ln(2)
9556: 	long		0x40000000,0x935d8ddd,0xaaa8ac17	# ln(10)
9557: 
9558: 	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
9559: 	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
9560: 	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
9561: 	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
9562: 	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
9563: 	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
9564: 	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
9565: 	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
9566: 	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
9567: 	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
9568: 	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
9569: 	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
9570: 	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
9571: 	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
9572: 
9572:
9573: #########################################################################
9574: # sscale(): computes the destination operand scaled by the source #
9575: # operand. If the absoulute value of the source operand is #
9576: # >= 2^14, an overflow or underflow is returned. #
9577: # #
9578: # INPUT *************************************************************** #
9579: # a0 = pointer to double-extended source operand X #
9580: # a1 = pointer to double-extended destination operand Y #
9581: # #
9582: # OUTPUT ************************************************************** #
9583: # fp0 = scale(X,Y) #
9584: # #
9585: #########################################################################
9586:
# sscale(): dst scaled by 2^(int(src)); computed as a single fmul of the
# normalized dst mantissa by a constructed power-of-two (or denorm) factor,
# so the final fmul can raise and report the correct IEEE exceptions via
# t_catch2(). d0 (rnd prec/mode) is stacked on entry and restored into
# %fpcr just before the final fp op.
9587: set SIGN, L_SCR1
9588:
9589: global sscale
9590: sscale:
9591: mov.l %d0,-(%sp) # store off ctrl bits for now
9592:
9593: mov.w DST_EX(%a1),%d1 # get dst exponent
9594: smi.b SIGN(%a6) # use SIGN to hold dst sign
9595: andi.l &0x00007fff,%d1 # strip sign from dst exp
9596:
9597: mov.w SRC_EX(%a0),%d0 # check src bounds
9598: andi.w &0x7fff,%d0 # clr src sign bit
9599: cmpi.w %d0,&0x3fff # is src ~ ZERO?
9600: blt.w src_small # yes
9601: cmpi.w %d0,&0x400c # no; is src too big?
9602: bgt.w src_out # yes
9603:
9604: #
9605: # Source is within 2^14 range.
9606: #
9607: src_ok:
9608: fintrz.x SRC(%a0),%fp0 # calc int of src
9609: fmov.l %fp0,%d0 # int src to d0
9610: # don't want any accrued bits from the fintrz showing up later since
9611: # we may need to read the fpsr for the last fp op in t_catch2().
9612: fmov.l &0x0,%fpsr
9613:
# tst.b of the mantissa's high byte: msb set => normalized, clear => denorm.
9614: tst.b DST_HI(%a1) # is dst denormalized?
9615: bmi.b sok_norm
9616:
9617: # the dst is a DENORM. normalize the DENORM and add the adjustment to
9618: # the src value. then, jump to the norm part of the routine.
9619: sok_dnrm:
9620: mov.l %d0,-(%sp) # save src for now
9621:
9622: mov.w DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
9623: mov.l DST_HI(%a1),FP_SCR0_HI(%a6)
9624: mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
9625:
9626: lea FP_SCR0(%a6),%a0 # pass ptr to DENORM
9627: bsr.l norm # normalize the DENORM
9628: neg.l %d0
9629: add.l (%sp)+,%d0 # add adjustment to src
9630:
9631: fmovm.x FP_SCR0(%a6),&0x80 # load normalized DENORM
9632:
9633: cmpi.w %d0,&-0x3fff # is the shft amt really low?
9634: bge.b sok_norm2 # thank goodness no
9635:
9636: # the multiply factor that we're trying to create should be a denorm
9637: # for the multiply to work. therefore, we're going to actually do a
9638: # multiply with a denorm which will cause an unimplemented data type
9639: # exception to be put into the machine which will be caught and corrected
9640: # later. we don't do this with the DENORMs above because this method
9641: # is slower. but, don't fret, I don't see it being used much either.
9642: fmov.l (%sp)+,%fpcr # restore user fpcr
9643: mov.l &0x80000000,%d1 # load normalized mantissa
9644: subi.l &-0x3fff,%d0 # how many should we shift?
9645: neg.l %d0 # make it positive
9646: cmpi.b %d0,&0x20 # is it > 32?
9647: bge.b sok_dnrm_32 # yes
9648: lsr.l %d0,%d1 # no; bit stays in upper lw
# build the 12-byte extended-precision denorm factor on the stack:
# exponent lw, high mantissa lw, low mantissa lw (pushed in reverse).
9649: clr.l -(%sp) # insert zero low mantissa
9650: mov.l %d1,-(%sp) # insert new high mantissa
9651: clr.l -(%sp) # make zero exponent
9652: bra.b sok_norm_cont
9653: sok_dnrm_32:
9654: subi.b &0x20,%d0 # get shift count
9655: lsr.l %d0,%d1 # make low mantissa longword
9656: mov.l %d1,-(%sp) # insert new low mantissa
9657: clr.l -(%sp) # insert zero high mantissa
9658: clr.l -(%sp) # make zero exponent
9659: bra.b sok_norm_cont
9660:
9661: # the src will force the dst to a DENORM value or worse. so, let's
9662: # create an fp multiply that will create the result.
9663: sok_norm:
9664: fmovm.x DST(%a1),&0x80 # load fp0 with normalized src
9665: sok_norm2:
9666: fmov.l (%sp)+,%fpcr # restore user fpcr
9667:
# build a normalized power-of-two factor: exp = 0x3fff + shift amount,
# mantissa = 0x80000000 00000000.
9668: addi.w &0x3fff,%d0 # turn src amt into exp value
9669: swap %d0 # put exponent in high word
9670: clr.l -(%sp) # insert new exponent
9671: mov.l &0x80000000,-(%sp) # insert new high mantissa
9672: mov.l %d0,-(%sp) # insert new lo mantissa
9673:
9674: sok_norm_cont:
9675: fmov.l %fpcr,%d0 # d0 needs fpcr for t_catch2
9676: mov.b &FMUL_OP,%d1 # last inst is MUL
9677: fmul.x (%sp)+,%fp0 # do the multiply
9678: bra t_catch2 # catch any exceptions
9679:
9680: #
9681: # Source is outside of 2^14 range. Test the sign and branch
9682: # to the appropriate exception handler.
9683: #
9684: src_out:
9685: mov.l (%sp)+,%d0 # restore ctrl bits
9686: exg %a0,%a1 # swap src,dst ptrs
9687: tst.b SRC_EX(%a1) # is src negative?
9688: bmi t_unfl # yes; underflow
9689: bra t_ovfl_sc # no; overflow
9690:
9691: #
9692: # The source input is below 1, so we check for denormalized numbers
9693: # and set unfl.
9694: #
9695: src_small:
9696: tst.b DST_HI(%a1) # is dst denormalized?
9697: bpl.b ssmall_done # yes
9698:
# dst is normalized: |src| < 1 scales by 2^0, so just return the dst.
9699: mov.l (%sp)+,%d0
9700: fmov.l %d0,%fpcr # no; load control bits
9701: mov.b &FMOV_OP,%d1 # last inst is MOVE
9702: fmov.x DST(%a1),%fp0 # simply return dest
9703: bra t_catch2
9704: ssmall_done:
9705: mov.l (%sp)+,%d0 # load control bits into d1
9706: mov.l %a1,%a0 # pass ptr to dst
9707: bra t_resdnrm
9708:
9709: #########################################################################
9710: # smod(): computes the fp MOD of the input values X,Y. #
9711: # srem(): computes the fp (IEEE) REM of the input values X,Y. #
9712: # #
9713: # INPUT *************************************************************** #
9714: # a0 = pointer to extended precision input X #
9715: # a1 = pointer to extended precision input Y #
9716: # d0 = round precision,mode #
9717: # #
9718: # The input operands X and Y can be either normalized or #
9719: # denormalized. #
9720: # #
9721: # OUTPUT ************************************************************** #
9722: # fp0 = FREM(X,Y) or FMOD(X,Y) #
9723: # #
9724: # ALGORITHM *********************************************************** #
9725: # #
9726: # Step 1. Save and strip signs of X and Y: signX := sign(X), #
9727: # signY := sign(Y), X := |X|, Y := |Y|, #
9728: # signQ := signX EOR signY. Record whether MOD or REM #
9729: # is requested. #
9730: # #
9731: # Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0. #
9732: # If (L < 0) then #
9733: # R := X, go to Step 4. #
9734: # else #
9735: # R := 2^(-L)X, j := L. #
9736: # endif #
9737: # #
9738: # Step 3. Perform MOD(X,Y) #
9739: # 3.1 If R = Y, go to Step 9. #
9740: # 3.2 If R > Y, then { R := R - Y, Q := Q + 1} #
9741: # 3.3 If j = 0, go to Step 4. #
9742: # 3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to #
9743: # Step 3.1. #
9744: # #
9745: # Step 4. At this point, R = X - QY = MOD(X,Y). Set #
9746: # Last_Subtract := false (used in Step 7 below). If #
9747: # MOD is requested, go to Step 6. #
9748: # #
9749: # Step 5. R = MOD(X,Y), but REM(X,Y) is requested. #
9750: # 5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to #
9751: # Step 6. #
9752: # 5.2 If R > Y/2, then { set Last_Subtract := true, #
9753: # Q := Q + 1, Y := signY*Y }. Go to Step 6. #
9754: # 5.3 This is the tricky case of R = Y/2. If Q is odd, #
9755: # then { Q := Q + 1, signX := -signX }. #
9756: # #
9757: # Step 6. R := signX*R. #
9758: # #
9759: # Step 7. If Last_Subtract = true, R := R - Y. #
9760: # #
9761: # Step 8. Return signQ, last 7 bits of Q, and R as required. #
9762: # #
9763: # Step 9. At this point, R = 2^(-j)*X - Q Y = Y. Thus, #
9764: # X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1), #
9765: # R := 0. Return signQ, last 7 bits of Q, and R. #
9766: # #
9767: #########################################################################
9768:
# Scratch-frame layout for smod()/srem(): flags, saved signs, and two
# extended-precision work areas (Y and R) carved out of the L_SCR/FP_SCR
# locals addressed off %a6.
9769: set Mod_Flag,L_SCR3
9770: set Sc_Flag,L_SCR3+1
9771:
9772: set SignY,L_SCR2
9773: set SignX,L_SCR2+2
9774: set SignQ,L_SCR3+2
9775:
9776: set Y,FP_SCR0
9777: set Y_Hi,Y+4
9778: set Y_Lo,Y+8
9779:
9780: set R,FP_SCR1
9781: set R_Hi,R+4
9782: set R_Lo,R+8
9783:
# Scale: extended-precision constant used by Restore to undo the bias
# applied in Do_Scale (multiply may legitimately underflow there).
9784: Scale:
9785: long 0x00010000,0x80000000,0x00000000,0x00000000
9786:
# smod(): entry for FMOD emulation — clear the quotient byte, save the
# rounding-control bits, flag "MOD" (Mod_Flag = 0) and join common code.
9787: global smod
9788: smod:
9789: clr.b FPSR_QBYTE(%a6)
9790: mov.l %d0,-(%sp) # save ctrl bits
9791: clr.b Mod_Flag(%a6)
9792: bra.b Mod_Rem
9793:
# srem(): entry for IEEE FREM emulation — same setup as smod() but with
# Mod_Flag = 1; falls through into Mod_Rem.
9794: global srem
9795: srem:
9796: clr.b FPSR_QBYTE(%a6)
9797: mov.l %d0,-(%sp) # save ctrl bits
9798: mov.b &0x1,Mod_Flag(%a6)
9799:
# Mod_Rem: common MOD/REM kernel (see the ALGORITHM block above).
# Register roles: (D3,D4,D5) = biased exp + 64-bit mantissa of |Y|,
# (D0,D1,D2) = same for |X| then the remainder R, D6 = shift/carry
# scratch, D3 later becomes the quotient Q, A1 counts loop iterations k.
9800: Mod_Rem:
9801: #..Save sign of X and Y
9802: movm.l &0x3f00,-(%sp) # save data registers
9803: mov.w SRC_EX(%a0),%d3
9804: mov.w %d3,SignY(%a6)
9805: and.l &0x00007FFF,%d3 # Y := |Y|
9806:
9807: #
9808: mov.l SRC_HI(%a0),%d4
9809: mov.l SRC_LO(%a0),%d5 # (D3,D4,D5) is |Y|
9810:
9811: tst.l %d3
9812: bne.b Y_Normal
9813:
# Y is a denorm: start from the denorm's effective exponent and shift
# the mantissa left until the msb is set, reducing the exponent as we go.
9814: mov.l &0x00003FFE,%d3 # $3FFD + 1
9815: tst.l %d4
9816: bne.b HiY_not0
9817:
9818: HiY_0:
9819: mov.l %d5,%d4
9820: clr.l %d5
9821: sub.l &32,%d3
9822: clr.l %d6
9823: bfffo %d4{&0:&32},%d6
9824: lsl.l %d6,%d4
9825: sub.l %d6,%d3 # (D3,D4,D5) is normalized
9826: # ...with bias $7FFD
9827: bra.b Chk_X
9828:
9829: HiY_not0:
9830: clr.l %d6
9831: bfffo %d4{&0:&32},%d6
9832: sub.l %d6,%d3
9833: lsl.l %d6,%d4
9834: mov.l %d5,%d7 # a copy of D5
9835: lsl.l %d6,%d5
9836: neg.l %d6
9837: add.l &32,%d6
9838: lsr.l %d6,%d7
9839: or.l %d7,%d4 # (D3,D4,D5) normalized
9840: # ...with bias $7FFD
9841: bra.b Chk_X
9842:
9843: Y_Normal:
9844: add.l &0x00003FFE,%d3 # (D3,D4,D5) normalized
9845: # ...with bias $7FFD
9846:
# Record sign(Q) = sign(X) XOR sign(Y), then normalize |X| the same way.
9847: Chk_X:
9848: mov.w DST_EX(%a1),%d0
9849: mov.w %d0,SignX(%a6)
9850: mov.w SignY(%a6),%d1
9851: eor.l %d0,%d1
9852: and.l &0x00008000,%d1
9853: mov.w %d1,SignQ(%a6) # sign(Q) obtained
9854: and.l &0x00007FFF,%d0
9855: mov.l DST_HI(%a1),%d1
9856: mov.l DST_LO(%a1),%d2 # (D0,D1,D2) is |X|
9857: tst.l %d0
9858: bne.b X_Normal
9859: mov.l &0x00003FFE,%d0
9860: tst.l %d1
9861: bne.b HiX_not0
9862:
9863: HiX_0:
9864: mov.l %d2,%d1
9865: clr.l %d2
9866: sub.l &32,%d0
9867: clr.l %d6
9868: bfffo %d1{&0:&32},%d6
9869: lsl.l %d6,%d1
9870: sub.l %d6,%d0 # (D0,D1,D2) is normalized
9871: # ...with bias $7FFD
9872: bra.b Init
9873:
9874: HiX_not0:
9875: clr.l %d6
9876: bfffo %d1{&0:&32},%d6
9877: sub.l %d6,%d0
9878: lsl.l %d6,%d1
9879: mov.l %d2,%d7 # a copy of D2
9880: lsl.l %d6,%d2
9881: neg.l %d6
9882: add.l &32,%d6
9883: lsr.l %d6,%d7
9884: or.l %d7,%d1 # (D0,D1,D2) normalized
9885: # ...with bias $7FFD
9886: bra.b Init
9887:
9888: X_Normal:
9889: add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
9890: # ...with bias $7FFD
9891:
9892: Init:
9893: #
9894: mov.l %d3,L_SCR1(%a6) # save biased exp(Y)
9895: mov.l %d0,-(%sp) # save biased exp(X)
9896: sub.l %d3,%d0 # L := expo(X)-expo(Y)
9897:
9898: clr.l %d6 # D6 := carry <- 0
9899: clr.l %d3 # D3 is Q
9900: mov.l &0,%a1 # A1 is k; j+k=L, Q=0
9901:
9902: #..(Carry,D1,D2) is R
9903: tst.l %d0
9904: bge.b Mod_Loop_pre
9905:
9906: #..expo(X) < expo(Y). Thus X = mod(X,Y)
9907: #
9908: mov.l (%sp)+,%d0 # restore d0
9909: bra.w Get_Mod
9910:
9911: Mod_Loop_pre:
9912: addq.l &0x4,%sp # erase exp(X)
9913: #..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
# Restoring-division loop: L+1 compare/subtract/double steps build Q bit
# by bit while R stays in [0, 2Y).
9914: Mod_Loop:
9915: tst.l %d6 # test carry bit
9916: bgt.b R_GT_Y
9917:
9918: #..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
9919: cmp.l %d1,%d4 # compare hi(R) and hi(Y)
9920: bne.b R_NE_Y
9921: cmp.l %d2,%d5 # compare lo(R) and lo(Y)
9922: bne.b R_NE_Y
9923:
9924: #..At this point, R = Y
9925: bra.w Rem_is_0
9926:
9927: R_NE_Y:
9928: #..use the borrow of the previous compare
9929: bcs.b R_LT_Y # borrow is set iff R < Y
9930:
9931: R_GT_Y:
9932: #..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
9933: #..and Y < (D1,D2) < 2Y. Either way, perform R - Y
9934: sub.l %d5,%d2 # lo(R) - lo(Y)
9935: subx.l %d4,%d1 # hi(R) - hi(Y)
9936: clr.l %d6 # clear carry
9937: addq.l &1,%d3 # Q := Q + 1
9938:
9939: R_LT_Y:
9940: #..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
9941: tst.l %d0 # see if j = 0.
9942: beq.b PostLoop
9943:
9944: add.l %d3,%d3 # Q := 2Q
9945: add.l %d2,%d2 # lo(R) = 2lo(R)
9946: roxl.l &1,%d1 # hi(R) = 2hi(R) + carry
9947: scs %d6 # set Carry if 2(R) overflows
9948: addq.l &1,%a1 # k := k+1
9949: subq.l &1,%d0 # j := j - 1
9950: #..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
9951:
9952: bra.b Mod_Loop
9953:
9954: PostLoop:
9955: #..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
9956:
9957: #..normalize R.
9958: mov.l L_SCR1(%a6),%d0 # new biased expo of R
9959: tst.l %d1
9960: bne.b HiR_not0
9961:
9962: HiR_0:
9963: mov.l %d2,%d1
9964: clr.l %d2
9965: sub.l &32,%d0
9966: clr.l %d6
9967: bfffo %d1{&0:&32},%d6
9968: lsl.l %d6,%d1
9969: sub.l %d6,%d0 # (D0,D1,D2) is normalized
9970: # ...with bias $7FFD
9971: bra.b Get_Mod
9972:
9973: HiR_not0:
9974: clr.l %d6
9975: bfffo %d1{&0:&32},%d6
9976: bmi.b Get_Mod # already normalized
9977: sub.l %d6,%d0
9978: lsl.l %d6,%d1
9979: mov.l %d2,%d7 # a copy of D2
9980: lsl.l %d6,%d2
9981: neg.l %d6
9982: add.l &32,%d6
9983: lsr.l %d6,%d7
9984: or.l %d7,%d1 # (D0,D1,D2) normalized
9985:
9986: #
# Materialize R (and Y) as extended-precision values in the scratch frame.
# If R's biased exponent is very small, keep the working bias and set
# Sc_Flag so Restore multiplies by Scale to produce the (possibly
# underflowing) final result via the FPU.
9987: Get_Mod:
9988: cmp.l %d0,&0x000041FE
9989: bge.b No_Scale
9990: Do_Scale:
9991: mov.w %d0,R(%a6)
9992: mov.l %d1,R_Hi(%a6)
9993: mov.l %d2,R_Lo(%a6)
9994: mov.l L_SCR1(%a6),%d6
9995: mov.w %d6,Y(%a6)
9996: mov.l %d4,Y_Hi(%a6)
9997: mov.l %d5,Y_Lo(%a6)
9998: fmov.x R(%a6),%fp0 # no exception
9999: mov.b &1,Sc_Flag(%a6)
10000: bra.b ModOrRem
10001: No_Scale:
10002: mov.l %d1,R_Hi(%a6)
10003: mov.l %d2,R_Lo(%a6)
10004: sub.l &0x3FFE,%d0
10005: mov.w %d0,R(%a6)
10006: mov.l L_SCR1(%a6),%d6
10007: sub.l &0x3FFE,%d6
10008: mov.l %d6,L_SCR1(%a6)
10009: fmov.x R(%a6),%fp0
10010: mov.w %d6,Y(%a6)
10011: mov.l %d4,Y_Hi(%a6)
10012: mov.l %d5,Y_Lo(%a6)
10013: clr.b Sc_Flag(%a6)
10014:
10015: #
# REM only: if R > Y/2 do one final subtract (round quotient to nearest);
# R = Y/2 exactly is the tie case handled below.
10016: ModOrRem:
10017: tst.b Mod_Flag(%a6)
10018: beq.b Fix_Sign
10019:
10020: mov.l L_SCR1(%a6),%d6 # new biased expo(Y)
10021: subq.l &1,%d6 # biased expo(Y/2)
10022: cmp.l %d0,%d6
10023: blt.b Fix_Sign
10024: bgt.b Last_Sub
10025:
10026: cmp.l %d1,%d4
10027: bne.b Not_EQ
10028: cmp.l %d2,%d5
10029: bne.b Not_EQ
10030: bra.w Tie_Case
10031:
10032: Not_EQ:
10033: bcs.b Fix_Sign
10034:
10035: Last_Sub:
10036: #
10037: fsub.x Y(%a6),%fp0 # no exceptions
10038: addq.l &1,%d3 # Q := Q + 1
10039:
10040: #
# Apply sign(X) to R, then deposit sign(Q) and the low 7 bits of Q into
# the FPSR quotient byte.
10041: Fix_Sign:
10042: #..Get sign of X
10043: mov.w SignX(%a6),%d6
10044: bge.b Get_Q
10045: fneg.x %fp0
10046:
10047: #..Get Q
10048: #
10049: Get_Q:
10050: clr.l %d6
10051: mov.w SignQ(%a6),%d6 # D6 is sign(Q)
10052: mov.l &8,%d7
10053: lsr.l %d7,%d6
10054: and.l &0x0000007F,%d3 # 7 bits of Q
10055: or.l %d6,%d3 # sign and bits of Q
10056: # swap %d3
10057: # fmov.l %fpsr,%d6
10058: # and.l &0xFF00FFFF,%d6
10059: # or.l %d3,%d6
10060: # fmov.l %d6,%fpsr # put Q in fpsr
10061: mov.b %d3,FPSR_QBYTE(%a6) # put Q in fpsr
10062:
10063: #
10064: Restore:
10065: movm.l (%sp)+,&0xfc # {%d2-%d7}
10066: mov.l (%sp)+,%d0
10067: fmov.l %d0,%fpcr
10068: tst.b Sc_Flag(%a6)
10069: beq.b Finish
10070: mov.b &FMUL_OP,%d1 # last inst is MUL
10071: fmul.x Scale(%pc),%fp0 # may cause underflow
10072: bra t_catch2
10073: # the '040 package did this apparently to see if the dst operand for the
10074: # preceding fmul was a denorm. but, it better not have been since the
10075: # algorithm just got done playing with fp0 and expected no exceptions
10076: # as a result. trust me...
10077: # bra t_avoid_unsupp # check for denorm as a
10078: # ;result of the scaling
10079:
10080: Finish:
10081: mov.b &FMOV_OP,%d1 # last inst is MOVE
10082: fmov.x %fp0,%fp0 # capture exceptions & round
10083: bra t_catch2
10084:
10085: Rem_is_0:
10086: #..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
10087: addq.l &1,%d3
10088: cmp.l %d0,&8 # D0 is j
10089: bge.b Q_Big
10090:
10091: lsl.l %d0,%d3
10092: bra.b Set_R_0
10093:
# quotient byte only keeps 7 bits, so a shift of >= 8 leaves Q = 0
10094: Q_Big:
10095: clr.l %d3
10096:
10097: Set_R_0:
10098: fmov.s &0x00000000,%fp0
10099: clr.b Sc_Flag(%a6)
10100: bra.w Fix_Sign
10101:
10102: Tie_Case:
10103: #..Check parity of Q
10104: mov.l %d3,%d6
10105: and.l &0x00000001,%d6
10106: tst.l %d6
10107: beq.w Fix_Sign # Q is even
10108:
10109: #..Q is odd, Q := Q + 1, signX := -signX
10110: addq.l &1,%d3
10111: mov.w SignX(%a6),%d6
10112: eor.l &0x00008000,%d6
10113: mov.w %d6,SignX(%a6)
10114: bra.w Fix_Sign
10115:
10116: qnan: long 0x7fff0000, 0xffffffff, 0xffffffff # default extended-precision QNAN returned by t_operr
10117:
10118: #########################################################################
10119: # XDEF **************************************************************** #
10120: # t_dz(): Handle DZ exception during transcendental emulation. #
10121: # Sets N bit according to sign of source operand. #
10122: # t_dz2(): Handle DZ exception during transcendental emulation. #
10123: # Sets N bit always. #
10124: # #
10125: # XREF **************************************************************** #
10126: # None #
10127: # #
10128: # INPUT *************************************************************** #
10129: # a0 = pointer to source operand #
10130: # #
10131: # OUTPUT ************************************************************** #
10132: # fp0 = default result #
10133: # #
10134: # ALGORITHM *********************************************************** #
10135: # - Store properly signed INF into fp0. #
10136: # - Set FPSR exception status dz bit, ccode inf bit, and #
10137: # accrued dz bit. #
10138: # #
10139: #########################################################################
10140:
# t_dz(): DZ during transcendental emulation — return INF signed like the
# src operand; t_dz2() always returns -INF (t_dz falls through into
# dz_pinf for the positive case).
10141: global t_dz
10142: t_dz:
10143: tst.b SRC_EX(%a0) # is src operand negative?
10144: bmi.b t_dz2 # yes
10145:
10146: dz_pinf:
10147: fmov.s &0x7f800000,%fp0 # return +INF in fp0
10148: ori.l &dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
10149: rts
10150:
10151: global t_dz2
10152: t_dz2:
10153: fmov.s &0xff800000,%fp0 # return -INF in fp0
10154: ori.l &dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
10155: rts
10156:
10157: #################################################################
10158: # OPERR exception: #
10159: # - set FPSR exception status operr bit, condition code #
10160: # nan bit; Store default NAN into fp0 #
10161: #################################################################
# t_operr(): OPERR default response — set NAN/OPERR/AIOP in the user FPSR
# and return the default QNAN (qnan constant above) in fp0.
10162: global t_operr
10163: t_operr:
10164: ori.l &opnan_mask,USER_FPSR(%a6) # set NaN/OPERR/AIOP
10165: fmovm.x qnan(%pc),&0x80 # return default NAN in fp0
10166: rts
10167:
10168: #################################################################
10169: # Extended DENORM: #
10170: # - For all functions that have a denormalized input and #
10171: # that f(x)=x, this is the entry point. #
10172: # - we only return the EXOP here if either underflow or #
10173: # inexact is enabled. #
10174: #################################################################
10175:
10176: # Entry point for scale w/ extended denorm. The function does
10177: # NOT set INEX2/AUNFL/AINEX.
# t_resdnrm()/t_extdnrm(): extended-denorm result paths for f(x)=x cases.
# t_resdnrm (scale) sets only UNFL; t_extdnrm also sets INEX2/AUNFL/AINEX.
# Common code returns the (possibly precision-rounded) default result in
# fp0 and, if UNFL or INEX is enabled, also builds the EXOP in fp1.
10178: global t_resdnrm
10179: t_resdnrm:
10180: ori.l &unfl_mask,USER_FPSR(%a6) # set UNFL
10181: bra.b xdnrm_con
10182:
10183: global t_extdnrm
10184: t_extdnrm:
10185: ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
10186:
10187: xdnrm_con:
10188: mov.l %a0,%a1 # make copy of src ptr
10189: mov.l %d0,%d1 # make copy of rnd prec,mode
10190: andi.b &0xc0,%d1 # extended precision?
10191: bne.b xdnrm_sd # no
10192:
10193: # result precision is extended.
10194: tst.b LOCAL_EX(%a0) # is denorm negative?
10195: bpl.b xdnrm_exit # no
10196:
10197: bset &neg_bit,FPSR_CC(%a6) # yes; set 'N' ccode bit
10198: bra.b xdnrm_exit
10199:
10200: # result precision is single or double
10201: xdnrm_sd:
10202: mov.l %a1,-(%sp)
10203: tst.b LOCAL_EX(%a0) # is denorm pos or neg?
10204: smi.b %d1 # set d1 accordingly
10205: bsr.l unf_sub
10206: mov.l (%sp)+,%a1
10207: xdnrm_exit:
10208: fmovm.x (%a0),&0x80 # return default result in fp0
10209:
10210: mov.b FPCR_ENABLE(%a6),%d0
10211: andi.b &0x0a,%d0 # is UNFL or INEX enabled?
10212: bne.b xdnrm_ena # yes
10213: rts
10214:
10215: ################
10216: # unfl enabled #
10217: ################
10218: # we have a DENORM that needs to be converted into an EXOP.
10219: # so, normalize the mantissa, add 0x6000 to the new exponent,
10220: # and return the result in fp1.
10221: xdnrm_ena:
10222: mov.w LOCAL_EX(%a1),FP_SCR0_EX(%a6)
10223: mov.l LOCAL_HI(%a1),FP_SCR0_HI(%a6)
10224: mov.l LOCAL_LO(%a1),FP_SCR0_LO(%a6)
10225:
10226: lea FP_SCR0(%a6),%a0
10227: bsr.l norm # normalize mantissa
10228: addi.l &0x6000,%d0 # add extra bias
10229: andi.w &0x8000,FP_SCR0_EX(%a6) # keep old sign
10230: or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
10231:
10232: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
10233: rts
10234:
10235: #################################################################
10236: # UNFL exception: #
10237: # - This routine is for cases where even an EXOP isn't #
10238: # large enough to hold the range of this result. #
10239: # In such a case, the EXOP equals zero. #
10240: # - Return the default result to the proper precision #
10241: # with the sign of this result being the same as that #
10242: # of the src operand. #
10243: # - t_unfl2() is provided to force the result sign to #
10244: # positive which is the desired result for fetox(). #
10245: #################################################################
# t_unfl(): UNFL so severe that even the EXOP is zero — return the default
# underflow result (sign taken from the operand at (%a0)) in fp0 and a
# zero EXOP in fp1.
10246: global t_unfl
10247: t_unfl:
10248: ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
10249:
10250: tst.b (%a0) # is result pos or neg?
10251: smi.b %d1 # set d1 accordingly
10252: bsr.l unf_sub # calc default unfl result
10253: fmovm.x (%a0),&0x80 # return default result in fp0
10254:
10255: fmov.s &0x00000000,%fp1 # return EXOP in fp1
10256: rts
10257:
# t_unfl2(): like t_unfl() but forces a positive default result (used by
# fetox()). Fixed the zero-EXOP immediate to eight hex digits to match
# t_unfl() above (same assembled value) and corrected the d0/d1 comment.
10258: # t_unfl2 ALWAYS tells unf_sub to create a positive result
10259: global t_unfl2
10260: t_unfl2:
10261: ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
10262:
10263: sf.b %d1 # set d1 to represent positive
10264: bsr.l unf_sub # calc default unfl result
10265: fmovm.x (%a0),&0x80 # return default result in fp0
10266:
10267: fmov.s &0x00000000,%fp1 # return EXOP in fp1
10268: rts
10269:
10270: #################################################################
10271: # OVFL exception: #
10272: # - This routine is for cases where even an EXOP isn't #
10273: # large enough to hold the range of this result. #
10274: # - Return the default result to the proper precision #
10275: # with the sign of this result being the same as that #
10276: # of the src operand. #
10277: # - t_ovfl2() is provided to force the result sign to #
10278: # positive which is the desired result for fcosh(). #
10279: # - t_ovfl_sc() is provided for scale() which only sets #
10280: # the inexact bits if the number is inexact for the #
10281: # precision indicated. #
10282: #################################################################
10283:
# t_ovfl_sc(): OVFL path for scale() — sets INEX2 only if the result is
# actually inexact for the rounding precision (denorm dst is normalized
# first so the sticky-bit test below examines the right mantissa bits).
# t_ovfl()/t_ovfl2() are the unconditional-INEX2 variants; all three join
# at ovfl_work which builds the default result via ovf_res.
10284: global t_ovfl_sc
10285: t_ovfl_sc:
10286: ori.l &ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
10287:
10288: mov.b %d0,%d1 # fetch rnd mode/prec
10289: andi.b &0xc0,%d1 # extract rnd prec
10290: beq.b ovfl_work # prec is extended
10291:
10292: tst.b LOCAL_HI(%a0) # is dst a DENORM?
10293: bmi.b ovfl_sc_norm # no
10294:
10295: # dst op is a DENORM. we have to normalize the mantissa to see if the
10296: # result would be inexact for the given precision. make a copy of the
10297: # dst so we don't screw up the version passed to us.
10298: mov.w LOCAL_EX(%a0),FP_SCR0_EX(%a6)
10299: mov.l LOCAL_HI(%a0),FP_SCR0_HI(%a6)
10300: mov.l LOCAL_LO(%a0),FP_SCR0_LO(%a6)
10301: lea FP_SCR0(%a6),%a0 # pass ptr to FP_SCR0
10302: movm.l &0xc080,-(%sp) # save d0-d1/a0
10303: bsr.l norm # normalize mantissa
10304: movm.l (%sp)+,&0x0103 # restore d0-d1/a0
10305:
# inexact iff any mantissa bit below the target precision is non-zero
10306: ovfl_sc_norm:
10307: cmpi.b %d1,&0x40 # is prec dbl?
10308: bne.b ovfl_sc_dbl # no; sgl
10309: ovfl_sc_sgl:
10310: tst.l LOCAL_LO(%a0) # is lo lw of sgl set?
10311: bne.b ovfl_sc_inx # yes
10312: tst.b 3+LOCAL_HI(%a0) # is lo byte of hi lw set?
10313: bne.b ovfl_sc_inx # yes
10314: bra.b ovfl_work # don't set INEX2
10315: ovfl_sc_dbl:
10316: mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
10317: andi.l &0x7ff,%d1 # dbl mantissa set?
10318: beq.b ovfl_work # no; don't set INEX2
10319: ovfl_sc_inx:
10320: ori.l &inex2_mask,USER_FPSR(%a6) # set INEX2
10321: bra.b ovfl_work # continue
10322:
10323: global t_ovfl
10324: t_ovfl:
10325: ori.l &ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
10326:
10327: ovfl_work:
10328: tst.b LOCAL_EX(%a0) # what is the sign?
10329: smi.b %d1 # set d1 accordingly
10330: bsr.l ovf_res # calc default ovfl result
10331: mov.b %d0,FPSR_CC(%a6) # insert new ccodes
10332: fmovm.x (%a0),&0x80 # return default result in fp0
10333:
10334: fmov.s &0x00000000,%fp1 # return EXOP in fp1
10335: rts
10336:
10337: # t_ovfl2 ALWAYS tells ovf_res to create a positive result
10338: global t_ovfl2
10339: t_ovfl2:
10340: ori.l &ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
10341:
10342: sf.b %d1 # clear sign flag for positive
10343: bsr.l ovf_res # calc default ovfl result
10344: mov.b %d0,FPSR_CC(%a6) # insert new ccodes
10345: fmovm.x (%a0),&0x80 # return default result in fp0
10346:
10347: fmov.s &0x00000000,%fp1 # return EXOP in fp1
10348: rts
10349:
10350: #################################################################
10351: # t_catch(): #
10352: # - the last operation of a transcendental emulation #
10353: # routine may have caused an underflow or overflow. #
10354: # we find out if this occurred by doing an fsave and #
10355: # checking the exception bit. if one did occur, then we #
10356: # jump to fgen_except() which creates the default #
10357: # result and EXOP for us. #
10358: #################################################################
# t_catch(): after the last fp op of a transcendental routine, fsave the
# FPU state; a negative byte at offset 2 of the frame means an exception
# is pending -> go build the default result/EXOP via catch/fgen_except.
# Otherwise discard the 12-byte frame and FALL THROUGH into t_inx2 below.
10359: global t_catch
10360: t_catch:
10361:
10362: fsave -(%sp)
10363: tst.b 0x2(%sp)
10364: bmi.b catch
10365: add.l &0xc,%sp
10366:
10367: #################################################################
10368: # INEX2 exception: #
10369: # - The inex2 and ainex bits are set. #
10370: #################################################################
# t_inx2(): record INEX2/AINEX, plus the N ccode bit for a negative result
# (t_minx2) or the Z ccode bit for a zero result (inx2_zero); dispatches
# on the FPU condition codes of the preceding fp op.
10371: global t_inx2
10372: t_inx2:
10373: fblt.w t_minx2
10374: fbeq.w inx2_zero
10375:
10376: global t_pinx2
10377: t_pinx2:
10378: ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
10379: rts
10380:
10381: global t_minx2
10382: t_minx2:
10383: ori.l &inx2a_mask+neg_mask,USER_FPSR(%a6) # set N/INEX2/AINEX
10384: rts
10385:
10386: inx2_zero:
10387: mov.b &z_bmask,FPSR_CC(%a6)
10388: ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
10389: rts
10390:
10391: # an underflow or overflow exception occurred.
10392: # we must set INEX/AINEX since the fmul/fdiv/fmov emulation may not!
10393: catch:
10394: ori.w &inx2a_mask,FPSR_EXCEPT(%a6)
10395: catch2:
10396: bsr.l fgen_except
10397: add.l &0xc,%sp
10398: rts
10399:
# t_catch2(): like t_catch() but on the no-exception path it also ORs the
# live FPSR of the last fp op into the user FPSR before returning.
10400: global t_catch2
10401: t_catch2:
10402:
10403: fsave -(%sp)
10404:
10405: tst.b 0x2(%sp)
10406: bmi.b catch2
10407: add.l &0xc,%sp
10408:
10409: fmov.l %fpsr,%d0
10410: or.l %d0,USER_FPSR(%a6)
10411:
10412: rts
10413:
10414: #########################################################################
10415:
10416: #########################################################################
10417: # unf_sub(): underflow default result calculation for transcendentals	#
10418: # #
10419: # INPUT: #
10420: # d0 : rnd mode,precision #
10421: # d1.b : sign bit of result ('11111111 = (-) ; '00000000 = (+)) #
10422: # OUTPUT: #
10423: # a0 : points to result (in instruction memory) #
10424: #########################################################################
# unf_sub(): build index {sign,rnd mode,prec} from d1/d0, set the ccode
# bits from tbl_unf_cc, and return (in a0) a pointer to the matching
# default-underflow constant in tbl_unf_result (each entry is 16 bytes,
# hence the *2 on the row index combined with the *8 scale).
10425: unf_sub:
10426: ori.l &unfinx_mask,USER_FPSR(%a6)
10427:
10428: andi.w &0x10,%d1 # keep sign bit in 4th spot
10429:
10430: lsr.b &0x4,%d0 # shift rnd prec,mode to lo bits
10431: andi.b &0xf,%d0 # strip hi rnd mode bit
10432: or.b %d1,%d0 # concat {sgn,mode,prec}
10433:
10434: mov.l %d0,%d1 # make a copy
10435: lsl.b &0x1,%d1 # mult index 2 by 2
10436:
10437: mov.b (tbl_unf_cc.b,%pc,%d0.w*1),FPSR_CC(%a6) # insert ccode bits
10438: lea (tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr
10439: rts
10440:
# tbl_unf_cc: condition-code byte per {sign,mode,prec} index used by
# unf_sub (0x4 = Z, 0x8 = N; 0x8+0x4 = N,Z) — one row of four per
# precision, positive rows first, then the negative rows.
10441: tbl_unf_cc:
10442: byte 0x4, 0x4, 0x4, 0x0
10443: byte 0x4, 0x4, 0x4, 0x0
10444: byte 0x4, 0x4, 0x4, 0x0
10445: byte 0x0, 0x0, 0x0, 0x0
10446: byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
10447: byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
10448: byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
10449:
# tbl_unf_result: 16-byte extended-precision default underflow results,
# indexed by unf_sub: per precision (ext/sgl/dbl), signed zero or the
# smallest representable magnitude (MIN) depending on rounding mode;
# positive entries first, a padding group, then the negative entries.
10450: tbl_unf_result:
10451: long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10452: long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10453: long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10454: long 0x00000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
10455:
10456: long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10457: long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10458: long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10459: long 0x3f810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
10460:
10461: long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10462: long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10463: long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10464: long 0x3c010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
10465:
10466: long 0x0,0x0,0x0,0x0
10467: long 0x0,0x0,0x0,0x0
10468: long 0x0,0x0,0x0,0x0
10469: long 0x0,0x0,0x0,0x0
10470:
10471: long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10472: long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10473: long 0x80000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
10474: long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
10475:
10476: long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10477: long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10478: long 0xbf810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
10479: long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
10480:
10481: long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10482: long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10483: long 0xbc010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
10484: long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
10485:
10486: ############################################################
10487:
10488: #########################################################################
10489: # src_zero(): Return signed zero according to sign of src operand. #
10490: #########################################################################
# src_zero(): return zero signed like the src operand; falls through into
# ld_pzero for the positive case. ld_pzero/ld_mzero also set the Z (and N)
# condition-code bits in the user FPSR.
10491: global src_zero
10492: src_zero:
10493: tst.b SRC_EX(%a0) # get sign of src operand
10494: bmi.b ld_mzero # if neg, load neg zero
10495:
10496: #
10497: # ld_pzero(): return a positive zero.
10498: #
10499: global ld_pzero
10500: ld_pzero:
10501: fmov.s &0x00000000,%fp0 # load +0
10502: mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
10503: rts
10504:
10505: # ld_mzero(): return a negative zero.
10506: global ld_mzero
10507: ld_mzero:
10508: fmov.s &0x80000000,%fp0 # load -0
10509: mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
10510: rts
10511:
10512: #########################################################################
10513: # dst_zero(): Return signed zero according to sign of dst operand. #
10514: #########################################################################
# dst_zero(): return zero signed like the dst operand (via ld_mzero /
# ld_pzero above).
10515: global dst_zero
10516: dst_zero:
10517: tst.b DST_EX(%a1) # get sign of dst operand
10518: bmi.b ld_mzero # if neg, load neg zero
10519: bra.b ld_pzero # load positive zero
10520:
10521: #########################################################################
10522: # src_inf(): Return signed inf according to sign of src operand. #
10523: #########################################################################
10524: global src_inf
10525: src_inf:
10526: tst.b SRC_EX(%a0) # get sign of src operand
10527: bmi.b ld_minf # if negative branch
10528:
10529: #
10530: # ld_pinf(): return a positive infinity.
10531: #
10532: global ld_pinf
10533: ld_pinf:
10534: fmov.s &0x7f800000,%fp0 # load +INF
10535: mov.b &inf_bmask,FPSR_CC(%a6) # set 'INF' ccode bit
10536: rts
10537:
10538: #
10539: # ld_minf():return a negative infinity.
10540: #
10541: global ld_minf
10542: ld_minf:
10543: fmov.s &0xff800000,%fp0 # load -INF
10544: mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
10545: rts
10546:
10547: #########################################################################
10548: # dst_inf(): Return signed inf according to sign of dst operand. #
10549: #########################################################################
10550: global dst_inf
10551: dst_inf:
10552: tst.b DST_EX(%a1) # get sign of dst operand
10553: bmi.b ld_minf # if negative branch
10554: bra.b ld_pinf
10555:
10556: global szr_inf
10557: #################################################################
10558: # szr_inf(): Return +ZERO for a negative src operand or #
10559: # +INF for a positive src operand. #
10560: # Routine used for fetox, ftwotox, and ftentox. #
10561: #################################################################
10562: szr_inf:
10563: tst.b SRC_EX(%a0) # check sign of source
10564: bmi.b ld_pzero
10565: bra.b ld_pinf
10566:
10567: #########################################################################
10568: # sopr_inf(): Return +INF for a positive src operand or #
10569: # jump to operand error routine for a negative src operand. #
10570: # Routine used for flogn, flognp1, flog10, and flog2. #
10571: #########################################################################
10572: global sopr_inf
10573: sopr_inf:
10574: tst.b SRC_EX(%a0) # check sign of source
10575: bmi.w t_operr # log(-inf) is an operand error
10576: bra.b ld_pinf # log(+inf) = +inf
10577:
10578: #################################################################
10579: # setoxm1i(): Return minus one for a negative src operand or #
10580: # positive infinity for a positive src operand. #
10581: # Routine used for fetoxm1. #
10582: #################################################################
10583: global setoxm1i
10584: setoxm1i:
10585: tst.b SRC_EX(%a0) # check sign of source
10586: bmi.b ld_mone # e^(-inf)-1 = -1
10587: bra.b ld_pinf # e^(+inf)-1 = +inf
10588:
10589: #########################################################################
10590: # src_one(): Return signed one according to sign of src operand. #
10591: #########################################################################
10592: global src_one
10593: src_one:
10594: tst.b SRC_EX(%a0) # check sign of source
10595: bmi.b ld_mone # negative: return -1
10596: # positive src: fall through into ld_pone
10597: #
10598: # ld_pone(): return positive one.
10599: #
10600: global ld_pone
10601: ld_pone:
10602: fmov.s &0x3f800000,%fp0 # load +1
10603: clr.b FPSR_CC(%a6) # no ccode bits set for +1
10604: rts
10605:
10606: #
10607: # ld_mone(): return negative one.
10608: #
10609: global ld_mone
10610: ld_mone:
10611: fmov.s &0xbf800000,%fp0 # load -1
10612: mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
10613: rts
10614:
10615: ppiby2: long 0x3fff0000, 0xc90fdaa2, 0x2168c235 # +pi/2, extended precision
10616: mpiby2: long 0xbfff0000, 0xc90fdaa2, 0x2168c235 # -pi/2, extended precision
10617:
10618: #################################################################
10619: # spi_2(): Return signed PI/2 according to sign of src operand. #
10620: #################################################################
10621: global spi_2
10622: spi_2:
10623: tst.b SRC_EX(%a0) # check sign of source
10624: bmi.b ld_mpi2 # negative: return -pi/2
10625: # positive src: fall through into ld_ppi2
10626: #
10627: # ld_ppi2(): return positive PI/2.
10628: #
10629: global ld_ppi2
10630: ld_ppi2:
10631: fmov.l %d0,%fpcr # set caller's rnd prec,mode
10632: fmov.x ppiby2(%pc),%fp0 # load +pi/2
10633: bra.w t_pinx2 # set INEX2
10634:
10635: #
10636: # ld_mpi2(): return negative PI/2.
10637: #
10638: global ld_mpi2
10639: ld_mpi2:
10640: fmov.l %d0,%fpcr # set caller's rnd prec,mode
10641: fmov.x mpiby2(%pc),%fp0 # load -pi/2
10642: bra.w t_minx2 # set INEX2
10643:
10644: ####################################################
10645: # The following routines give support for fsincos. #
10646: ####################################################
10647:
10648: #
10649: # ssincosz(): When the src operand is ZERO, store a one in the
10650: # cosine register and return a ZERO in fp0 w/ the same sign
10651: # as the src operand. (sin(+-0) = +-0; cos(+-0) = +1)
10652: #
10653: global ssincosz
10654: ssincosz:
10655: fmov.s &0x3f800000,%fp1 # cos result = +1
10656: tst.b SRC_EX(%a0) # test sign
10657: bpl.b sincoszp # positive zero case
10658: fmov.s &0x80000000,%fp0 # return sin result in fp0
10659: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # sin result is -0: 'Z','N'
10660: bra.b sto_cos # store cosine result
10661: sincoszp:
10662: fmov.s &0x00000000,%fp0 # return sin result in fp0
10663: mov.b &z_bmask,FPSR_CC(%a6) # sin result is +0: 'Z'
10664: bra.b sto_cos # store cosine result
10665:
10666: #
10667: # ssincosi(): When the src operand is INF, store a QNAN in the cosine
10668: # register and jump to the operand error routine;
10669: # sincos of infinity is an OPERR regardless of sign.
10670: #
10671: global ssincosi
10672: ssincosi:
10673: fmov.x qnan(%pc),%fp1 # load NAN
10674: bsr.l sto_cos # store cosine result
10675: bra.w t_operr # operand error for sin part
10676:
10677: #
10678: # ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
10679: # register and branch to the src QNAN routine.
10680: #
10681: global ssincosqnan
10682: ssincosqnan:
10683: fmov.x LOCAL_EX(%a0),%fp1 # cos result = the src QNAN
10684: bsr.l sto_cos
10685: bra.w src_qnan # sin result = the src QNAN
10686:
10687: #
10688: # ssincossnan(): When the src operand is an SNAN, store the SNAN w/ the SNAN bit set
10689: # in the cosine register and branch to the src SNAN routine.
10690: #
10691: global ssincossnan
10692: ssincossnan:
10693: fmov.x LOCAL_EX(%a0),%fp1 # cos result = the src SNAN
10694: bsr.l sto_cos
10695: bra.w src_snan # sin result = SNAN w/ SNAN bit set
10696:
10697: ########################################################################
10698:
10699: #########################################################################
10700: # sto_cos(): store fp1 to the fpreg designated by the CMDREG dst field. #
10701: # fp1 holds the result of the cosine portion of ssincos(). #
10702: # the value in fp1 will not take any exceptions when moved. #
10703: # INPUT: #
10704: # fp1 : fp value to store #
10705: # MODIFIED: #
10706: # d0 #
10707: #########################################################################
10708: global sto_cos
10709: sto_cos:
10710: mov.b 1+EXC_CMDREG(%a6),%d0 # fetch cmd word low byte
10711: andi.w &0x7,%d0 # extract dst fpreg number 0-7
10712: mov.w (tbl_sto_cos.b,%pc,%d0.w*2),%d0 # fetch table offset for fpreg
10713: jmp (tbl_sto_cos.b,%pc,%d0.w*1) # jump to per-register store
10714:
10715: tbl_sto_cos:
10716: short sto_cos_0 - tbl_sto_cos
10717: short sto_cos_1 - tbl_sto_cos
10718: short sto_cos_2 - tbl_sto_cos
10719: short sto_cos_3 - tbl_sto_cos
10720: short sto_cos_4 - tbl_sto_cos
10721: short sto_cos_5 - tbl_sto_cos
10722: short sto_cos_6 - tbl_sto_cos
10723: short sto_cos_7 - tbl_sto_cos
10724:
10725: # fp0/fp1 live in the exception frame, so store fp1 there; fp2-fp7
10726: # are untouched by the handler and can be written directly.
10726: sto_cos_0:
10727: fmovm.x &0x40,EXC_FP0(%a6) # dst = fp0: store to stacked fp0
10728: rts
10729: sto_cos_1:
10730: fmovm.x &0x40,EXC_FP1(%a6) # dst = fp1: store to stacked fp1
10731: rts
10732: sto_cos_2:
10733: fmov.x %fp1,%fp2
10734: rts
10735: sto_cos_3:
10736: fmov.x %fp1,%fp3
10737: rts
10738: sto_cos_4:
10739: fmov.x %fp1,%fp4
10740: rts
10741: sto_cos_5:
10742: fmov.x %fp1,%fp5
10743: rts
10744: sto_cos_6:
10745: fmov.x %fp1,%fp6
10746: rts
10747: sto_cos_7:
10748: fmov.x %fp1,%fp7
10749: rts
10749:
10750: ##################################################################
10751: # fmod second-level dispatch: route on dst operand type (DTAG). #
10751: global smod_sdnrm
10752: global smod_snorm
10753: smod_sdnrm:
10754: smod_snorm:
10755: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10756: beq.l smod # NORM dst: do the real fmod
10757: cmpi.b %d1,&ZERO
10758: beq.w smod_zro # fmod(x,0)... dst zero case
10759: cmpi.b %d1,&INF
10760: beq.l t_operr # inf dst is an operand error
10761: cmpi.b %d1,&DENORM
10762: beq.l smod # DENORM dst: do the real fmod
10763: cmpi.b %d1,&SNAN
10764: beq.l dst_snan # SNAN dst: return it signalled
10765: bra.l dst_qnan # QNAN dst: return it
10766:
10767: # smod with ZERO src: operr unless dst is a NAN.
10767: global smod_szero
10768: smod_szero:
10769: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10770: beq.l t_operr # x mod 0 is an operand error
10771: cmpi.b %d1,&ZERO
10772: beq.l t_operr
10773: cmpi.b %d1,&INF
10774: beq.l t_operr
10775: cmpi.b %d1,&DENORM
10776: beq.l t_operr
10777: cmpi.b %d1,&QNAN
10778: beq.l dst_qnan # NANs propagate instead
10779: bra.l dst_snan
10780:
10781: # smod with INF src: result is the dst operand itself.
10781: global smod_sinf
10782: smod_sinf:
10783: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10784: beq.l smod_fpn # x mod inf = x
10785: cmpi.b %d1,&ZERO
10786: beq.l smod_zro # 0 mod inf = 0
10787: cmpi.b %d1,&INF
10788: beq.l t_operr # inf mod inf: operand error
10789: cmpi.b %d1,&DENORM
10790: beq.l smod_fpn # denorm dst returned as-is
10791: cmpi.b %d1,&QNAN
10792: beq.l dst_qnan
10793: bra.l dst_snan
10794:
10795: # result is a signed zero; quotient byte sign = src sign ^ dst sign.
10795: smod_zro:
10796: srem_zro:
10797: mov.b SRC_EX(%a0),%d1 # get src sign
10798: mov.b DST_EX(%a1),%d0 # get dst sign
10799: eor.b %d0,%d1 # get qbyte sign
10800: andi.b &0x80,%d1 # keep only the sign bit
10801: mov.b %d1,FPSR_QBYTE(%a6) # set quotient byte sign
10802: tst.b %d0 # zero takes the dst sign
10803: bpl.w ld_pzero
10804: bra.w ld_mzero
10805:
10806: # result is the dst operand unchanged; quotient byte sign only.
10806: smod_fpn:
10807: srem_fpn:
10808: clr.b FPSR_QBYTE(%a6) # (redundant: overwritten below)
10809: mov.l %d0,-(%sp) # save rnd prec,mode
10810: mov.b SRC_EX(%a0),%d1 # get src sign
10811: mov.b DST_EX(%a1),%d0 # get dst sign
10812: eor.b %d0,%d1 # get qbyte sign
10813: andi.b &0x80,%d1 # keep only the sign bit
10814: mov.b %d1,FPSR_QBYTE(%a6) # set quotient byte sign
10815: cmpi.b DTAG(%a6),&DENORM # denorm dst needs special path
10816: bne.b smod_nrm
10817: lea DST(%a1),%a0 # pass: ptr to denorm result
10818: mov.l (%sp)+,%d0 # restore rnd prec,mode
10819: bra t_resdnrm # handle denormalized result
10820: smod_nrm:
10821: fmov.l (%sp)+,%fpcr # restore rnd prec,mode into fpcr
10822: fmov.x DST(%a1),%fp0 # return dst operand in fp0
10823: tst.b DST_EX(%a1) # set 'N' ccode if dst negative
10824: bmi.b smod_nrm_neg
10825: rts
10826:
10827: smod_nrm_neg:
10828: mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode
10829: rts
10830:
10831: #########################################################################
10832: # frem second-level dispatch: route on dst operand type (DTAG). #
10832: global srem_snorm
10833: global srem_sdnrm
10834: srem_sdnrm:
10835: srem_snorm:
10836: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10837: beq.l srem # NORM dst: do the real frem
10838: cmpi.b %d1,&ZERO
10839: beq.w srem_zro # dst zero: signed-zero result
10840: cmpi.b %d1,&INF
10841: beq.l t_operr # inf dst is an operand error
10842: cmpi.b %d1,&DENORM
10843: beq.l srem # DENORM dst: do the real frem
10844: cmpi.b %d1,&QNAN
10845: beq.l dst_qnan # QNAN dst: return it
10846: bra.l dst_snan # SNAN dst: return it signalled
10847:
10848: # frem with ZERO src: operr unless dst is a NAN.
10848: global srem_szero
10849: srem_szero:
10850: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10851: beq.l t_operr # x rem 0 is an operand error
10852: cmpi.b %d1,&ZERO
10853: beq.l t_operr
10854: cmpi.b %d1,&INF
10855: beq.l t_operr
10856: cmpi.b %d1,&DENORM
10857: beq.l t_operr
10858: cmpi.b %d1,&QNAN
10859: beq.l dst_qnan # NANs propagate instead
10860: bra.l dst_snan
10861:
10862: # frem with INF src: result is the dst operand itself.
10862: global srem_sinf
10863: srem_sinf:
10864: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10865: beq.w srem_fpn # x rem inf = x
10866: cmpi.b %d1,&ZERO
10867: beq.w srem_zro # 0 rem inf = 0
10868: cmpi.b %d1,&INF
10869: beq.l t_operr # inf rem inf: operand error
10870: cmpi.b %d1,&DENORM
10871: beq.l srem_fpn # denorm dst returned as-is
10872: cmpi.b %d1,&QNAN
10873: beq.l dst_qnan
10874: bra.l dst_snan
10875:
10876: #########################################################################
10877: # fscale second-level dispatch: route on dst operand type (DTAG). #
10877: global sscale_snorm
10878: global sscale_sdnrm
10879: sscale_snorm:
10880: sscale_sdnrm:
10881: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10882: beq.l sscale # NORM dst: do the real fscale
10883: cmpi.b %d1,&ZERO
10884: beq.l dst_zero # scaling a zero yields that zero
10885: cmpi.b %d1,&INF
10886: beq.l dst_inf # scaling an inf yields that inf
10887: cmpi.b %d1,&DENORM
10888: beq.l sscale # DENORM dst: do the real fscale
10889: cmpi.b %d1,&QNAN
10890: beq.l dst_qnan # QNAN dst: return it
10891: bra.l dst_snan # SNAN dst: return it signalled
10892:
10893: # fscale with ZERO src (scale by 2^0): same routing as above.
10893: global sscale_szero
10894: sscale_szero:
10895: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10896: beq.l sscale
10897: cmpi.b %d1,&ZERO
10898: beq.l dst_zero
10899: cmpi.b %d1,&INF
10900: beq.l dst_inf
10901: cmpi.b %d1,&DENORM
10902: beq.l sscale
10903: cmpi.b %d1,&QNAN
10904: beq.l dst_qnan
10905: bra.l dst_snan
10906:
10907: # fscale with INF src: operand error unless dst is a NAN.
10907: global sscale_sinf
10908: sscale_sinf:
10909: mov.b DTAG(%a6),%d1 # d1 = dst type tag; Z set if NORM
10910: beq.l t_operr # scale by 2^inf: operand error
10911: cmpi.b %d1,&QNAN
10912: beq.l dst_qnan # NANs propagate instead
10913: cmpi.b %d1,&SNAN
10914: beq.l dst_snan
10915: bra.l t_operr
10916:
10917: ########################################################################
10918:
10919: #
10920: # sop_sqnan(): The src op for frem/fmod/fscale was a QNAN.
10921: # An SNAN dst takes precedence over the QNAN src.
10921: #
10922: global sop_sqnan
10923: sop_sqnan:
10924: mov.b DTAG(%a6),%d1 # d1 = dst type tag
10925: cmpi.b %d1,&QNAN
10926: beq.b dst_qnan # dst QNAN wins over src QNAN
10927: cmpi.b %d1,&SNAN
10928: beq.b dst_snan # dst SNAN wins over src QNAN
10929: bra.b src_qnan # else return the src QNAN
10930:
10931: #
10932: # sop_ssnan(): The src op for frem/fmod/fscale was an SNAN.
10933: #
10934: global sop_ssnan
10935: sop_ssnan:
10936: mov.b DTAG(%a6),%d1 # d1 = dst type tag
10937: cmpi.b %d1,&QNAN
10938: beq.b dst_qnan_src_snan # dst QNAN returned, SNAN flagged
10939: cmpi.b %d1,&SNAN
10940: beq.b dst_snan # dst SNAN wins over src SNAN
10941: bra.b src_snan # else return the src SNAN
10942:
10943: # dst QNAN is returned but the src SNAN must still raise SNAN/AIOP.
10943: dst_qnan_src_snan:
10944: ori.l &snaniop_mask,USER_FPSR(%a6) # set NAN/SNAN/AIOP
10945: bra.b dst_qnan
10946:
10947: #
10948: # dst_snan(): Return the dst SNAN w/ the SNAN bit set.
10949: #
10950: global dst_snan
10951: dst_snan:
10952: fmov.x DST(%a1),%fp0 # the fmove sets the SNAN bit
10953: fmov.l %fpsr,%d0 # catch resulting status
10954: or.l %d0,USER_FPSR(%a6) # store status
10955: rts
10956:
10957: #
10958: # dst_qnan(): Return the dst QNAN.
10959: #
10960: global dst_qnan
10961: dst_qnan:
10962: fmov.x DST(%a1),%fp0 # return the non-signalling nan
10963: tst.b DST_EX(%a1) # set ccodes according to QNAN sign
10964: bmi.b dst_qnan_m
10965: dst_qnan_p:
10966: mov.b &nan_bmask,FPSR_CC(%a6) # set 'NAN' ccode bit
10967: rts
10968: dst_qnan_m:
10969: mov.b &neg_bmask+nan_bmask,FPSR_CC(%a6) # set 'N','NAN' ccode bits
10970: rts
10971:
10972: #
10973: # src_snan(): Return the src SNAN w/ the SNAN bit set.
10974: #
10975: global src_snan
10976: src_snan:
10977: fmov.x SRC(%a0),%fp0 # the fmove sets the SNAN bit
10978: fmov.l %fpsr,%d0 # catch resulting status
10979: or.l %d0,USER_FPSR(%a6) # store status
10980: rts
10981:
10982: #
10983: # src_qnan(): Return the src QNAN.
10984: #
10985: global src_qnan
10986: src_qnan:
10987: fmov.x SRC(%a0),%fp0 # return the non-signalling nan
10988: tst.b SRC_EX(%a0) # set ccodes according to QNAN sign
10989: bmi.b dst_qnan_m # reuses dst_qnan_m (same ccode write)
10990: src_qnan_p:
10991: mov.b &nan_bmask,FPSR_CC(%a6) # set 'NAN' ccode bit
10992: rts
10993: src_qnan_m: # note: unreachable; neg path uses dst_qnan_m above
10994: mov.b &neg_bmask+nan_bmask,FPSR_CC(%a6)
10995: rts
10996:
10997: #
10998: # fkern2.s:
10999: # These entry points are used by the exception handler
11000: # routines where an instruction is selected by an index into
11001: # a large jump table corresponding to a given instruction which
11002: # has been decoded. Flow continues here where we now decode
11003: # further accoding to the source operand type.
11004: #
11005:
11006: global fsinh
11007: fsinh: # fsinh dispatcher: route on src operand type tag
11008: mov.b STAG(%a6),%d1 # d1 = src type tag; Z set if NORM (0)
11009: beq.l ssinh # NORM: emulate sinh
11010: cmpi.b %d1,&ZERO
11011: beq.l src_zero # sinh(+-0) = +-0
11012: cmpi.b %d1,&INF
11013: beq.l src_inf # sinh(+-inf) = +-inf
11014: cmpi.b %d1,&DENORM
11015: beq.l ssinhd # DENORM: denormalized sinh path
11016: cmpi.b %d1,&QNAN
11017: beq.l src_qnan # QNAN: return it
11018: bra.l src_snan # SNAN: return it w/ SNAN bit set
11019:
11020: global flognp1
11021: flognp1: # flognp1 dispatcher: route on src type tag
11022: mov.b STAG(%a6),%d1
11023: beq.l slognp1 # NORM
11024: cmpi.b %d1,&ZERO
11025: beq.l src_zero # logn(1+(+-0)) = +-0
11026: cmpi.b %d1,&INF
11027: beq.l sopr_inf # +inf -> +inf; -inf -> operr
11028: cmpi.b %d1,&DENORM
11029: beq.l slognp1d # DENORM
11030: cmpi.b %d1,&QNAN
11031: beq.l src_qnan
11032: bra.l src_snan
11033:
11034: global fetoxm1
11035: fetoxm1: # fetoxm1 dispatcher: route on src type tag
11036: mov.b STAG(%a6),%d1
11037: beq.l setoxm1 # NORM
11038: cmpi.b %d1,&ZERO
11039: beq.l src_zero # e^(+-0)-1 = +-0
11040: cmpi.b %d1,&INF
11041: beq.l setoxm1i # +inf -> +inf; -inf -> -1
11042: cmpi.b %d1,&DENORM
11043: beq.l setoxm1d # DENORM
11044: cmpi.b %d1,&QNAN
11045: beq.l src_qnan
11046: bra.l src_snan
11047:
11048: global ftanh
11049: ftanh: # ftanh dispatcher: route on src type tag
11050: mov.b STAG(%a6),%d1
11051: beq.l stanh # NORM
11052: cmpi.b %d1,&ZERO
11053: beq.l src_zero # tanh(+-0) = +-0
11054: cmpi.b %d1,&INF
11055: beq.l src_one # tanh(+-inf) = +-1
11056: cmpi.b %d1,&DENORM
11057: beq.l stanhd # DENORM
11058: cmpi.b %d1,&QNAN
11059: beq.l src_qnan
11060: bra.l src_snan
11061:
11062: global fatan
11063: fatan: # fatan dispatcher: route on src type tag
11064: mov.b STAG(%a6),%d1
11065: beq.l satan # NORM
11066: cmpi.b %d1,&ZERO
11067: beq.l src_zero # atan(+-0) = +-0
11068: cmpi.b %d1,&INF
11069: beq.l spi_2 # atan(+-inf) = +-pi/2
11070: cmpi.b %d1,&DENORM
11071: beq.l satand # DENORM
11072: cmpi.b %d1,&QNAN
11073: beq.l src_qnan
11074: bra.l src_snan
11075:
11076: global fasin
11077: fasin: # fasin dispatcher: route on src type tag
11078: mov.b STAG(%a6),%d1
11079: beq.l sasin # NORM
11080: cmpi.b %d1,&ZERO
11081: beq.l src_zero # asin(+-0) = +-0
11082: cmpi.b %d1,&INF
11083: beq.l t_operr # asin(+-inf): operand error
11084: cmpi.b %d1,&DENORM
11085: beq.l sasind # DENORM
11086: cmpi.b %d1,&QNAN
11087: beq.l src_qnan
11088: bra.l src_snan
11089:
11090: global fatanh
11091: fatanh: # fatanh dispatcher: route on src type tag
11092: mov.b STAG(%a6),%d1
11093: beq.l satanh # NORM
11094: cmpi.b %d1,&ZERO
11095: beq.l src_zero # atanh(+-0) = +-0
11096: cmpi.b %d1,&INF
11097: beq.l t_operr # atanh(+-inf): operand error
11098: cmpi.b %d1,&DENORM
11099: beq.l satanhd # DENORM
11100: cmpi.b %d1,&QNAN
11101: beq.l src_qnan
11102: bra.l src_snan
11103:
11104: global fsine
11105: fsine: # fsin dispatcher: route on src type tag
11106: mov.b STAG(%a6),%d1 # d1 = src type tag; Z set if NORM (0)
11107: beq.l ssin # NORM
11108: cmpi.b %d1,&ZERO
11109: beq.l src_zero # sin(+-0) = +-0
11110: cmpi.b %d1,&INF
11111: beq.l t_operr # sin(+-inf): operand error
11112: cmpi.b %d1,&DENORM
11113: beq.l ssind # DENORM
11114: cmpi.b %d1,&QNAN
11115: beq.l src_qnan
11116: bra.l src_snan
11117:
11118: global ftan
11119: ftan: # ftan dispatcher: route on src type tag
11120: mov.b STAG(%a6),%d1
11121: beq.l stan # NORM
11122: cmpi.b %d1,&ZERO
11123: beq.l src_zero # tan(+-0) = +-0
11124: cmpi.b %d1,&INF
11125: beq.l t_operr # tan(+-inf): operand error
11126: cmpi.b %d1,&DENORM
11127: beq.l stand # DENORM
11128: cmpi.b %d1,&QNAN
11129: beq.l src_qnan
11130: bra.l src_snan
11131:
11132: global fetox
11133: fetox: # fetox dispatcher: route on src type tag
11134: mov.b STAG(%a6),%d1
11135: beq.l setox # NORM
11136: cmpi.b %d1,&ZERO
11137: beq.l ld_pone # e^(+-0) = +1
11138: cmpi.b %d1,&INF
11139: beq.l szr_inf # e^(-inf)=+0; e^(+inf)=+inf
11140: cmpi.b %d1,&DENORM
11141: beq.l setoxd # DENORM
11142: cmpi.b %d1,&QNAN
11143: beq.l src_qnan
11144: bra.l src_snan
11145:
11146: global ftwotox
11147: ftwotox: # ftwotox dispatcher: route on src type tag
11148: mov.b STAG(%a6),%d1
11149: beq.l stwotox # NORM
11150: cmpi.b %d1,&ZERO
11151: beq.l ld_pone # 2^(+-0) = +1
11152: cmpi.b %d1,&INF
11153: beq.l szr_inf # 2^(-inf)=+0; 2^(+inf)=+inf
11154: cmpi.b %d1,&DENORM
11155: beq.l stwotoxd # DENORM
11156: cmpi.b %d1,&QNAN
11157: beq.l src_qnan
11158: bra.l src_snan
11159:
11160: global ftentox
11161: ftentox: # ftentox dispatcher: route on src type tag
11162: mov.b STAG(%a6),%d1
11163: beq.l stentox # NORM
11164: cmpi.b %d1,&ZERO
11165: beq.l ld_pone # 10^(+-0) = +1
11166: cmpi.b %d1,&INF
11167: beq.l szr_inf # 10^(-inf)=+0; 10^(+inf)=+inf
11168: cmpi.b %d1,&DENORM
11169: beq.l stentoxd # DENORM
11170: cmpi.b %d1,&QNAN
11171: beq.l src_qnan
11172: bra.l src_snan
11173:
11174: global flogn
11175: flogn: # flogn dispatcher: route on src type tag
11176: mov.b STAG(%a6),%d1
11177: beq.l slogn # NORM
11178: cmpi.b %d1,&ZERO
11179: beq.l t_dz2 # logn(0): divide-by-zero, -inf
11180: cmpi.b %d1,&INF
11181: beq.l sopr_inf # +inf -> +inf; -inf -> operr
11182: cmpi.b %d1,&DENORM
11183: beq.l slognd # DENORM
11184: cmpi.b %d1,&QNAN
11185: beq.l src_qnan
11186: bra.l src_snan
11187:
11188: global flog10
11189: flog10: # flog10 dispatcher: route on src type tag
11190: mov.b STAG(%a6),%d1
11191: beq.l slog10 # NORM
11192: cmpi.b %d1,&ZERO
11193: beq.l t_dz2 # log10(0): divide-by-zero, -inf
11194: cmpi.b %d1,&INF
11195: beq.l sopr_inf # +inf -> +inf; -inf -> operr
11196: cmpi.b %d1,&DENORM
11197: beq.l slog10d # DENORM
11198: cmpi.b %d1,&QNAN
11199: beq.l src_qnan
11200: bra.l src_snan
11201:
11202: global flog2
11203: flog2: # flog2 dispatcher: route on src type tag
11204: mov.b STAG(%a6),%d1 # d1 = src type tag; Z set if NORM (0)
11205: beq.l slog2 # NORM
11206: cmpi.b %d1,&ZERO
11207: beq.l t_dz2 # log2(0): divide-by-zero, -inf
11208: cmpi.b %d1,&INF
11209: beq.l sopr_inf # +inf -> +inf; -inf -> operr
11210: cmpi.b %d1,&DENORM
11211: beq.l slog2d # DENORM
11212: cmpi.b %d1,&QNAN
11213: beq.l src_qnan
11214: bra.l src_snan
11215:
11216: global fcosh
11217: fcosh: # fcosh dispatcher: route on src type tag
11218: mov.b STAG(%a6),%d1
11219: beq.l scosh # NORM
11220: cmpi.b %d1,&ZERO
11221: beq.l ld_pone # cosh(+-0) = +1
11222: cmpi.b %d1,&INF
11223: beq.l ld_pinf # cosh(+-inf) = +inf
11224: cmpi.b %d1,&DENORM
11225: beq.l scoshd # DENORM
11226: cmpi.b %d1,&QNAN
11227: beq.l src_qnan
11228: bra.l src_snan
11229:
11230: global facos
11231: facos: # facos dispatcher: route on src type tag
11232: mov.b STAG(%a6),%d1
11233: beq.l sacos # NORM
11234: cmpi.b %d1,&ZERO
11235: beq.l ld_ppi2 # acos(+-0) = +pi/2
11236: cmpi.b %d1,&INF
11237: beq.l t_operr # acos(+-inf): operand error
11238: cmpi.b %d1,&DENORM
11239: beq.l sacosd # DENORM
11240: cmpi.b %d1,&QNAN
11241: beq.l src_qnan
11242: bra.l src_snan
11243:
11244: global fcos
11245: fcos: # fcos dispatcher: route on src type tag
11246: mov.b STAG(%a6),%d1
11247: beq.l scos # NORM
11248: cmpi.b %d1,&ZERO
11249: beq.l ld_pone # cos(+-0) = +1
11250: cmpi.b %d1,&INF
11251: beq.l t_operr # cos(+-inf): operand error
11252: cmpi.b %d1,&DENORM
11253: beq.l scosd # DENORM
11254: cmpi.b %d1,&QNAN
11255: beq.l src_qnan
11256: bra.l src_snan
11257:
11258: global fgetexp
11259: fgetexp: # fgetexp dispatcher: route on src type tag
11260: mov.b STAG(%a6),%d1
11261: beq.l sgetexp # NORM
11262: cmpi.b %d1,&ZERO
11263: beq.l src_zero # getexp(+-0) = +-0
11264: cmpi.b %d1,&INF
11265: beq.l t_operr # getexp(+-inf): operand error
11266: cmpi.b %d1,&DENORM
11267: beq.l sgetexpd # DENORM
11268: cmpi.b %d1,&QNAN
11269: beq.l src_qnan
11270: bra.l src_snan
11271:
11272: global fgetman
11273: fgetman: # fgetman dispatcher: route on src type tag
11274: mov.b STAG(%a6),%d1
11275: beq.l sgetman # NORM
11276: cmpi.b %d1,&ZERO
11277: beq.l src_zero # getman(+-0) = +-0
11278: cmpi.b %d1,&INF
11279: beq.l t_operr # getman(+-inf): operand error
11280: cmpi.b %d1,&DENORM
11281: beq.l sgetmand # DENORM
11282: cmpi.b %d1,&QNAN
11283: beq.l src_qnan
11284: bra.l src_snan
11285:
11286: global fsincos
11287: fsincos: # fsincos dispatcher: route on src type tag
11288: mov.b STAG(%a6),%d1
11289: beq.l ssincos # NORM
11290: cmpi.b %d1,&ZERO
11291: beq.l ssincosz # sin=+-0, cos=+1
11292: cmpi.b %d1,&INF
11293: beq.l ssincosi # cos=QNAN, sin=operr
11294: cmpi.b %d1,&DENORM
11295: beq.l ssincosd # DENORM
11296: cmpi.b %d1,&QNAN
11297: beq.l ssincosqnan # both results get the QNAN
11298: bra.l ssincossnan # both results get the SNAN
11299:
11300: global fmod
11301: fmod: # fmod first-level dispatch: route on src type tag
11302: mov.b STAG(%a6),%d1 # d1 = src type tag; Z set if NORM (0)
11303: beq.l smod_snorm # NORM src
11304: cmpi.b %d1,&ZERO
11305: beq.l smod_szero # ZERO src
11306: cmpi.b %d1,&INF
11307: beq.l smod_sinf # INF src
11308: cmpi.b %d1,&DENORM
11309: beq.l smod_sdnrm # DENORM src
11310: cmpi.b %d1,&QNAN
11311: beq.l sop_sqnan # QNAN src
11312: bra.l sop_ssnan # SNAN src
11313:
11314: global frem
11315: frem: # frem first-level dispatch: route on src type tag
11316: mov.b STAG(%a6),%d1
11317: beq.l srem_snorm # NORM src
11318: cmpi.b %d1,&ZERO
11319: beq.l srem_szero # ZERO src
11320: cmpi.b %d1,&INF
11321: beq.l srem_sinf # INF src
11322: cmpi.b %d1,&DENORM
11323: beq.l srem_sdnrm # DENORM src
11324: cmpi.b %d1,&QNAN
11325: beq.l sop_sqnan # QNAN src
11326: bra.l sop_ssnan # SNAN src
11327:
11328: global fscale
11329: fscale: # fscale first-level dispatch: route on src type tag
11330: mov.b STAG(%a6),%d1
11331: beq.l sscale_snorm # NORM src
11332: cmpi.b %d1,&ZERO
11333: beq.l sscale_szero # ZERO src
11334: cmpi.b %d1,&INF
11335: beq.l sscale_sinf # INF src
11336: cmpi.b %d1,&DENORM
11337: beq.l sscale_sdnrm # DENORM src
11338: cmpi.b %d1,&QNAN
11339: beq.l sop_sqnan # QNAN src
11340: bra.l sop_ssnan # SNAN src
11341:
11342: #########################################################################
11343: # XDEF **************************************************************** #
11344: # fgen_except(): catch an exception during transcendental #
11345: # emulation #
11346: # #
11347: # XREF **************************************************************** #
11348: # fmul() - emulate a multiply instruction #
11349: # fadd() - emulate an add instruction #
11350: # fin() - emulate an fmove instruction #
11351: # #
11352: # INPUT *************************************************************** #
11353: # fp0 = destination operand #
11354: # d0 = type of instruction that took exception #
11355: # fsave frame = source operand #
11356: # #
11357: # OUTPUT ************************************************************** #
11358: # fp0 = result #
11359: # fp1 = EXOP #
11360: # #
11361: # ALGORITHM *********************************************************** #
11362: # An exception occurred on the last instruction of the #
11363: # transcendental emulation. hopefully, this won't be happening much #
11364: # because it will be VERY slow. #
11365: # The only exceptions capable of passing through here are #
11366: # Overflow, Underflow, and Unsupported Data Type. #
11367: # #
11368: #########################################################################
11369:
11370: global fgen_except
11371: fgen_except: # re-run last transcendental op after OVFL/UNFL/UNSUPP
11372: cmpi.b 0x3(%sp),&0x7 # is exception UNSUPP?
11373: beq.b fge_unsupp # yes
11374:
11375: mov.b &NORM,STAG(%a6) # src (fsave frame) is a NORM
11376:
11377: fge_cont:
11378: mov.b &NORM,DTAG(%a6) # dst (fp0) is a NORM
11379:
11380: # ok, I have a problem with putting the dst op at FP_DST. the emulation
11381: # routines aren't supposed to alter the operands but we've just squashed
11382: # FP_DST here...
11383:
11384: # 8/17/93 - this turns out to be more of a "cleanliness" standpoint
11385: # then a potential bug. to begin with, only the dyadic functions
11386: # frem,fmod, and fscale would get the dst trashed here. But, for
11387: # the 060SP, the FP_DST is never used again anyways.
11388: fmovm.x &0x80,FP_DST(%a6) # dst op is in fp0
11389:
11390: lea 0x4(%sp),%a0 # pass: ptr to src op
11391: lea FP_DST(%a6),%a1 # pass: ptr to dst op
11392:
11393: # NOTE(review): the XDEF header above says d0 holds the instruction
11393: # type, but the compares here use %d1 — confirm which register the
11393: # callers actually load.
11393: cmpi.b %d1,&FMOV_OP
11394: beq.b fge_fin # it was an "fmov"
11395: cmpi.b %d1,&FADD_OP
11396: beq.b fge_fadd # it was an "fadd"
11397: fge_fmul: # default: re-emulate a multiply
11398: bsr.l fmul
11399: rts
11400: fge_fadd:
11401: bsr.l fadd
11402: rts
11403: fge_fin:
11404: bsr.l fin
11405: rts
11406:
11407: fge_unsupp: # UNSUPP: src in fsave frame is a DENORM
11408: mov.b &DENORM,STAG(%a6)
11409: bra.b fge_cont
11410:
11411: #
11412: # This table holds the offsets of the emulation routines for each individual
11413: # math operation relative to the address of this table. Included are
11414: # routines like fadd/fmul/fabs as well as the transcendentals.
11415: # The location within the table is determined by the extension bits of the
11416: # operation longword.
11417: #
11418:
11419: swbeg &109 # 109 entries: extension opcodes 0x00-0x6c
11420: tbl_unsupp:
11421: long fin - tbl_unsupp # 00: fmove
11422: long fint - tbl_unsupp # 01: fint
11423: long fsinh - tbl_unsupp # 02: fsinh
11424: long fintrz - tbl_unsupp # 03: fintrz
11425: long fsqrt - tbl_unsupp # 04: fsqrt
11426: long tbl_unsupp - tbl_unsupp
11427: long flognp1 - tbl_unsupp # 06: flognp1
11428: long tbl_unsupp - tbl_unsupp
11429: long fetoxm1 - tbl_unsupp # 08: fetoxm1
11430: long ftanh - tbl_unsupp # 09: ftanh
11431: long fatan - tbl_unsupp # 0a: fatan
11432: long tbl_unsupp - tbl_unsupp
11433: long fasin - tbl_unsupp # 0c: fasin
11434: long fatanh - tbl_unsupp # 0d: fatanh
11435: long fsine - tbl_unsupp # 0e: fsin
11436: long ftan - tbl_unsupp # 0f: ftan
11437: long fetox - tbl_unsupp # 10: fetox
11438: long ftwotox - tbl_unsupp # 11: ftwotox
11439: long ftentox - tbl_unsupp # 12: ftentox
11440: long tbl_unsupp - tbl_unsupp
11441: long flogn - tbl_unsupp # 14: flogn
11442: long flog10 - tbl_unsupp # 15: flog10
11443: long flog2 - tbl_unsupp # 16: flog2
11444: long tbl_unsupp - tbl_unsupp
11445: long fabs - tbl_unsupp # 18: fabs
11446: long fcosh - tbl_unsupp # 19: fcosh
11447: long fneg - tbl_unsupp # 1a: fneg
11448: long tbl_unsupp - tbl_unsupp
11449: long facos - tbl_unsupp # 1c: facos
11450: long fcos - tbl_unsupp # 1d: fcos
11451: long fgetexp - tbl_unsupp # 1e: fgetexp
11452: long fgetman - tbl_unsupp # 1f: fgetman
11453: long fdiv - tbl_unsupp # 20: fdiv
11454: long fmod - tbl_unsupp # 21: fmod
11455: long fadd - tbl_unsupp # 22: fadd
11456: long fmul - tbl_unsupp # 23: fmul
11457: long fsgldiv - tbl_unsupp # 24: fsgldiv
11458: long frem - tbl_unsupp # 25: frem
11459: long fscale - tbl_unsupp # 26: fscale
11460: long fsglmul - tbl_unsupp # 27: fsglmul
11461: long fsub - tbl_unsupp # 28: fsub
11462: long tbl_unsupp - tbl_unsupp
11463: long tbl_unsupp - tbl_unsupp
11464: long tbl_unsupp - tbl_unsupp
11465: long tbl_unsupp - tbl_unsupp
11466: long tbl_unsupp - tbl_unsupp
11467: long tbl_unsupp - tbl_unsupp
11468: long tbl_unsupp - tbl_unsupp
11469: long fsincos - tbl_unsupp # 30: fsincos
11470: long fsincos - tbl_unsupp # 31: fsincos
11471: long fsincos - tbl_unsupp # 32: fsincos
11472: long fsincos - tbl_unsupp # 33: fsincos
11473: long fsincos - tbl_unsupp # 34: fsincos
11474: long fsincos - tbl_unsupp # 35: fsincos
11475: long fsincos - tbl_unsupp # 36: fsincos
11476: long fsincos - tbl_unsupp # 37: fsincos
11477: long fcmp - tbl_unsupp # 38: fcmp
11478: long tbl_unsupp - tbl_unsupp
11479: long ftst - tbl_unsupp # 3a: ftst
11480: long tbl_unsupp - tbl_unsupp
11481: long tbl_unsupp - tbl_unsupp
11482: long tbl_unsupp - tbl_unsupp
11483: long tbl_unsupp - tbl_unsupp
11484: long tbl_unsupp - tbl_unsupp
11485: long fsin - tbl_unsupp # 40: fsmove (fsin = sgl fmove routine, not sine)
11486: long fssqrt - tbl_unsupp # 41: fssqrt
11487: long tbl_unsupp - tbl_unsupp
11488: long tbl_unsupp - tbl_unsupp
11489: long fdin - tbl_unsupp # 44: fdmove (fdin = dbl fmove routine)
11490: long fdsqrt - tbl_unsupp # 45: fdsqrt
11491: long tbl_unsupp - tbl_unsupp
11492: long tbl_unsupp - tbl_unsupp
11493: long tbl_unsupp - tbl_unsupp
11494: long tbl_unsupp - tbl_unsupp
11495: long tbl_unsupp - tbl_unsupp
11496: long tbl_unsupp - tbl_unsupp
11497: long tbl_unsupp - tbl_unsupp
11498: long tbl_unsupp - tbl_unsupp
11499: long tbl_unsupp - tbl_unsupp
11500: long tbl_unsupp - tbl_unsupp
11501: long tbl_unsupp - tbl_unsupp
11502: long tbl_unsupp - tbl_unsupp
11503: long tbl_unsupp - tbl_unsupp
11504: long tbl_unsupp - tbl_unsupp
11505: long tbl_unsupp - tbl_unsupp
11506: long tbl_unsupp - tbl_unsupp
11507: long tbl_unsupp - tbl_unsupp
11508: long tbl_unsupp - tbl_unsupp
11509: long fsabs - tbl_unsupp # 58: fsabs
11510: long tbl_unsupp - tbl_unsupp
11511: long fsneg - tbl_unsupp # 5a: fsneg
11512: long tbl_unsupp - tbl_unsupp
11513: long fdabs - tbl_unsupp # 5c: fdabs
11514: long tbl_unsupp - tbl_unsupp
11515: long fdneg - tbl_unsupp # 5e: fdneg
11516: long tbl_unsupp - tbl_unsupp
11517: long fsdiv - tbl_unsupp # 60: fsdiv
11518: long tbl_unsupp - tbl_unsupp
11519: long fsadd - tbl_unsupp # 62: fsadd
11520: long fsmul - tbl_unsupp # 63: fsmul
11521: long fddiv - tbl_unsupp # 64: fddiv
11522: long tbl_unsupp - tbl_unsupp
11523: long fdadd - tbl_unsupp # 66: fdadd
11524: long fdmul - tbl_unsupp # 67: fdmul
11525: long fssub - tbl_unsupp # 68: fssub
11526: long tbl_unsupp - tbl_unsupp
11527: long tbl_unsupp - tbl_unsupp
11528: long tbl_unsupp - tbl_unsupp
11529: long fdsub - tbl_unsupp # 6c: fdsub
11530:
11531: #########################################################################
11532: # XDEF **************************************************************** #
11533: # fmul(): emulates the fmul instruction #
11534: # fsmul(): emulates the fsmul instruction #
11535: # fdmul(): emulates the fdmul instruction #
11536: # #
11537: # XREF **************************************************************** #
11538: # scale_to_zero_src() - scale src exponent to zero #
11539: # scale_to_zero_dst() - scale dst exponent to zero #
11540: # unf_res() - return default underflow result #
11541: # ovf_res() - return default overflow result #
11542: # res_qnan() - return QNAN result #
11543: # res_snan() - return SNAN result #
11544: # #
11545: # INPUT *************************************************************** #
11546: # a0 = pointer to extended precision source operand #
11547: # a1 = pointer to extended precision destination operand #
11548: # d0 rnd prec,mode #
11549: # #
11550: # OUTPUT ************************************************************** #
11551: # fp0 = result #
11552: # fp1 = EXOP (if exception occurred) #
11553: # #
11554: # ALGORITHM *********************************************************** #
11555: # Handle NANs, infinities, and zeroes as special cases. Divide #
11556: # norms/denorms into ext/sgl/dbl precision. #
11557: # For norms/denorms, scale the exponents such that a multiply #
11558: # instruction won't cause an exception. Use the regular fmul to #
11559: # compute a result. Check if the regular operands would have taken #
11560: # an exception. If so, return the default overflow/underflow result #
11561: # and return the EXOP if exceptions are enabled. Else, scale the #
11562: # result operand to the proper exponent. #
11563: # #
11564: #########################################################################
11565:
11566: align 0x10
11567: tbl_fmul_ovfl: # per-precision overflow thresholds for scale factor
11568: long 0x3fff - 0x7ffe # ext_max
11569: long 0x3fff - 0x407e # sgl_max
11570: long 0x3fff - 0x43fe # dbl_max
11571: tbl_fmul_unfl: # per-precision underflow thresholds for scale factor
11572: long 0x3fff + 0x0001 # ext_unfl
11573: long 0x3fff - 0x3f80 # sgl_unfl
11574: long 0x3fff - 0x3c00 # dbl_unfl
11575:
11576: global fsmul
11577: fsmul: # fsmul: force single rounding precision, then fmul
11578: andi.b &0x30,%d0 # clear rnd prec
11579: ori.b &s_mode*0x10,%d0 # insert sgl prec
11580: bra.b fmul
11581:
11582: global fdmul
11583: fdmul: # fdmul: force double rounding precision
11584: andi.b &0x30,%d0 # clear rnd prec
11585: ori.b &d_mode*0x10,%d0 # insert dbl prec
11586: # falls through into fmul (intentional)
11586:
11587: global fmul
11588: fmul:
11589: mov.l %d0,L_SCR3(%a6) # store rnd info
11590:
11591: clr.w %d1
11592: mov.b DTAG(%a6),%d1
11593: lsl.b &0x3,%d1
11594: or.b STAG(%a6),%d1 # combine src tags
11595: bne.w fmul_not_norm # optimize on non-norm input
11596:
11597: fmul_norm:
11598: mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
11599: mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
11600: mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
11601:
11602: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
11603: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
11604: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
11605:
11606: bsr.l scale_to_zero_src # scale src exponent
11607: mov.l %d0,-(%sp) # save scale factor 1
11608:
11609: bsr.l scale_to_zero_dst # scale dst exponent
11610:
11611: add.l %d0,(%sp) # SCALE_FACTOR = scale1 + scale2
11612:
11613: mov.w 2+L_SCR3(%a6),%d1 # fetch precision
11614: lsr.b &0x6,%d1 # shift to lo bits
11615: mov.l (%sp)+,%d0 # load S.F.
11616: cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
11617: beq.w fmul_may_ovfl # result may rnd to overflow
11618: blt.w fmul_ovfl # result will overflow
11619:
11620: cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
11621: beq.w fmul_may_unfl # result may rnd to no unfl
11622: bgt.w fmul_unfl # result will underflow
11623:
11624: #
11625: # NORMAL:
11626: # - the result of the multiply operation will neither overflow nor underflow.
11627: # - do the multiply to the proper precision and rounding mode.
11628: # - scale the result exponent using the scale factor. if both operands were
11629: # normalized then we really don't need to go through this scaling. but for now,
11630: # this will do.
11631: #
11632: fmul_normal:
11633: fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11634:
11635: fmov.l L_SCR3(%a6),%fpcr # set FPCR
11636: fmov.l &0x0,%fpsr # clear FPSR
11637:
11638: fmul.x FP_SCR0(%a6),%fp0 # execute multiply
11639:
11640: fmov.l %fpsr,%d1 # save status
11641: fmov.l &0x0,%fpcr # clear FPCR
11642:
11643: or.l %d1,USER_FPSR(%a6) # save INEX2,N
11644:
# common exit: undo the exponent scaling (d0 = scale factor) on the result
# and return it in fp0.
11645: fmul_normal_exit:
11646: fmovm.x &0x80,FP_SCR0(%a6) # store out result
11647: mov.l %d2,-(%sp) # save d2
11648: mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
11649: mov.l %d1,%d2 # make a copy
11650: andi.l &0x7fff,%d1 # strip sign
11651: andi.w &0x8000,%d2 # keep old sign
11652: sub.l %d0,%d1 # add scale factor
11653: or.w %d2,%d1 # concat old sign,new exp
11654: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
11655: mov.l (%sp)+,%d2 # restore d2
11656: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
11657: rts
11658:
11659: #
11660: # OVERFLOW:
11661: # - the result of the multiply operation is an overflow.
11662: # - do the multiply to the proper precision and rounding mode in order to
11663: # set the inexact bits.
11664: # - calculate the default result and return it in fp0.
11665: # - if overflow or inexact is enabled, we need a multiply result rounded to
11666: # extended precision. if the original operation was extended, then we have this
11667: # result. if the original operation was single or double, we have to do another
11668: # multiply using extended precision and the correct rounding mode. the result
11669: # of this operation then has its exponent scaled by -0x6000 to create the
11670: # exceptional operand.
11671: #
11672: fmul_ovfl:
11673: fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11674:
11675: fmov.l L_SCR3(%a6),%fpcr # set FPCR
11676: fmov.l &0x0,%fpsr # clear FPSR
11677:
11678: fmul.x FP_SCR0(%a6),%fp0 # execute multiply
11679:
11680: fmov.l %fpsr,%d1 # save status
11681: fmov.l &0x0,%fpcr # clear FPCR
11682:
11683: or.l %d1,USER_FPSR(%a6) # save INEX2,N
11684:
11685: # save setting this until now because this is where fmul_may_ovfl may jump in
11686: fmul_ovfl_tst:
11687: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
11688:
11689: mov.b FPCR_ENABLE(%a6),%d1
11690: andi.b &0x13,%d1 # is OVFL or INEX enabled?
11691: bne.b fmul_ovfl_ena # yes
11692:
11693: # calculate the default result
11694: fmul_ovfl_dis:
11695: btst &neg_bit,FPSR_CC(%a6) # is result negative?
11696: sne %d1 # set sign param accordingly
11697: mov.l L_SCR3(%a6),%d0 # pass rnd prec,mode
11698: bsr.l ovf_res # calculate default result
11699: or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
11700: fmovm.x (%a0),&0x80 # return default result in fp0
11701: rts
11702:
11703: #
11704: # OVFL is enabled; Create EXOP:
11705: # - if precision is extended, then we have the EXOP. simply bias the exponent
11706: # with an extra -0x6000. if the precision is single or double, we need to
11707: # calculate a result rounded to extended precision.
11708: #
11709: fmul_ovfl_ena:
11710: mov.l L_SCR3(%a6),%d1
11711: andi.b &0xc0,%d1 # test the rnd prec
11712: bne.b fmul_ovfl_ena_sd # it's sgl or dbl
11713:
# bias the extended-precision result's exponent by -0x6000 (EXOP convention)
# and hand it back in fp1, then go produce the default result in fp0.
11714: fmul_ovfl_ena_cont:
11715: fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
11716:
11717: mov.l %d2,-(%sp) # save d2
11718: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
11719: mov.w %d1,%d2 # make a copy
11720: andi.l &0x7fff,%d1 # strip sign
11721: sub.l %d0,%d1 # add scale factor
11722: subi.l &0x6000,%d1 # subtract bias
11723: andi.w &0x7fff,%d1 # clear sign bit
11724: andi.w &0x8000,%d2 # keep old sign
11725: or.w %d2,%d1 # concat old sign,new exp
11726: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
11727: mov.l (%sp)+,%d2 # restore d2
11728: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
11729: bra.b fmul_ovfl_dis
11730:
# sgl/dbl precision: redo the multiply in extended precision (rnd mode only)
# so the EXOP carries the extended-precision result.
11731: fmul_ovfl_ena_sd:
11732: fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11733:
11734: mov.l L_SCR3(%a6),%d1
11735: andi.b &0x30,%d1 # keep rnd mode only
11736: fmov.l %d1,%fpcr # set FPCR
11737:
11738: fmul.x FP_SCR0(%a6),%fp0 # execute multiply
11739:
11740: fmov.l &0x0,%fpcr # clear FPCR
11741: bra.b fmul_ovfl_ena_cont
11742:
11743: #
11744: # may OVERFLOW:
11745: # - the result of the multiply operation MAY overflow.
11746: # - do the multiply to the proper precision and rounding mode in order to
11747: # set the inexact bits.
11748: # - calculate the default result and return it in fp0.
11749: #
11750: fmul_may_ovfl:
11751: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11752:
11753: fmov.l L_SCR3(%a6),%fpcr # set FPCR
11754: fmov.l &0x0,%fpsr # clear FPSR
11755:
11756: fmul.x FP_SCR0(%a6),%fp0 # execute multiply
11757:
11758: fmov.l %fpsr,%d1 # save status
11759: fmov.l &0x0,%fpcr # clear FPCR
11760:
11761: or.l %d1,USER_FPSR(%a6) # save INEX2,N
11762:
# after scaling to zero, a scaled result with |result| >= 2.0 means the true
# result overflows at the target precision.
11763: fabs.x %fp0,%fp1 # make a copy of result
11764: fcmp.b %fp1,&0x2 # is |result| >= 2.b?
11765: fbge.w fmul_ovfl_tst # yes; overflow has occurred
11766:
11767: # no, it didn't overflow; we have correct result
11768: bra.w fmul_normal_exit
11769:
11770: #
11771: # UNDERFLOW:
11772: # - the result of the multiply operation is an underflow.
11773: # - do the multiply to the proper precision and rounding mode in order to
11774: # set the inexact bits.
11775: # - calculate the default result and return it in fp0.
11776: # - if overflow or inexact is enabled, we need a multiply result rounded to
11777: # extended precision. if the original operation was extended, then we have this
11778: # result. if the original operation was single or double, we have to do another
11779: # multiply using extended precision and the correct rounding mode. the result
11780: # of this operation then has its exponent scaled by -0x6000 to create the
11781: # exceptional operand.
11782: #
11783: fmul_unfl:
11784: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
11785:
11786: # for fun, let's use only extended precision, round to zero. then, let
11787: # the unf_res() routine figure out all the rest. this way
11788: # we will get the correct answer.
11789: fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11790:
11791: fmov.l &rz_mode*0x10,%fpcr # set FPCR
11792: fmov.l &0x0,%fpsr # clear FPSR
11793:
11794: fmul.x FP_SCR0(%a6),%fp0 # execute multiply
11795:
11796: fmov.l %fpsr,%d1 # save status
11797: fmov.l &0x0,%fpcr # clear FPCR
11798:
11799: or.l %d1,USER_FPSR(%a6) # save INEX2,N
11800:
11801: mov.b FPCR_ENABLE(%a6),%d1
11802: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
11803: bne.b fmul_unfl_ena # yes
11804:
# UNFL/INEX disabled: let unf_res() build the default (denormalized/zero)
# result and return it in fp0.
11805: fmul_unfl_dis:
11806: fmovm.x &0x80,FP_SCR0(%a6) # store out result
11807:
11808: lea FP_SCR0(%a6),%a0 # pass: result addr
11809: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
11810: bsr.l unf_res # calculate default result
11811: or.b %d0,FPSR_CC(%a6) # unf_res2 may have set 'Z'
11812: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
11813: rts
11814:
11815: #
11816: # UNFL is enabled.
# Redo the multiply into fp1 with the user's rnd mode (extended precision),
# then bias the exponent by +0x6000 to form the EXOP returned in fp1.
11817: #
11818: fmul_unfl_ena:
11819: fmovm.x FP_SCR1(%a6),&0x40 # load dst op
11820:
11821: mov.l L_SCR3(%a6),%d1
11822: andi.b &0xc0,%d1 # is precision extended?
11823: bne.b fmul_unfl_ena_sd # no, sgl or dbl
11824:
11825: # if the rnd mode is anything but RZ, then we have to re-do the above
11826: # multiplication because we used RZ for all.
11827: fmov.l L_SCR3(%a6),%fpcr # set FPCR
11828:
11829: fmul_unfl_ena_cont:
11830: fmov.l &0x0,%fpsr # clear FPSR
11831:
11832: fmul.x FP_SCR0(%a6),%fp1 # execute multiply
11833:
11834: fmov.l &0x0,%fpcr # clear FPCR
11835:
11836: fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
11837: mov.l %d2,-(%sp) # save d2
11838: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
11839: mov.l %d1,%d2 # make a copy
11840: andi.l &0x7fff,%d1 # strip sign
11841: andi.w &0x8000,%d2 # keep old sign
11842: sub.l %d0,%d1 # add scale factor
11843: addi.l &0x6000,%d1 # add bias
11844: andi.w &0x7fff,%d1
11845: or.w %d2,%d1 # concat old sign,new exp
11846: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
11847: mov.l (%sp)+,%d2 # restore d2
11848: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
11849: bra.w fmul_unfl_dis
11850:
# sgl/dbl precision: use the rnd mode only (extended precision) for the EXOP.
11851: fmul_unfl_ena_sd:
11852: mov.l L_SCR3(%a6),%d1
11853: andi.b &0x30,%d1 # use only rnd mode
11854: fmov.l %d1,%fpcr # set FPCR
11855:
11856: bra.b fmul_unfl_ena_cont
11857:
11858: # MAY UNDERFLOW:
11859: # -use the correct rounding mode and precision. this code favors operations
11860: # that do not underflow.
11861: fmul_may_unfl:
11862: fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
11863:
11864: fmov.l L_SCR3(%a6),%fpcr # set FPCR
11865: fmov.l &0x0,%fpsr # clear FPSR
11866:
11867: fmul.x FP_SCR0(%a6),%fp0 # execute multiply
11868:
11869: fmov.l %fpsr,%d1 # save status
11870: fmov.l &0x0,%fpcr # clear FPCR
11871:
11872: or.l %d1,USER_FPSR(%a6) # save INEX2,N
11873:
11874: fabs.x %fp0,%fp1 # make a copy of result
11875: fcmp.b %fp1,&0x2 # is |result| > 2.b?
11876: fbgt.w fmul_normal_exit # no; no underflow occurred
11877: fblt.w fmul_unfl # yes; underflow occurred
11878:
11879: #
11880: # we still don't know if underflow occurred. result is ~ equal to 2. but,
11881: # we don't know if the result was an underflow that rounded up to a 2 or
11882: # a normalized number that rounded down to a 2. so, redo the entire operation
11883: # using RZ as the rounding mode to see what the pre-rounded result is.
11884: # this case should be relatively rare.
11885: #
11886: fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
11887:
11888: mov.l L_SCR3(%a6),%d1
11889: andi.b &0xc0,%d1 # keep rnd prec
11890: ori.b &rz_mode*0x10,%d1 # insert RZ
11891:
11892: fmov.l %d1,%fpcr # set FPCR
11893: fmov.l &0x0,%fpsr # clear FPSR
11894:
11895: fmul.x FP_SCR0(%a6),%fp1 # execute multiply
11896:
11897: fmov.l &0x0,%fpcr # clear FPCR
11898: fabs.x %fp1 # make absolute value
11899: fcmp.b %fp1,&0x2 # is |result| < 2.b?
11900: fbge.w fmul_normal_exit # no; no underflow occurred
11901: bra.w fmul_unfl # yes, underflow occurred
11902:
11903: ################################################################################
11904:
11905: #
11906: # Multiply: inputs are not both normalized; what are they?
# Dispatch on d1 = (DTAG<<3)|STAG through a table of 16-bit offsets; row =
# dst optype, column = src optype. Comments below read "DST x SRC".
11907: #
11908: fmul_not_norm:
11909: mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1
11910: jmp (tbl_fmul_op.b,%pc,%d1.w)
11911:
11912: swbeg &48
11913: tbl_fmul_op:
11914: short fmul_norm - tbl_fmul_op # NORM x NORM
11915: short fmul_zero - tbl_fmul_op # NORM x ZERO
11916: short fmul_inf_src - tbl_fmul_op # NORM x INF
11917: short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
11918: short fmul_norm - tbl_fmul_op # NORM x DENORM
11919: short fmul_res_snan - tbl_fmul_op # NORM x SNAN
11920: short tbl_fmul_op - tbl_fmul_op #
11921: short tbl_fmul_op - tbl_fmul_op #
11922:
11923: short fmul_zero - tbl_fmul_op # ZERO x NORM
11924: short fmul_zero - tbl_fmul_op # ZERO x ZERO
11925: short fmul_res_operr - tbl_fmul_op # ZERO x INF
11926: short fmul_res_qnan - tbl_fmul_op # ZERO x QNAN
11927: short fmul_zero - tbl_fmul_op # ZERO x DENORM
11928: short fmul_res_snan - tbl_fmul_op # ZERO x SNAN
11929: short tbl_fmul_op - tbl_fmul_op #
11930: short tbl_fmul_op - tbl_fmul_op #
11931:
11932: short fmul_inf_dst - tbl_fmul_op # INF x NORM
11933: short fmul_res_operr - tbl_fmul_op # INF x ZERO
11934: short fmul_inf_dst - tbl_fmul_op # INF x INF
11935: short fmul_res_qnan - tbl_fmul_op # INF x QNAN
11936: short fmul_inf_dst - tbl_fmul_op # INF x DENORM
11937: short fmul_res_snan - tbl_fmul_op # INF x SNAN
11938: short tbl_fmul_op - tbl_fmul_op #
11939: short tbl_fmul_op - tbl_fmul_op #
11940:
11941: short fmul_res_qnan - tbl_fmul_op # QNAN x NORM
11942: short fmul_res_qnan - tbl_fmul_op # QNAN x ZERO
11943: short fmul_res_qnan - tbl_fmul_op # QNAN x INF
11944: short fmul_res_qnan - tbl_fmul_op # QNAN x QNAN
11945: short fmul_res_qnan - tbl_fmul_op # QNAN x DENORM
11946: short fmul_res_snan - tbl_fmul_op # QNAN x SNAN
11947: short tbl_fmul_op - tbl_fmul_op #
11948: short tbl_fmul_op - tbl_fmul_op #
11949:
11950: short fmul_norm - tbl_fmul_op # DENORM x NORM
11951: short fmul_zero - tbl_fmul_op # DENORM x ZERO
11952: short fmul_inf_src - tbl_fmul_op # DENORM x INF
11953: short fmul_res_qnan - tbl_fmul_op # DENORM x QNAN
11954: short fmul_norm - tbl_fmul_op # DENORM x DENORM
11955: short fmul_res_snan - tbl_fmul_op # DENORM x SNAN
11956: short tbl_fmul_op - tbl_fmul_op #
11957: short tbl_fmul_op - tbl_fmul_op #
11958:
11959: short fmul_res_snan - tbl_fmul_op # SNAN x NORM
11960: short fmul_res_snan - tbl_fmul_op # SNAN x ZERO
11961: short fmul_res_snan - tbl_fmul_op # SNAN x INF
11962: short fmul_res_snan - tbl_fmul_op # SNAN x QNAN
11963: short fmul_res_snan - tbl_fmul_op # SNAN x DENORM
11964: short fmul_res_snan - tbl_fmul_op # SNAN x SNAN
11965: short tbl_fmul_op - tbl_fmul_op #
11966: short tbl_fmul_op - tbl_fmul_op #
11967:
# near trampolines so the word-sized table offsets can reach the shared
# OPERR/SNAN/QNAN result handlers.
11968: fmul_res_operr:
11969: bra.l res_operr
11970: fmul_res_snan:
11971: bra.l res_snan
11972: fmul_res_qnan:
11973: bra.l res_qnan
11974:
11975: #
11976: # Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
# result is a zero whose sign is the XOR of the operand signs; set Z (and N).
11977: #
11978: global fmul_zero # global for fsglmul
11979: fmul_zero:
11980: mov.b SRC_EX(%a0),%d0 # exclusive or the signs
11981: mov.b DST_EX(%a1),%d1
11982: eor.b %d0,%d1
11983: bpl.b fmul_zero_p # result ZERO is pos.
11984: fmul_zero_n:
11985: fmov.s &0x80000000,%fp0 # load -ZERO
11986: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
11987: rts
11988: fmul_zero_p:
11989: fmov.s &0x00000000,%fp0 # load +ZERO
11990: mov.b &z_bmask,FPSR_CC(%a6) # set Z
11991: rts
11992:
11993: #
11994: # Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
11995: #
11996: # Note: The j-bit for an infinity is a don't-care. However, to be
11997: # strictly compatible w/ the 68881/882, we make sure to return an
11998: # INF w/ the j-bit set if the input INF j-bit was set. Destination
11999: # INFs take priority.
12000: #
12001: global fmul_inf_dst # global for fsglmul
12002: fmul_inf_dst:
12003: fmovm.x DST(%a1),&0x80 # return INF result in fp0
12004: mov.b SRC_EX(%a0),%d0 # exclusive or the signs
12005: mov.b DST_EX(%a1),%d1
12006: eor.b %d0,%d1
12007: bpl.b fmul_inf_dst_p # result INF is pos.
12008: fmul_inf_dst_n:
12009: fabs.x %fp0 # clear result sign
12010: fneg.x %fp0 # set result sign
12011: mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
12012: rts
12013: fmul_inf_dst_p:
12014: fabs.x %fp0 # clear result sign
12015: mov.b &inf_bmask,FPSR_CC(%a6) # set INF
12016: rts
12017:
# Multiply by a source INF: return the src INF with sign = XOR of operand
# signs, reusing the fmul_inf_dst sign/ccode tails.
12018: global fmul_inf_src # global for fsglmul
12019: fmul_inf_src:
12020: fmovm.x SRC(%a0),&0x80 # return INF result in fp0
12021: mov.b SRC_EX(%a0),%d0 # exclusive or the signs
12022: mov.b DST_EX(%a1),%d1
12023: eor.b %d0,%d1
12024: bpl.b fmul_inf_dst_p # result INF is pos.
12025: bra.b fmul_inf_dst_n
12026:
12027: #########################################################################
12028: # XDEF **************************************************************** #
12029: # fin(): emulates the fmove instruction #
12030: # fsin(): emulates the fsmove instruction #
12031: # fdin(): emulates the fdmove instruction #
12032: # #
12033: # XREF **************************************************************** #
12034: # norm() - normalize mantissa for EXOP on denorm #
12035: # scale_to_zero_src() - scale src exponent to zero #
12036: # ovf_res() - return default overflow result #
12037: # unf_res() - return default underflow result #
12038: # res_qnan_1op() - return QNAN result #
12039: # res_snan_1op() - return SNAN result #
12040: # #
12041: # INPUT *************************************************************** #
12042: # a0 = pointer to extended precision source operand #
12043: # d0 = round prec/mode #
12044: # #
12045: # OUTPUT ************************************************************** #
12046: # fp0 = result #
12047: # fp1 = EXOP (if exception occurred) #
12048: # #
12049: # ALGORITHM *********************************************************** #
12050: # Handle NANs, infinities, and zeroes as special cases. Divide #
12051: # norms into extended, single, and double precision. #
12052: # Norms can be emulated w/ a regular fmove instruction. For #
12053: # sgl/dbl, must scale exponent and perform an "fmove". Check to see #
12054: # if the result would have overflowed/underflowed. If so, use unf_res() #
12055: # or ovf_res() to return the default result. Also return EXOP if #
12056: # exception is enabled. If no exception, return the default result. #
12057: # Unnorms don't pass through here. #
12058: # #
12059: #########################################################################
12060:
# fsin(): emulates fsmove (single-rounded fmove) - note: NOT the sine function;
# force single-precision rounding, then share fin.
12061: global fsin
12062: fsin:
12063: andi.b &0x30,%d0 # clear rnd prec
12064: ori.b &s_mode*0x10,%d0 # insert sgl precision
12065: bra.b fin
12066:
# fdin(): emulates fdmove - force double-precision rounding, then fall into fin.
12067: global fdin
12068: fdin:
12069: andi.b &0x30,%d0 # clear rnd prec
12070: ori.b &d_mode*0x10,%d0 # insert dbl precision
12071:
# fin(): emulates the fmove instruction.
# in:  a0 = ptr to extended src operand, d0 = rnd prec/mode
# out: fp0 = result, fp1 = EXOP (if an enabled exception occurred)
12072: global fin
12073: fin:
12074: mov.l %d0,L_SCR3(%a6) # store rnd info
12075:
12076: mov.b STAG(%a6),%d1 # fetch src optype tag
12077: bne.w fin_not_norm # optimize on non-norm input
12078:
12079: #
12080: # FP MOVE IN: NORMs and DENORMs ONLY!
12081: #
12082: fin_norm:
12083: andi.b &0xc0,%d0 # is precision extended?
12084: bne.w fin_not_ext # no, so go handle dbl or sgl
12085:
12086: #
12087: # precision selected is extended. so...we cannot get an underflow
12088: # or overflow because of rounding to the correct precision. so...
12089: # skip the scaling and unscaling...
12090: #
12091: tst.b SRC_EX(%a0) # is the operand negative?
12092: bpl.b fin_norm_done # no
12093: bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
12094: fin_norm_done:
12095: fmovm.x SRC(%a0),&0x80 # return result in fp0
12096: rts
12097:
12098: #
12099: # for an extended precision DENORM, the UNFL exception bit is set
12100: # the accrued bit is NOT set in this instance(no inexactness!)
12101: #
12102: fin_denorm:
12103: andi.b &0xc0,%d0 # is precision extended?
12104: bne.w fin_not_ext # no, so go handle dbl or sgl
12105:
12106: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
12107: tst.b SRC_EX(%a0) # is the operand negative?
12108: bpl.b fin_denorm_done # no
12109: bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
12110: fin_denorm_done:
12111: fmovm.x SRC(%a0),&0x80 # return result in fp0
12112: btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
12113: bne.b fin_denorm_unfl_ena # yes
12114: rts
12115:
12116: #
12117: # the input is an extended DENORM and underflow is enabled in the FPCR.
12118: # normalize the mantissa and add the bias of 0x6000 to the resulting negative
12119: # exponent and insert back into the operand.
12120: #
12121: fin_denorm_unfl_ena:
12122: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
12123: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12124: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12125: lea FP_SCR0(%a6),%a0 # pass: ptr to operand
12126: bsr.l norm # normalize result
12127: neg.w %d0 # new exponent = -(shft val)
12128: addi.w &0x6000,%d0 # add new bias to exponent
12129: mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
12130: andi.w &0x8000,%d1 # keep old sign
12131: andi.w &0x7fff,%d0 # clear sign position
12132: or.w %d1,%d0 # concat new exp,old sign
12133: mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
12134: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12135: rts
12136:
12137: #
12138: # operand is to be rounded to single or double precision
12139: #
12140: fin_not_ext:
12141: cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
12142: bne.b fin_dbl
12143:
12144: #
12145: # operand is to be rounded to single precision
# scale the exponent to zero, then classify against the single-precision
# unfl/ovfl exponent bounds to pick the handler.
12146: #
12147: fin_sgl:
12148: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
12149: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12150: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12151: bsr.l scale_to_zero_src # calculate scale factor
12152:
12153: cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
12154: bge.w fin_sd_unfl # yes; go handle underflow
12155: cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
12156: beq.w fin_sd_may_ovfl # maybe; go check
12157: blt.w fin_sd_ovfl # yes; go handle overflow
12158:
12159: #
12160: # operand will NOT overflow or underflow when moved into the fp reg file
12161: #
12162: fin_sd_normal:
12163: fmov.l &0x0,%fpsr # clear FPSR
12164: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12165:
12166: fmov.x FP_SCR0(%a6),%fp0 # perform move
12167:
12168: fmov.l %fpsr,%d1 # save FPSR
12169: fmov.l &0x0,%fpcr # clear FPCR
12170:
12171: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12172:
# common exit: undo the exponent scaling (d0 = scale factor) and return fp0.
12173: fin_sd_normal_exit:
12174: mov.l %d2,-(%sp) # save d2
12175: fmovm.x &0x80,FP_SCR0(%a6) # store out result
12176: mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
12177: mov.w %d1,%d2 # make a copy
12178: andi.l &0x7fff,%d1 # strip sign
12179: sub.l %d0,%d1 # add scale factor
12180: andi.w &0x8000,%d2 # keep old sign
12181: or.w %d1,%d2 # concat old sign,new exponent
12182: mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
12183: mov.l (%sp)+,%d2 # restore d2
12184: fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12185: rts
12186:
12187: #
12188: # operand is to be rounded to double precision
# same classification as fin_sgl but with double-precision exponent bounds.
12189: #
12190: fin_dbl:
12191: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
12192: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12193: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12194: bsr.l scale_to_zero_src # calculate scale factor
12195:
12196: cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
12197: bge.w fin_sd_unfl # yes; go handle underflow
12198: cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
12199: beq.w fin_sd_may_ovfl # maybe; go check
12200: blt.w fin_sd_ovfl # yes; go handle overflow
12201: bra.w fin_sd_normal # no; go handle normalized op
12202:
12203: #
12204: # operand WILL underflow when moved in to the fp register file
12205: #
12206: fin_sd_unfl:
12207: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
12208:
12209: tst.b FP_SCR0_EX(%a6) # is operand negative?
12210: bpl.b fin_sd_unfl_tst
12211: bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
12212:
12213: # if underflow or inexact is enabled, then go calculate the EXOP first.
12214: fin_sd_unfl_tst:
12215: mov.b FPCR_ENABLE(%a6),%d1
12216: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12217: bne.b fin_sd_unfl_ena # yes
12218:
12219: fin_sd_unfl_dis:
12220: lea FP_SCR0(%a6),%a0 # pass: result addr
12221: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
12222: bsr.l unf_res # calculate default result
12223: or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
12224: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12225: rts
12226:
12227: #
12228: # operand will underflow AND underflow or inexact is enabled.
12229: # therefore, we must return the result rounded to extended precision.
# EXOP = operand with exponent unscaled and biased by +0x6000, in fp1.
12230: #
12231: fin_sd_unfl_ena:
12232: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
12233: mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
12234: mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
12235:
12236: mov.l %d2,-(%sp) # save d2
12237: mov.w %d1,%d2 # make a copy
12238: andi.l &0x7fff,%d1 # strip sign
12239: sub.l %d0,%d1 # subtract scale factor
12240: andi.w &0x8000,%d2 # extract old sign
12241: addi.l &0x6000,%d1 # add new bias
12242: andi.w &0x7fff,%d1
12243: or.w %d1,%d2 # concat old sign,new exp
12244: mov.w %d2,FP_SCR1_EX(%a6) # insert new exponent
12245: fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
12246: mov.l (%sp)+,%d2 # restore d2
12247: bra.b fin_sd_unfl_dis
12248:
12249: #
12250: # operand WILL overflow.
12251: #
12252: fin_sd_ovfl:
12253: fmov.l &0x0,%fpsr # clear FPSR
12254: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12255:
12256: fmov.x FP_SCR0(%a6),%fp0 # perform move
12257:
12258: fmov.l &0x0,%fpcr # clear FPCR
12259: fmov.l %fpsr,%d1 # save FPSR
12260:
12261: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12262:
12263: fin_sd_ovfl_tst:
12264: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
12265:
12266: mov.b FPCR_ENABLE(%a6),%d1
12267: andi.b &0x13,%d1 # is OVFL or INEX enabled?
12268: bne.b fin_sd_ovfl_ena # yes
12269:
12270: #
12271: # OVFL is not enabled; therefore, we must create the default result by
12272: # calling ovf_res().
12273: #
12274: fin_sd_ovfl_dis:
12275: btst &neg_bit,FPSR_CC(%a6) # is result negative?
12276: sne %d1 # set sign param accordingly
12277: mov.l L_SCR3(%a6),%d0 # pass: prec,mode
12278: bsr.l ovf_res # calculate default result
12279: or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
12280: fmovm.x (%a0),&0x80 # return default result in fp0
12281: rts
12282:
12283: #
12284: # OVFL is enabled.
12285: # the INEX2 bit has already been updated by the round to the correct precision.
12286: # now, round to extended(and don't alter the FPSR).
# EXOP = operand with exponent unscaled and biased by -0x6000, in fp1.
12287: #
12288: fin_sd_ovfl_ena:
12289: mov.l %d2,-(%sp) # save d2
12290: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12291: mov.l %d1,%d2 # make a copy
12292: andi.l &0x7fff,%d1 # strip sign
12293: andi.w &0x8000,%d2 # keep old sign
12294: sub.l %d0,%d1 # add scale factor
12295: sub.l &0x6000,%d1 # subtract bias
12296: andi.w &0x7fff,%d1
12297: or.w %d2,%d1
12298: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12299: mov.l (%sp)+,%d2 # restore d2
12300: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12301: bra.b fin_sd_ovfl_dis
12302:
12303: #
12304: # the move in MAY overflow. so...
# perform the move, then test the scaled result: |result| >= 2.0 means the
# unscaled result overflows at the target precision.
12305: #
12306: fin_sd_may_ovfl:
12307: fmov.l &0x0,%fpsr # clear FPSR
12308: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12309:
12310: fmov.x FP_SCR0(%a6),%fp0 # perform the move
12311:
12312: fmov.l %fpsr,%d1 # save status
12313: fmov.l &0x0,%fpcr # clear FPCR
12314:
12315: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12316:
12317: fabs.x %fp0,%fp1 # make a copy of result
12318: fcmp.b %fp1,&0x2 # is |result| >= 2.b?
12319: fbge.w fin_sd_ovfl_tst # yes; overflow has occurred
12320:
12321: # no, it didn't overflow; we have correct result
12322: bra.w fin_sd_normal_exit
12323:
12324: ##########################################################################
12325:
12326: #
12327: # operand is not a NORM: check its optype and branch accordingly
12328: #
12329: fin_not_norm:
12330: cmpi.b %d1,&DENORM # weed out DENORM
12331: beq.w fin_denorm
12332: cmpi.b %d1,&SNAN # weed out SNANs
12333: beq.l res_snan_1op
12334: cmpi.b %d1,&QNAN # weed out QNANs
12335: beq.l res_qnan_1op
12336:
12337: #
12338: # do the fmove in; at this point, only possible ops are ZERO and INF.
12339: # use fmov to determine ccodes.
12340: # prec:mode should be zero at this point but it won't affect answer anyways.
12341: #
12342: fmov.x SRC(%a0),%fp0 # do fmove in
12343: fmov.l %fpsr,%d0 # no exceptions possible
12344: rol.l &0x8,%d0 # put ccodes in lo byte
12345: mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
12346: rts
12347:
12348: #########################################################################
12349: # XDEF **************************************************************** #
12350: # fdiv(): emulates the fdiv instruction #
12351: # fsdiv(): emulates the fsdiv instruction #
12352: # fddiv(): emulates the fddiv instruction #
12353: # #
12354: # XREF **************************************************************** #
12355: # scale_to_zero_src() - scale src exponent to zero #
12356: # scale_to_zero_dst() - scale dst exponent to zero #
12357: # unf_res() - return default underflow result #
12358: # ovf_res() - return default overflow result #
12359: # res_qnan() - return QNAN result #
12360: # res_snan() - return SNAN result #
12361: # #
12362: # INPUT *************************************************************** #
12363: # a0 = pointer to extended precision source operand #
12364: # a1 = pointer to extended precision destination operand #
12365: # d0 rnd prec,mode #
12366: # #
12367: # OUTPUT ************************************************************** #
12368: # fp0 = result #
12369: # fp1 = EXOP (if exception occurred) #
12370: # #
12371: # ALGORITHM *********************************************************** #
12372: # Handle NANs, infinities, and zeroes as special cases. Divide #
12373: # norms/denorms into ext/sgl/dbl precision. #
12374: # For norms/denorms, scale the exponents such that a divide #
12375: # instruction won't cause an exception. Use the regular fdiv to #
12376: # compute a result. Check if the regular operands would have taken #
12377: # an exception. If so, return the default overflow/underflow result #
12378: # and return the EXOP if exceptions are enabled. Else, scale the #
12379: # result operand to the proper exponent. #
12380: # #
12381: #########################################################################
12382:
# per-precision (ext/sgl/dbl) scale-factor thresholds used by fdiv to
# predict underflow/overflow of the scaled quotient (indexed by rnd prec).
12383: align 0x10
12384: tbl_fdiv_unfl:
12385: long 0x3fff - 0x0000 # ext_unfl
12386: long 0x3fff - 0x3f81 # sgl_unfl
12387: long 0x3fff - 0x3c01 # dbl_unfl
12388:
12389: tbl_fdiv_ovfl:
12390: long 0x3fff - 0x7ffe # ext overflow exponent
12391: long 0x3fff - 0x407e # sgl overflow exponent
12392: long 0x3fff - 0x43fe # dbl overflow exponent
12393:
# fsdiv(): emulates fsdiv - force single-precision rounding, then share fdiv.
12394: global fsdiv
12395: fsdiv:
12396: andi.b &0x30,%d0 # clear rnd prec
12397: ori.b &s_mode*0x10,%d0 # insert sgl prec
12398: bra.b fdiv
12399:
# fddiv(): emulates fddiv - force double-precision rounding, then fall into fdiv.
12400: global fddiv
12401: fddiv:
12402: andi.b &0x30,%d0 # clear rnd prec
12403: ori.b &d_mode*0x10,%d0 # insert dbl prec
12404:
# fdiv(): emulates the fdiv instruction.
# in:  a0 = ptr to extended src operand, a1 = ptr to extended dst operand,
#      d0 = rnd prec/mode
# out: fp0 = result, fp1 = EXOP (if an enabled exception occurred)
12405: global fdiv
12406: fdiv:
12407: mov.l %d0,L_SCR3(%a6) # store rnd info
12408:
12409: clr.w %d1
12410: mov.b DTAG(%a6),%d1
12411: lsl.b &0x3,%d1
12412: or.b STAG(%a6),%d1 # combine src tags
12413:
12414: bne.w fdiv_not_norm # optimize on non-norm input
12415:
12416: #
12417: # DIVIDE: NORMs and DENORMs ONLY!
12418: #
12419: fdiv_norm:
12420: mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
12421: mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
12422: mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
12423:
12424: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
12425: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12426: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12427:
12428: bsr.l scale_to_zero_src # scale src exponent
12429: mov.l %d0,-(%sp) # save scale factor 1
12430:
12431: bsr.l scale_to_zero_dst # scale dst exponent
12432:
# division subtracts exponents, so the combined factor is dst - src.
12433: neg.l (%sp) # SCALE FACTOR = scale1 - scale2
12434: add.l %d0,(%sp)
12435:
12436: mov.w 2+L_SCR3(%a6),%d1 # fetch precision
12437: lsr.b &0x6,%d1 # shift to lo bits
12438: mov.l (%sp)+,%d0 # load S.F.
12439: cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
12440: ble.w fdiv_may_ovfl # result may overflow; go check
12441:
12442: cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
12443: beq.w fdiv_may_unfl # maybe
12444: bgt.w fdiv_unfl # yes; go handle underflow
12445:
# no overflow/underflow possible: divide at the user's prec/mode and
# unscale the result exponent on exit.
12446: fdiv_normal:
12447: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12448:
12449: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12450: fmov.l &0x0,%fpsr # clear FPSR
12451:
12452: fdiv.x FP_SCR0(%a6),%fp0 # perform divide
12453:
12454: fmov.l %fpsr,%d1 # save FPSR
12455: fmov.l &0x0,%fpcr # clear FPCR
12456:
12457: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12458:
12459: fdiv_normal_exit:
12460: fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
12461: mov.l %d2,-(%sp) # store d2
12462: mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
12463: mov.l %d1,%d2 # make a copy
12464: andi.l &0x7fff,%d1 # strip sign
12465: andi.w &0x8000,%d2 # keep old sign
12466: sub.l %d0,%d1 # add scale factor
12467: or.w %d2,%d1 # concat old sign,new exp
12468: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12469: mov.l (%sp)+,%d2 # restore d2
12470: fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12471: rts
12472:
# tbl_fdiv_ovfl2: per-precision (ext/sgl/dbl) maximum biased exponents, used
# by fdiv_may_ovfl to test the actual unscaled result exponent.
12473: tbl_fdiv_ovfl2:
12474: long 0x7fff
12475: long 0x407f
12476: long 0x43ff
12477:
12478: fdiv_no_ovfl:
12479: mov.l (%sp)+,%d0 # restore scale factor
12480: bra.b fdiv_normal_exit
12481:
# the quotient MAY overflow: do the divide, then compare the unscaled result
# exponent against tbl_fdiv_ovfl2 to decide.
12482: fdiv_may_ovfl:
12483: mov.l %d0,-(%sp) # save scale factor
12484:
12485: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12486:
12487: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12488: fmov.l &0x0,%fpsr # clear FPSR
12489:
12490: fdiv.x FP_SCR0(%a6),%fp0 # execute divide
12491:
12492: fmov.l %fpsr,%d0
12493: fmov.l &0x0,%fpcr
12494:
12495: or.l %d0,USER_FPSR(%a6) # save INEX,N
12496:
12497: fmovm.x &0x01,-(%sp) # save result to stack
12498: mov.w (%sp),%d0 # fetch new exponent
12499: add.l &0xc,%sp # clear result from stack
12500: andi.l &0x7fff,%d0 # strip sign
12501: sub.l (%sp),%d0 # add scale factor
12502: cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
12503: blt.b fdiv_no_ovfl
12504: mov.l (%sp)+,%d0
12505:
# overflow confirmed: record ovfl/aovfl/ainex, then either build the EXOP
# (OVFL/INEX enabled) or return the default ovf_res() result in fp0.
12506: fdiv_ovfl_tst:
12507: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
12508:
12509: mov.b FPCR_ENABLE(%a6),%d1
12510: andi.b &0x13,%d1 # is OVFL or INEX enabled?
12511: bne.b fdiv_ovfl_ena # yes
12512:
12513: fdiv_ovfl_dis:
12514: btst &neg_bit,FPSR_CC(%a6) # is result negative?
12515: sne %d1 # set sign param accordingly
12516: mov.l L_SCR3(%a6),%d0 # pass prec:rnd
12517: bsr.l ovf_res # calculate default result
12518: or.b %d0,FPSR_CC(%a6) # set INF if applicable
12519: fmovm.x (%a0),&0x80 # return default result in fp0
12520: rts
12521:
12522: fdiv_ovfl_ena:
12523: mov.l L_SCR3(%a6),%d1
12524: andi.b &0xc0,%d1 # is precision extended?
12525: bne.b fdiv_ovfl_ena_sd # no, do sgl or dbl
12526:
12527: fdiv_ovfl_ena_cont:
12528: fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
12529:
12530: mov.l %d2,-(%sp) # save d2
12531: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12532: mov.w %d1,%d2 # make a copy
12533: andi.l &0x7fff,%d1 # strip sign
12534: sub.l %d0,%d1 # add scale factor
12535: subi.l &0x6000,%d1 # subtract bias
12536: andi.w &0x7fff,%d1 # clear sign bit
12537: andi.w &0x8000,%d2 # keep old sign
12538: or.w %d2,%d1 # concat old sign,new exp
12539: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
12540: mov.l (%sp)+,%d2 # restore d2
12541: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12542: bra.b fdiv_ovfl_dis
12543:
12544: fdiv_ovfl_ena_sd:
12545: fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
12546:
12547: mov.l L_SCR3(%a6),%d1
12548: andi.b &0x30,%d1 # keep rnd mode
12549: fmov.l %d1,%fpcr # set FPCR
12550:
12551: fdiv.x FP_SCR0(%a6),%fp0 # execute divide
12552:
12553: fmov.l &0x0,%fpcr # clear FPCR
12554: bra.b fdiv_ovfl_ena_cont
12555:
#
# the divide operation WILL underflow: perform it with round-to-zero so
# the intermediate is not double-rounded, then dispatch on the UNFL/INEX
# enable bits to produce either the default result or an EXOP as well.
#
12556: fdiv_unfl:
12557: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
12558:
12559: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12560:
12561: fmov.l &rz_mode*0x10,%fpcr # set FPCR (round-to-zero)
12562: fmov.l &0x0,%fpsr # clear FPSR
12563:
12564: fdiv.x FP_SCR0(%a6),%fp0 # execute divide
12565:
12566: fmov.l %fpsr,%d1 # save status
12567: fmov.l &0x0,%fpcr # clear FPCR
12568:
12569: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12570:
12571: mov.b FPCR_ENABLE(%a6),%d1
12572: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12573: bne.b fdiv_unfl_ena # yes
12574:
# UNFL disabled: let unf_res() build the default (denorm/zero) result
12575: fdiv_unfl_dis:
12576: fmovm.x &0x80,FP_SCR0(%a6) # store out result
12577:
12578: lea FP_SCR0(%a6),%a0 # pass: result addr
12579: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
12580: bsr.l unf_res # calculate default result
12581: or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
12582: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12583: rts
12584:
12585: #
12586: # UNFL is enabled: also build the EXOP (bias-adjusted copy) in fp1.
12587: #
12588: fdiv_unfl_ena:
12589: fmovm.x FP_SCR1(%a6),&0x40 # load dst op
12590:
12591: mov.l L_SCR3(%a6),%d1
12592: andi.b &0xc0,%d1 # is precision extended?
12593: bne.b fdiv_unfl_ena_sd # no, sgl or dbl
12594:
12595: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12596:
12597: fdiv_unfl_ena_cont:
12598: fmov.l &0x0,%fpsr # clear FPSR
12599:
12600: fdiv.x FP_SCR0(%a6),%fp1 # execute divide
12601:
12602: fmov.l &0x0,%fpcr # clear FPCR
12603:
12604: fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
12605: mov.l %d2,-(%sp) # save d2
12606: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
12607: mov.l %d1,%d2 # make a copy
12608: andi.l &0x7fff,%d1 # strip sign
12609: andi.w &0x8000,%d2 # keep old sign
12610: sub.l %d0,%d1 # apply scale factor
12611: addi.l &0x6000,%d1 # add bias
12612: andi.w &0x7fff,%d1
12613: or.w %d2,%d1 # concat old sign,new exp
12614: mov.w %d1,FP_SCR0_EX(%a6) # insert new exp
12615: mov.l (%sp)+,%d2 # restore d2
12616: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12617: bra.w fdiv_unfl_dis
12618:
# sgl/dbl destination precision: keep only the rnd mode for the EXOP divide
12619: fdiv_unfl_ena_sd:
12620: mov.l L_SCR3(%a6),%d1
12621: andi.b &0x30,%d1 # use only rnd mode
12622: fmov.l %d1,%fpcr # set FPCR
12623:
12624: bra.b fdiv_unfl_ena_cont
12625:
12626: #
12627: # the divide operation MAY underflow:
12628: #
12629: fdiv_may_unfl:
12630: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
12631:
12632: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12633: fmov.l &0x0,%fpsr # clear FPSR
12634:
12635: fdiv.x FP_SCR0(%a6),%fp0 # execute divide
12636:
12637: fmov.l %fpsr,%d1 # save status
12638: fmov.l &0x0,%fpcr # clear FPCR
12639:
12640: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12641:
# compare |result| against 1.0 to decide whether underflow occurred
12642: fabs.x %fp0,%fp1 # make a copy of result
12643: fcmp.b %fp1,&0x1 # is |result| > 1.b?
12644: fbgt.w fdiv_normal_exit # no; no underflow occurred
12645: fblt.w fdiv_unfl # yes; underflow occurred
12646:
12647: #
12648: # we still don't know if underflow occurred. result is ~ equal to 1. but,
12649: # we don't know if the result was an underflow that rounded up to a 1
12650: # or a normalized number that rounded down to a 1. so, redo the entire
12651: # operation using RZ as the rounding mode to see what the pre-rounded
12652: # result is. this case should be relatively rare.
12653: #
12654: fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
12655:
12656: mov.l L_SCR3(%a6),%d1
12657: andi.b &0xc0,%d1 # keep rnd prec
12658: ori.b &rz_mode*0x10,%d1 # insert RZ
12659:
12660: fmov.l %d1,%fpcr # set FPCR
12661: fmov.l &0x0,%fpsr # clear FPSR
12662:
12663: fdiv.x FP_SCR0(%a6),%fp1 # execute divide
12664:
12665: fmov.l &0x0,%fpcr # clear FPCR
12666: fabs.x %fp1 # make absolute value
12667: fcmp.b %fp1,&0x1 # is |result| < 1.b?
12668: fbge.w fdiv_normal_exit # no; no underflow occurred
12669: bra.w fdiv_unfl # yes; underflow occurred
12670:
12671: ############################################################################
12672:
12673: #
12674: # Divide: inputs are not both normalized; what are they?
12675: #
# dispatch through a 6x8 jump table of (operand-type, operand-type) pairs;
# d1 indexes the table (scaled by 2 for the 16-bit displacement entries
# -- presumably dst tag selects the row, src tag the column; TODO confirm)
12676: fdiv_not_norm:
12677: mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1
12678: jmp (tbl_fdiv_op.b,%pc,%d1.w*1)
12679:
12680: swbeg &48
12681: tbl_fdiv_op:
12682: short fdiv_norm - tbl_fdiv_op # NORM / NORM
12683: short fdiv_inf_load - tbl_fdiv_op # NORM / ZERO
12684: short fdiv_zero_load - tbl_fdiv_op # NORM / INF
12685: short fdiv_res_qnan - tbl_fdiv_op # NORM / QNAN
12686: short fdiv_norm - tbl_fdiv_op # NORM / DENORM
12687: short fdiv_res_snan - tbl_fdiv_op # NORM / SNAN
12688: short tbl_fdiv_op - tbl_fdiv_op #
12689: short tbl_fdiv_op - tbl_fdiv_op #
12690:
12691: short fdiv_zero_load - tbl_fdiv_op # ZERO / NORM
12692: short fdiv_res_operr - tbl_fdiv_op # ZERO / ZERO
12693: short fdiv_zero_load - tbl_fdiv_op # ZERO / INF
12694: short fdiv_res_qnan - tbl_fdiv_op # ZERO / QNAN
12695: short fdiv_zero_load - tbl_fdiv_op # ZERO / DENORM
12696: short fdiv_res_snan - tbl_fdiv_op # ZERO / SNAN
12697: short tbl_fdiv_op - tbl_fdiv_op #
12698: short tbl_fdiv_op - tbl_fdiv_op #
12699:
12700: short fdiv_inf_dst - tbl_fdiv_op # INF / NORM
12701: short fdiv_inf_dst - tbl_fdiv_op # INF / ZERO
12702: short fdiv_res_operr - tbl_fdiv_op # INF / INF
12703: short fdiv_res_qnan - tbl_fdiv_op # INF / QNAN
12704: short fdiv_inf_dst - tbl_fdiv_op # INF / DENORM
12705: short fdiv_res_snan - tbl_fdiv_op # INF / SNAN
12706: short tbl_fdiv_op - tbl_fdiv_op #
12707: short tbl_fdiv_op - tbl_fdiv_op #
12708:
12709: short fdiv_res_qnan - tbl_fdiv_op # QNAN / NORM
12710: short fdiv_res_qnan - tbl_fdiv_op # QNAN / ZERO
12711: short fdiv_res_qnan - tbl_fdiv_op # QNAN / INF
12712: short fdiv_res_qnan - tbl_fdiv_op # QNAN / QNAN
12713: short fdiv_res_qnan - tbl_fdiv_op # QNAN / DENORM
12714: short fdiv_res_snan - tbl_fdiv_op # QNAN / SNAN
12715: short tbl_fdiv_op - tbl_fdiv_op #
12716: short tbl_fdiv_op - tbl_fdiv_op #
12717:
12718: short fdiv_norm - tbl_fdiv_op # DENORM / NORM
12719: short fdiv_inf_load - tbl_fdiv_op # DENORM / ZERO
12720: short fdiv_zero_load - tbl_fdiv_op # DENORM / INF
12721: short fdiv_res_qnan - tbl_fdiv_op # DENORM / QNAN
12722: short fdiv_norm - tbl_fdiv_op # DENORM / DENORM
12723: short fdiv_res_snan - tbl_fdiv_op # DENORM / SNAN
12724: short tbl_fdiv_op - tbl_fdiv_op #
12725: short tbl_fdiv_op - tbl_fdiv_op #
12726:
12727: short fdiv_res_snan - tbl_fdiv_op # SNAN / NORM
12728: short fdiv_res_snan - tbl_fdiv_op # SNAN / ZERO
12729: short fdiv_res_snan - tbl_fdiv_op # SNAN / INF
12730: short fdiv_res_snan - tbl_fdiv_op # SNAN / QNAN
12731: short fdiv_res_snan - tbl_fdiv_op # SNAN / DENORM
12732: short fdiv_res_snan - tbl_fdiv_op # SNAN / SNAN
12733: short tbl_fdiv_op - tbl_fdiv_op #
12734: short tbl_fdiv_op - tbl_fdiv_op #
12735:
# NAN / operand-error result stubs; targets defined elsewhere in the package
12736: fdiv_res_qnan:
12737: bra.l res_qnan
12738: fdiv_res_snan:
12739: bra.l res_snan
12740: fdiv_res_operr:
12741: bra.l res_operr
12742:
#
# The result is a ZERO (in-range dst / INF src, etc.): return a signed
# zero in fp0 with Z (and N for negative) condition codes set.
#
12743: global fdiv_zero_load # global for fsgldiv
12744: fdiv_zero_load:
12745: mov.b SRC_EX(%a0),%d0 # result sign is exclusive
12746: mov.b DST_EX(%a1),%d1 # or of input signs.
12747: eor.b %d0,%d1
12748: bpl.b fdiv_zero_load_p # result is positive
12749: fmov.s &0x80000000,%fp0 # load a -ZERO
12750: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
12751: rts
12752: fdiv_zero_load_p:
12753: fmov.s &0x00000000,%fp0 # load a +ZERO
12754: mov.b &z_bmask,FPSR_CC(%a6) # set Z
12755: rts
12756:
12757: #
12758: # The destination was In Range and the source was a ZERO. The result,
12759: # therefore, is an INF w/ the proper sign.
12760: # So, determine the sign and return a new INF (w/ the j-bit cleared).
12761: #
12762: global fdiv_inf_load # global for fsgldiv
12763: fdiv_inf_load:
12764: ori.w &dz_mask+adz_mask,2+USER_FPSR(%a6) # no; set DZ/ADZ
12765: mov.b SRC_EX(%a0),%d0 # load both signs
12766: mov.b DST_EX(%a1),%d1
12767: eor.b %d0,%d1
12768: bpl.b fdiv_inf_load_p # result is positive
12769: fmov.s &0xff800000,%fp0 # make result -INF
12770: mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
12771: rts
12772: fdiv_inf_load_p:
12773: fmov.s &0x7f800000,%fp0 # make result +INF
12774: mov.b &inf_bmask,FPSR_CC(%a6) # set INF
12775: rts
12776:
12777: #
12778: # The destination was an INF w/ an In Range or ZERO source, the result is
12779: # an INF w/ the proper sign.
12780: # The 68881/882 returns the destination INF w/ the new sign(if the j-bit of the
12781: # dst INF is set, then the j-bit of the result INF is also set).
12782: #
12783: global fdiv_inf_dst # global for fsgldiv
12784: fdiv_inf_dst:
12785: mov.b DST_EX(%a1),%d0 # load both signs
12786: mov.b SRC_EX(%a0),%d1
12787: eor.b %d0,%d1
12788: bpl.b fdiv_inf_dst_p # result is positive
12789:
12790: fmovm.x DST(%a1),&0x80 # return result in fp0
12791: fabs.x %fp0 # clear sign bit
12792: fneg.x %fp0 # set sign bit
12793: mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
12794: rts
12795:
12796: fdiv_inf_dst_p:
12797: fmovm.x DST(%a1),&0x80 # return result in fp0
12798: fabs.x %fp0 # return positive INF
12799: mov.b &inf_bmask,FPSR_CC(%a6) # set INF
12800: rts
12801:
12802: #########################################################################
12803: # XDEF **************************************************************** #
12804: # fneg(): emulates the fneg instruction #
12805: # fsneg(): emulates the fsneg instruction #
12806: # fdneg(): emulates the fdneg instruction #
12807: # #
12808: # XREF **************************************************************** #
12809: # norm() - normalize a denorm to provide EXOP #
12810: # scale_to_zero_src() - scale sgl/dbl source exponent #
12811: # ovf_res() - return default overflow result #
12812: # unf_res() - return default underflow result #
12813: # res_qnan_1op() - return QNAN result #
12814: # res_snan_1op() - return SNAN result #
12815: # #
12816: # INPUT *************************************************************** #
12817: # a0 = pointer to extended precision source operand #
12818: # d0 = rnd prec,mode #
12819: # #
12820: # OUTPUT ************************************************************** #
12821: # fp0 = result #
12822: # fp1 = EXOP (if exception occurred) #
12823: # #
12824: # ALGORITHM *********************************************************** #
12825: # Handle NANs, zeroes, and infinities as special cases. Separate #
12826: # norms/denorms into ext/sgl/dbl precisions. Extended precision can be #
12827: # emulated by simply setting sign bit. Sgl/dbl operands must be scaled #
12828: # and an actual fneg performed to see if overflow/underflow would have #
12829: # occurred. If so, return default underflow/overflow result. Else, #
12830: # scale the result exponent and return result. FPSR gets set based on #
12831: # the result value. #
12832: # #
12833: #########################################################################
12834:
#
# fsneg/fdneg: force the rounding precision field (bits 6-7 of d0) to
# single/double respectively, then fall into the common fneg handler.
#
12835: global fsneg
12836: fsneg:
12837: andi.b &0x30,%d0 # clear rnd prec
12838: ori.b &s_mode*0x10,%d0 # insert sgl precision
12839: bra.b fneg
12840:
12841: global fdneg
12842: fdneg:
12843: andi.b &0x30,%d0 # clear rnd prec
12844: ori.b &d_mode*0x10,%d0 # insert dbl prec
12845:
# fneg: in: a0 = src operand ptr, d0 = rnd prec,mode; out: fp0 = result
12846: global fneg
12847: fneg:
12848: mov.l %d0,L_SCR3(%a6) # store rnd info
12849: mov.b STAG(%a6),%d1
12850: bne.w fneg_not_norm # optimize on non-norm input
12851:
12852: #
12853: # NEGATE SIGN : norms and denorms ONLY!
12854: #
12855: fneg_norm:
12856: andi.b &0xc0,%d0 # is precision extended?
12857: bne.w fneg_not_ext # no; go handle sgl or dbl
12858:
12859: #
12860: # precision selected is extended. so...we can not get an underflow
12861: # or overflow because of rounding to the correct precision. so...
12862: # skip the scaling and unscaling...
12863: #
12864: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12865: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12866: mov.w SRC_EX(%a0),%d0
12867: eori.w &0x8000,%d0 # negate sign
12868: bpl.b fneg_norm_load # sign is positive
12869: mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
12870: fneg_norm_load:
12871: mov.w %d0,FP_SCR0_EX(%a6)
12872: fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12873: rts
12874:
12875: #
12876: # for an extended precision DENORM, the UNFL exception bit is set
12877: # the accrued bit is NOT set in this instance(no inexactness!)
12878: #
12879: fneg_denorm:
12880: andi.b &0xc0,%d0 # is precision extended?
12881: bne.b fneg_not_ext # no; go handle sgl or dbl
12882:
12883: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
12884:
12885: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12886: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12887: mov.w SRC_EX(%a0),%d0
12888: eori.w &0x8000,%d0 # negate sign
12889: bpl.b fneg_denorm_done # no
12890: mov.b &neg_bmask,FPSR_CC(%a6) # yes, set 'N' ccode bit
12891: fneg_denorm_done:
12892: mov.w %d0,FP_SCR0_EX(%a6)
12893: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12894:
12895: btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
12896: bne.b fneg_ext_unfl_ena # yes
12897: rts
12898:
12899: #
12900: # the input is an extended DENORM and underflow is enabled in the FPCR.
12901: # normalize the mantissa and add the bias of 0x6000 to the resulting negative
12902: # exponent and insert back into the operand.
12903: #
12904: fneg_ext_unfl_ena:
12905: lea FP_SCR0(%a6),%a0 # pass: ptr to operand
12906: bsr.l norm # normalize result
12907: neg.w %d0 # new exponent = -(shft val)
12908: addi.w &0x6000,%d0 # add new bias to exponent
12909: mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
12910: andi.w &0x8000,%d1 # keep old sign
12911: andi.w &0x7fff,%d0 # clear sign position
12912: or.w %d1,%d0 # concat old sign, new exponent
12913: mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
12914: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12915: rts
12916:
12917: #
12918: # operand is either single or double
12919: #
12920: fneg_not_ext:
12921: cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
12922: bne.b fneg_dbl
12923:
12924: #
12925: # operand is to be rounded to single precision
12926: #
# scale exponent to zero, then classify the ORIGINAL exponent against the
# single-precision limits to pick normal/underflow/overflow paths
12927: fneg_sgl:
12928: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
12929: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12930: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12931: bsr.l scale_to_zero_src # calculate scale factor
12932:
12933: cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
12934: bge.w fneg_sd_unfl # yes; go handle underflow
12935: cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
12936: beq.w fneg_sd_may_ovfl # maybe; go check
12937: blt.w fneg_sd_ovfl # yes; go handle overflow
12938:
12939: #
12940: # operand will NOT overflow or underflow when moved in to the fp reg file
12941: #
12942: fneg_sd_normal:
12943: fmov.l &0x0,%fpsr # clear FPSR
12944: fmov.l L_SCR3(%a6),%fpcr # set FPCR
12945:
12946: fneg.x FP_SCR0(%a6),%fp0 # perform negation
12947:
12948: fmov.l %fpsr,%d1 # save FPSR
12949: fmov.l &0x0,%fpcr # clear FPCR
12950:
12951: or.l %d1,USER_FPSR(%a6) # save INEX2,N
12952:
# common exit: fold scale factor d0 back into the result exponent
12953: fneg_sd_normal_exit:
12954: mov.l %d2,-(%sp) # save d2
12955: fmovm.x &0x80,FP_SCR0(%a6) # store out result
12956: mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
12957: mov.w %d1,%d2 # make a copy
12958: andi.l &0x7fff,%d1 # strip sign
12959: sub.l %d0,%d1 # apply scale factor
12960: andi.w &0x8000,%d2 # keep old sign
12961: or.w %d1,%d2 # concat old sign,new exp
12962: mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
12963: mov.l (%sp)+,%d2 # restore d2
12964: fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12965: rts
12966:
12967: #
12968: # operand is to be rounded to double precision
12969: #
# same classification as fneg_sgl, but against double-precision limits
12970: fneg_dbl:
12971: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
12972: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
12973: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
12974: bsr.l scale_to_zero_src # calculate scale factor
12975:
12976: cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
12977: bge.b fneg_sd_unfl # yes; go handle underflow
12978: cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
12979: beq.w fneg_sd_may_ovfl # maybe; go check
12980: blt.w fneg_sd_ovfl # yes; go handle overflow
12981: bra.w fneg_sd_normal # no; go handle normalized op
12982:
12983: #
12984: # operand WILL underflow when moved in to the fp register file
12985: #
12986: fneg_sd_unfl:
12987: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
12988:
12989: eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
12990: bpl.b fneg_sd_unfl_tst
12991: bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
12992:
12993: # if underflow or inexact is enabled, go calculate EXOP first.
12994: fneg_sd_unfl_tst:
12995: mov.b FPCR_ENABLE(%a6),%d1
12996: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12997: bne.b fneg_sd_unfl_ena # yes
12998:
# UNFL disabled: let unf_res() build the default (denorm/zero) result
12999: fneg_sd_unfl_dis:
13000: lea FP_SCR0(%a6),%a0 # pass: result addr
13001: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
13002: bsr.l unf_res # calculate default result
13003: or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
13004: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
13005: rts
13006:
13007: #
13008: # operand will underflow AND underflow is enabled.
13009: # therefore, we must return the result rounded to extended precision.
13010: #
13011: fneg_sd_unfl_ena:
13012: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
13013: mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
13014: mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
13015:
13016: mov.l %d2,-(%sp) # save d2
13017: mov.l %d1,%d2 # make a copy
13018: andi.l &0x7fff,%d1 # strip sign
13019: andi.w &0x8000,%d2 # keep old sign
13020: sub.l %d0,%d1 # subtract scale factor
13021: addi.l &0x6000,%d1 # add new bias
13022: andi.w &0x7fff,%d1
13023: or.w %d2,%d1 # concat new sign,new exp
13024: mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
13025: fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
13026: mov.l (%sp)+,%d2 # restore d2
13027: bra.b fneg_sd_unfl_dis
13028:
13029: #
13030: # operand WILL overflow.
13031: #
13032: fneg_sd_ovfl:
13033: fmov.l &0x0,%fpsr # clear FPSR
13034: fmov.l L_SCR3(%a6),%fpcr # set FPCR
13035:
13036: fneg.x FP_SCR0(%a6),%fp0 # perform negation
13037:
13038: fmov.l &0x0,%fpcr # clear FPCR
13039: fmov.l %fpsr,%d1 # save FPSR
13040:
13041: or.l %d1,USER_FPSR(%a6) # save INEX2,N
13042:
13043: fneg_sd_ovfl_tst:
13044: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
13045:
13046: mov.b FPCR_ENABLE(%a6),%d1
13047: andi.b &0x13,%d1 # is OVFL or INEX enabled?
13048: bne.b fneg_sd_ovfl_ena # yes
13049:
13050: #
13051: # OVFL is not enabled; therefore, we must create the default result by
13052: # calling ovf_res().
13053: #
13054: fneg_sd_ovfl_dis:
13055: btst &neg_bit,FPSR_CC(%a6) # is result negative?
13056: sne %d1 # set sign param accordingly
13057: mov.l L_SCR3(%a6),%d0 # pass: prec,mode
13058: bsr.l ovf_res # calculate default result
13059: or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
13060: fmovm.x (%a0),&0x80 # return default result in fp0
13061: rts
13062:
13063: #
13064: # OVFL is enabled.
13065: # the INEX2 bit has already been updated by the round to the correct precision.
13066: # now, round to extended(and don't alter the FPSR).
13067: #
13068: fneg_sd_ovfl_ena:
13069: mov.l %d2,-(%sp) # save d2
13070: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
13071: mov.l %d1,%d2 # make a copy
13072: andi.l &0x7fff,%d1 # strip sign
13073: andi.w &0x8000,%d2 # keep old sign
13074: sub.l %d0,%d1 # apply scale factor
13075: subi.l &0x6000,%d1 # subtract bias
13076: andi.w &0x7fff,%d1
13077: or.w %d2,%d1 # concat sign,exp
13078: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
13079: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
13080: mov.l (%sp)+,%d2 # restore d2
13081: bra.b fneg_sd_ovfl_dis
13082:
13083: #
13084: # the move in MAY overflow. so...
13085: #
13086: fneg_sd_may_ovfl:
13087: fmov.l &0x0,%fpsr # clear FPSR
13088: fmov.l L_SCR3(%a6),%fpcr # set FPCR
13089:
13090: fneg.x FP_SCR0(%a6),%fp0 # perform negation
13091:
13092: fmov.l %fpsr,%d1 # save status
13093: fmov.l &0x0,%fpcr # clear FPCR
13094:
13095: or.l %d1,USER_FPSR(%a6) # save INEX2,N
13096:
13097: fabs.x %fp0,%fp1 # make a copy of result
13098: fcmp.b %fp1,&0x2 # is |result| >= 2.b?
13099: fbge.w fneg_sd_ovfl_tst # yes; overflow has occurred
13100:
13101: # no, it didn't overflow; we have correct result
13102: bra.w fneg_sd_normal_exit
13103:
13104: ##########################################################################
13105:
13106: #
13107: # input is not normalized; what is it?
13108: #
13109: fneg_not_norm:
13110: cmpi.b %d1,&DENORM # weed out DENORM
13111: beq.w fneg_denorm
13112: cmpi.b %d1,&SNAN # weed out SNAN
13113: beq.l res_snan_1op
13114: cmpi.b %d1,&QNAN # weed out QNAN
13115: beq.l res_qnan_1op
13116:
13117: #
13118: # do the fneg; at this point, only possible ops are ZERO and INF.
13119: # use fneg to determine ccodes.
13120: # prec:mode should be zero at this point but it won't affect answer anyways.
13121: #
13122: fneg.x SRC_EX(%a0),%fp0 # do fneg
13123: fmov.l %fpsr,%d0
13124: rol.l &0x8,%d0 # put ccodes in lo byte
13125: mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
13126: rts
13127:
13128: #########################################################################
13129: # XDEF **************************************************************** #
13130: #	ftst(): emulates the ftst instruction			#
13131: # #
13132: # XREF **************************************************************** #
13133: # res{s,q}nan_1op() - set NAN result for monadic instruction #
13134: # #
13135: # INPUT *************************************************************** #
13136: # a0 = pointer to extended precision source operand #
13137: # #
13138: # OUTPUT ************************************************************** #
13139: # none #
13140: # #
13141: # ALGORITHM *********************************************************** #
13142: # Check the source operand tag (STAG) and set the FPSR according #
13143: # to the operand type and sign. #
13144: # #
13145: #########################################################################
13146:
# ftst: in: a0 = src operand ptr. Writes only the FPSR condition codes
# (N/Z/I as appropriate for the operand class and sign); no fp result.
13147: global ftst
13148: ftst:
13149: mov.b STAG(%a6),%d1
13150: bne.b ftst_not_norm # optimize on non-norm input
13151:
13152: #
13153: # Norm:
13154: #
13155: ftst_norm:
13156: tst.b SRC_EX(%a0) # is operand negative?
13157: bmi.b ftst_norm_m # yes
13158: rts
13159: ftst_norm_m:
13160: mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
13161: rts
13162:
13163: #
13164: # input is not normalized; what is it?
13165: #
13166: ftst_not_norm:
13167: cmpi.b %d1,&ZERO # weed out ZERO
13168: beq.b ftst_zero
13169: cmpi.b %d1,&INF # weed out INF
13170: beq.b ftst_inf
13171: cmpi.b %d1,&SNAN # weed out SNAN
13172: beq.l res_snan_1op
13173: cmpi.b %d1,&QNAN # weed out QNAN
13174: beq.l res_qnan_1op
13175:
13176: #
13177: # Denorm:
13178: #
13179: ftst_denorm:
13180: tst.b SRC_EX(%a0) # is operand negative?
13181: bmi.b ftst_denorm_m # yes
13182: rts
13183: ftst_denorm_m:
13184: mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
13185: rts
13186:
13187: #
13188: # Infinity:
13189: #
13190: ftst_inf:
13191: tst.b SRC_EX(%a0) # is operand negative?
13192: bmi.b ftst_inf_m # yes
13193: ftst_inf_p:
13194: mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
13195: rts
13196: ftst_inf_m:
13197: mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
13198: rts
13199:
13200: #
13201: # Zero:
13202: #
13203: ftst_zero:
13204: tst.b SRC_EX(%a0) # is operand negative?
13205: bmi.b ftst_zero_m # yes
13206: ftst_zero_p:
13207: mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
13208: rts
13209: ftst_zero_m:
13210: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
13211: rts
13212:
13213: #########################################################################
13214: # XDEF **************************************************************** #
13215: # fint(): emulates the fint instruction #
13216: # #
13217: # XREF **************************************************************** #
13218: # res_{s,q}nan_1op() - set NAN result for monadic operation #
13219: # #
13220: # INPUT *************************************************************** #
13221: # a0 = pointer to extended precision source operand #
13222: # d0 = round precision/mode #
13223: # #
13224: # OUTPUT ************************************************************** #
13225: # fp0 = result #
13226: # #
13227: # ALGORITHM *********************************************************** #
13228: # Separate according to operand type. Unnorms don't pass through #
13229: # here. For norms, load the rounding mode/prec, execute a "fint", then #
13230: # store the resulting FPSR bits. #
13231: # For denorms, force the j-bit to a one and do the same as for #
13232: # norms. Denorms are so low that the answer will either be a zero or a #
13233: # one. #
13234: # For zeroes/infs/NANs, return the same while setting the FPSR #
13235: # as appropriate. #
13236: # #
13237: #########################################################################
13238:
# fint: in: a0 = src operand ptr, d0 = rnd prec/mode; out: fp0 = result
13239: global fint
13240: fint:
13241: mov.b STAG(%a6),%d1
13242: bne.b fint_not_norm # optimize on non-norm input
13243:
13244: #
13245: # Norm:
13246: #
13247: fint_norm:
13248: andi.b &0x30,%d0 # clear prec field (==> ext); keep rnd mode
13249:
13250: fmov.l %d0,%fpcr # set FPCR
13251: fmov.l &0x0,%fpsr # clear FPSR
13252:
13253: fint.x SRC(%a0),%fp0 # execute fint
13254:
13255: fmov.l &0x0,%fpcr # clear FPCR
13256: fmov.l %fpsr,%d0 # save FPSR
13257: or.l %d0,USER_FPSR(%a6) # set exception bits
13258:
13259: rts
13260:
13261: #
13262: # input is not normalized; what is it?
13263: #
13264: fint_not_norm:
13265: cmpi.b %d1,&ZERO # weed out ZERO
13266: beq.b fint_zero
13267: cmpi.b %d1,&INF # weed out INF
13268: beq.b fint_inf
13269: cmpi.b %d1,&DENORM # weed out DENORM
13270: beq.b fint_denorm
13271: cmpi.b %d1,&SNAN # weed out SNAN
13272: beq.l res_snan_1op
13273: bra.l res_qnan_1op # weed out QNAN
13274:
13275: #
13276: # Denorm:
13277: #
13278: # for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
13279: # also, the INEX2 and AINEX exception bits will be set.
13280: # so, we could either set these manually or force the DENORM
13281: # to a very small NORM and ship it to the NORM routine.
13282: # I do the latter.
13283: #
13284: fint_denorm:
13285: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
13286: mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
13287: lea FP_SCR0(%a6),%a0
13288: bra.b fint_norm
13289:
13290: #
13291: # Zero:
13292: #
13293: fint_zero:
13294: tst.b SRC_EX(%a0) # is ZERO negative?
13295: bmi.b fint_zero_m # yes
13296: fint_zero_p:
13297: fmov.s &0x00000000,%fp0 # return +ZERO in fp0
13298: mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
13299: rts
13300: fint_zero_m:
13301: fmov.s &0x80000000,%fp0 # return -ZERO in fp0
13302: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
13303: rts
13304:
13305: #
13306: # Infinity:
13307: #
13308: fint_inf:
13309: fmovm.x SRC(%a0),&0x80 # return result in fp0
13310: tst.b SRC_EX(%a0) # is INF negative?
13311: bmi.b fint_inf_m # yes
13312: fint_inf_p:
13313: mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
13314: rts
13315: fint_inf_m:
13316: mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
13317: rts
13318:
13319: #########################################################################
13320: # XDEF **************************************************************** #
13321: # fintrz(): emulates the fintrz instruction #
13322: # #
13323: # XREF **************************************************************** #
13324: # res_{s,q}nan_1op() - set NAN result for monadic operation #
13325: # #
13326: # INPUT *************************************************************** #
13327: # a0 = pointer to extended precision source operand #
13328: # d0 = round precision/mode #
13329: # #
13330: # OUTPUT ************************************************************** #
13331: # fp0 = result #
13332: # #
13333: # ALGORITHM *********************************************************** #
13334: # Separate according to operand type. Unnorms don't pass through #
13335: # here. For norms, load the rounding mode/prec, execute a "fintrz", #
13336: # then store the resulting FPSR bits. #
13337: # For denorms, force the j-bit to a one and do the same as for #
13338: # norms. Denorms are so low that the answer will either be a zero or a #
13339: # one. #
13340: # For zeroes/infs/NANs, return the same while setting the FPSR #
13341: # as appropriate. #
13342: # #
13343: #########################################################################
13344:
# fintrz: in: a0 = src operand ptr; out: fp0 = result.
# rounding is fixed (toward zero by the fintrz instruction itself),
# so FPCR is not loaded on the norm path.
13345: global fintrz
13346: fintrz:
13347: mov.b STAG(%a6),%d1
13348: bne.b fintrz_not_norm # optimize on non-norm input
13349:
13350: #
13351: # Norm:
13352: #
13353: fintrz_norm:
13354: fmov.l &0x0,%fpsr # clear FPSR
13355:
13356: fintrz.x SRC(%a0),%fp0 # execute fintrz
13357:
13358: fmov.l %fpsr,%d0 # save FPSR
13359: or.l %d0,USER_FPSR(%a6) # set exception bits
13360:
13361: rts
13362:
13363: #
13364: # input is not normalized; what is it?
13365: #
13366: fintrz_not_norm:
13367: cmpi.b %d1,&ZERO # weed out ZERO
13368: beq.b fintrz_zero
13369: cmpi.b %d1,&INF # weed out INF
13370: beq.b fintrz_inf
13371: cmpi.b %d1,&DENORM # weed out DENORM
13372: beq.b fintrz_denorm
13373: cmpi.b %d1,&SNAN # weed out SNAN
13374: beq.l res_snan_1op
13375: bra.l res_qnan_1op # weed out QNAN
13376:
13377: #
13378: # Denorm:
13379: #
13380: # for DENORMs, the result will be (+/-)ZERO.
13381: # also, the INEX2 and AINEX exception bits will be set.
13382: # so, we could either set these manually or force the DENORM
13383: # to a very small NORM and ship it to the NORM routine.
13384: # I do the latter.
13385: #
13386: fintrz_denorm:
13387: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
13388: mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
13389: lea FP_SCR0(%a6),%a0
13390: bra.b fintrz_norm
13391:
13392: #
13393: # Zero:
13394: #
13395: fintrz_zero:
13396: tst.b SRC_EX(%a0) # is ZERO negative?
13397: bmi.b fintrz_zero_m # yes
13398: fintrz_zero_p:
13399: fmov.s &0x00000000,%fp0 # return +ZERO in fp0
13400: mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
13401: rts
13402: fintrz_zero_m:
13403: fmov.s &0x80000000,%fp0 # return -ZERO in fp0
13404: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
13405: rts
13406:
13407: #
13408: # Infinity:
13409: #
13410: fintrz_inf:
13411: fmovm.x SRC(%a0),&0x80 # return result in fp0
13412: tst.b SRC_EX(%a0) # is INF negative?
13413: bmi.b fintrz_inf_m # yes
13414: fintrz_inf_p:
13415: mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
13416: rts
13417: fintrz_inf_m:
13418: mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
13419: rts
13420:
13421: #########################################################################
13422: # XDEF **************************************************************** #
13423: # fabs(): emulates the fabs instruction #
13424: # fsabs(): emulates the fsabs instruction #
13425: # fdabs(): emulates the fdabs instruction #
13426: # #
13427: # XREF **************************************************************** #
13428: # norm() - normalize denorm mantissa to provide EXOP #
13429: # scale_to_zero_src() - make exponent. = 0; get scale factor #
13430: # unf_res() - calculate underflow result #
13431: # ovf_res() - calculate overflow result #
13432: # res_{s,q}nan_1op() - set NAN result for monadic operation #
13433: # #
13434: # INPUT *************************************************************** #
13435: # a0 = pointer to extended precision source operand #
13436: # d0 = rnd precision/mode #
13437: # #
13438: # OUTPUT ************************************************************** #
13439: # fp0 = result #
13440: # fp1 = EXOP (if exception occurred) #
13441: # #
13442: # ALGORITHM *********************************************************** #
13443: # Handle NANs, infinities, and zeroes as special cases. Divide #
13444: # norms into extended, single, and double precision. #
13445: # Simply clear sign for extended precision norm. Ext prec denorm #
13446: # gets an EXOP created for it since it's an underflow. #
13447: # Double and single precision can overflow and underflow. First, #
13448: # scale the operand such that the exponent is zero. Perform an "fabs" #
13449: # using the correct rnd mode/prec. Check to see if the original #
13450: # exponent would take an exception. If so, use unf_res() or ovf_res() #
13451: # to calculate the default result. Also, create the EXOP for the #
13452: # exceptional case. If no exception should occur, insert the correct #
13453: # result exponent and return. #
13454: # Unnorms don't pass through here. #
13455: # #
13456: #########################################################################
13457:
13458: 	global		fsabs
13459: fsabs:
13460: 	andi.b		&0x30,%d0		# clear rnd prec
13461: 	ori.b		&s_mode*0x10,%d0	# insert sgl precision
13462: 	bra.b		fabs
13463: 
13464: 	global		fdabs
13465: fdabs:
13466: 	andi.b		&0x30,%d0		# clear rnd prec
13467: 	ori.b		&d_mode*0x10,%d0	# insert dbl precision; fall through to fabs
13468: 
13469: 	global		fabs
13470: fabs:
13471: 	mov.l		%d0,L_SCR3(%a6)		# store rnd info
13472: 	mov.b		STAG(%a6),%d1		# fetch source type tag
13473: 	bne.w		fabs_not_norm		# optimize on non-norm input
13474: 
13475: #
13476: # ABSOLUTE VALUE: norms and denorms ONLY!
13477: #
13478: fabs_norm:
13479: 	andi.b		&0xc0,%d0		# is precision extended?
13480: 	bne.b		fabs_not_ext		# no; go handle sgl or dbl
13481: 
13482: #
13483: # precision selected is extended. so...we can not get an underflow
13484: # or overflow because of rounding to the correct precision. so...
13485: # skip the scaling and unscaling...
13486: #
13487: 	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
13488: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13489: 	mov.w		SRC_EX(%a0),%d1
13490: 	bclr		&15,%d1			# force absolute value
13491: 	mov.w		%d1,FP_SCR0_EX(%a6)	# insert exponent
13492: 	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
13493: 	rts
13494: 
13495: #
13496: # for an extended precision DENORM, the UNFL exception bit is set
13497: # the accrued bit is NOT set in this instance(no inexactness!)
13498: #
13499: fabs_denorm:
13500: 	andi.b		&0xc0,%d0		# is precision extended?
13501: 	bne.b		fabs_not_ext		# no
13502: 
13503: 	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
13504: 
13505: 	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
13506: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13507: 	mov.w		SRC_EX(%a0),%d0
13508: 	bclr		&15,%d0			# clear sign
13509: 	mov.w		%d0,FP_SCR0_EX(%a6)	# insert exponent
13510: 
13511: 	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
13512: 
13513: 	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
13514: 	bne.b		fabs_ext_unfl_ena
13515: 	rts
13516: 
13517: #
13518: # the input is an extended DENORM and underflow is enabled in the FPCR.
13519: # normalize the mantissa and add the bias of 0x6000 to the resulting negative
13520: # exponent and insert back into the operand.
13521: #
13522: fabs_ext_unfl_ena:
13523: 	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
13524: 	bsr.l		norm			# normalize result
13525: 	neg.w		%d0			# new exponent = -(shft val)
13526: 	addi.w		&0x6000,%d0		# add new bias to exponent
13527: 	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
13528: 	andi.w		&0x8000,%d1		# keep old sign
13529: 	andi.w		&0x7fff,%d0		# clear sign position
13530: 	or.w		%d1,%d0			# concat old sign, new exponent
13531: 	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
13532: 	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
13533: 	rts
13534:
13535: #
13536: # operand is either single or double
13537: #
13538: fabs_not_ext:
13539: 	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
13540: 	bne.b		fabs_dbl
13541: 
13542: #
13543: # operand is to be rounded to single precision
13544: #
13545: fabs_sgl:
13546: 	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
13547: 	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
13548: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13549: 	bsr.l		scale_to_zero_src	# calculate scale factor
13550: 
13551: 	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
13552: 	bge.w		fabs_sd_unfl		# yes; go handle underflow
13553: 	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
13554: 	beq.w		fabs_sd_may_ovfl	# maybe; go check
13555: 	blt.w		fabs_sd_ovfl		# yes; go handle overflow
13556: 
13557: #
13558: # operand will NOT overflow or underflow when moved in to the fp reg file
13559: #
13560: fabs_sd_normal:
13561: 	fmov.l		&0x0,%fpsr		# clear FPSR
13562: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
13563: 
13564: 	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
13565: 
13566: 	fmov.l		%fpsr,%d1		# save FPSR
13567: 	fmov.l		&0x0,%fpcr		# clear FPCR
13568: 
13569: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
13570: 
13571: fabs_sd_normal_exit:
13572: 	mov.l		%d2,-(%sp)		# save d2
13573: 	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
13574: 	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
13575: 	mov.l		%d1,%d2			# make a copy
13576: 	andi.l		&0x7fff,%d1		# strip sign
13577: 	sub.l		%d0,%d1			# subtract scale factor
13578: 	andi.w		&0x8000,%d2		# keep old sign
13579: 	or.w		%d1,%d2			# concat old sign,new exp
13580: 	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
13581: 	mov.l		(%sp)+,%d2		# restore d2
13582: 	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
13583: 	rts
13584: 
13585: #
13586: # operand is to be rounded to double precision
13587: #
13588: fabs_dbl:
13589: 	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
13590: 	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
13591: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13592: 	bsr.l		scale_to_zero_src	# calculate scale factor
13593: 
13594: 	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
13595: 	bge.b		fabs_sd_unfl		# yes; go handle underflow
13596: 	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
13597: 	beq.w		fabs_sd_may_ovfl	# maybe; go check
13598: 	blt.w		fabs_sd_ovfl		# yes; go handle overflow
13599: 	bra.w		fabs_sd_normal		# no; go handle normalized op
13600:
13601: #
13602: # operand WILL underflow when moved in to the fp register file
13603: #
13604: fabs_sd_unfl:
13605: 	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
13606: 
13607: 	bclr		&0x7,FP_SCR0_EX(%a6)	# force absolute value (clear sign bit)
13608: 
13609: # if underflow or inexact is enabled, go calculate EXOP first.
13610: 	mov.b		FPCR_ENABLE(%a6),%d1
13611: 	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
13612: 	bne.b		fabs_sd_unfl_ena	# yes
13613: 
13614: fabs_sd_unfl_dis:
13615: 	lea		FP_SCR0(%a6),%a0	# pass: result addr
13616: 	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
13617: 	bsr.l		unf_res			# calculate default result
13618: 	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
13619: 	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
13620: 	rts
13621: 
13622: #
13623: # operand will underflow AND underflow is enabled.
13624: # therefore, we must return the result rounded to extended precision.
13625: #
13626: fabs_sd_unfl_ena:
13627: 	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
13628: 	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
13629: 	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
13630: 
13631: 	mov.l		%d2,-(%sp)		# save d2
13632: 	mov.l		%d1,%d2			# make a copy
13633: 	andi.l		&0x7fff,%d1		# strip sign
13634: 	andi.w		&0x8000,%d2		# keep old sign
13635: 	sub.l		%d0,%d1			# subtract scale factor
13636: 	addi.l		&0x6000,%d1		# add new bias
13637: 	andi.w		&0x7fff,%d1
13638: 	or.w		%d2,%d1			# concat new sign,new exp
13639: 	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
13640: 	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
13641: 	mov.l		(%sp)+,%d2		# restore d2
13642: 	bra.b		fabs_sd_unfl_dis
13643:
13644: #
13645: # operand WILL overflow.
13646: #
13647: fabs_sd_ovfl:
13648: 	fmov.l		&0x0,%fpsr		# clear FPSR
13649: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
13650: 
13651: 	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
13652: 
13653: 	fmov.l		&0x0,%fpcr		# clear FPCR
13654: 	fmov.l		%fpsr,%d1		# save FPSR
13655: 
13656: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
13657: 
13658: fabs_sd_ovfl_tst:
13659: 	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
13660: 
13661: 	mov.b		FPCR_ENABLE(%a6),%d1
13662: 	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
13663: 	bne.b		fabs_sd_ovfl_ena	# yes
13664: 
13665: #
13666: # OVFL is not enabled; therefore, we must create the default result by
13667: # calling ovf_res().
13668: #
13669: fabs_sd_ovfl_dis:
13670: 	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
13671: 	sne		%d1			# set sign param accordingly
13672: 	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
13673: 	bsr.l		ovf_res			# calculate default result
13674: 	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
13675: 	fmovm.x		(%a0),&0x80		# return default result in fp0
13676: 	rts
13677: 
13678: #
13679: # OVFL is enabled.
13680: # the INEX2 bit has already been updated by the round to the correct precision.
13681: # now, round to extended(and don't alter the FPSR).
13682: #
13683: fabs_sd_ovfl_ena:
13684: 	mov.l		%d2,-(%sp)		# save d2
13685: 	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
13686: 	mov.l		%d1,%d2			# make a copy
13687: 	andi.l		&0x7fff,%d1		# strip sign
13688: 	andi.w		&0x8000,%d2		# keep old sign
13689: 	sub.l		%d0,%d1			# subtract scale factor
13690: 	subi.l		&0x6000,%d1		# subtract bias
13691: 	andi.w		&0x7fff,%d1
13692: 	or.w		%d2,%d1			# concat sign,exp
13693: 	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
13694: 	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
13695: 	mov.l		(%sp)+,%d2		# restore d2
13696: 	bra.b		fabs_sd_ovfl_dis
13697: 
13698: #
13699: # the move in MAY overflow. so...
13700: #
13701: fabs_sd_may_ovfl:
13702: 	fmov.l		&0x0,%fpsr		# clear FPSR
13703: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
13704: 
13705: 	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
13706: 
13707: 	fmov.l		%fpsr,%d1		# save status
13708: 	fmov.l		&0x0,%fpcr		# clear FPCR
13709: 
13710: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
13711: 
13712: 	fabs.x		%fp0,%fp1		# make a copy of result
13713: 	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
13714: 	fbge.w		fabs_sd_ovfl_tst	# yes; overflow has occurred
13715: 
13716: # no, it didn't overflow; we have correct result
13717: 	bra.w		fabs_sd_normal_exit
13718:
13719: ##########################################################################
13720: 
13721: #
13722: # input is not normalized; what is it?
13723: #
13724: fabs_not_norm:
13725: 	cmpi.b		%d1,&DENORM		# weed out DENORM
13726: 	beq.w		fabs_denorm
13727: 	cmpi.b		%d1,&SNAN		# weed out SNAN
13728: 	beq.l		res_snan_1op
13729: 	cmpi.b		%d1,&QNAN		# weed out QNAN
13730: 	beq.l		res_qnan_1op
13731: 
13732: 	fabs.x		SRC(%a0),%fp0		# force absolute value (only ZERO or INF remain)
13733: 
13734: 	cmpi.b		%d1,&INF		# weed out INF
13735: 	beq.b		fabs_inf
13736: fabs_zero:
13737: 	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
13738: 	rts
13739: fabs_inf:
13740: 	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
13741: 	rts
13742:
13743: #########################################################################
13744: # XDEF **************************************************************** #
13745: # fcmp(): fp compare op routine #
13746: # #
13747: # XREF **************************************************************** #
13748: # res_qnan() - return QNAN result #
13749: # res_snan() - return SNAN result #
13750: # #
13751: # INPUT *************************************************************** #
13752: # a0 = pointer to extended precision source operand #
13753: # a1 = pointer to extended precision destination operand #
13754: # d0 = round prec/mode #
13755: # #
13756: # OUTPUT ************************************************************** #
13757: # None #
13758: # #
13759: # ALGORITHM *********************************************************** #
13760: # Handle NANs and denorms as special cases. For everything else, #
13761: # just use the actual fcmp instruction to produce the correct condition #
13762: # codes. #
13763: # #
13764: #########################################################################
13765:
13766: 	global		fcmp
13767: fcmp:
13768: 	clr.w		%d1
13769: 	mov.b		DTAG(%a6),%d1
13770: 	lsl.b		&0x3,%d1
13771: 	or.b		STAG(%a6),%d1		# %d1 = (DTAG << 3) | STAG
13772: 	bne.b		fcmp_not_norm		# optimize on non-norm input
13773: 
13774: #
13775: # COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
13776: #
13777: fcmp_norm:
13778: 	fmovm.x		DST(%a1),&0x80		# load dst op
13779: 
13780: 	fcmp.x		%fp0,SRC(%a0)		# do compare
13781: 
13782: 	fmov.l		%fpsr,%d0		# save FPSR
13783: 	rol.l		&0x8,%d0		# extract ccode bits
13784: 	mov.b		%d0,FPSR_CC(%a6)	# set ccode bits(no exc bits are set)
13785: 
13786: 	rts
13787: 
13788: #
13789: # fcmp: inputs are not both normalized; what are they?
13790: #
13791: fcmp_not_norm:
13792: 	mov.w		(tbl_fcmp_op.b,%pc,%d1.w*2),%d1
13793: 	jmp		(tbl_fcmp_op.b,%pc,%d1.w*1)
13794: 
13795: 	swbeg		&48
13796: tbl_fcmp_op: # row = DTAG (dst operand), column = STAG (src operand)
13797: 	short		fcmp_norm	-	tbl_fcmp_op # NORM - NORM
13798: 	short		fcmp_norm	-	tbl_fcmp_op # NORM - ZERO
13799: 	short		fcmp_norm	-	tbl_fcmp_op # NORM - INF
13800: 	short		fcmp_res_qnan	-	tbl_fcmp_op # NORM - QNAN
13801: 	short		fcmp_nrm_dnrm	-	tbl_fcmp_op # NORM - DENORM
13802: 	short		fcmp_res_snan	-	tbl_fcmp_op # NORM - SNAN
13803: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13804: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13805: 
13806: 	short		fcmp_norm	-	tbl_fcmp_op # ZERO - NORM
13807: 	short		fcmp_norm	-	tbl_fcmp_op # ZERO - ZERO
13808: 	short		fcmp_norm	-	tbl_fcmp_op # ZERO - INF
13809: 	short		fcmp_res_qnan	-	tbl_fcmp_op # ZERO - QNAN
13810: 	short		fcmp_dnrm_s	-	tbl_fcmp_op # ZERO - DENORM
13811: 	short		fcmp_res_snan	-	tbl_fcmp_op # ZERO - SNAN
13812: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13813: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13814: 
13815: 	short		fcmp_norm	-	tbl_fcmp_op # INF - NORM
13816: 	short		fcmp_norm	-	tbl_fcmp_op # INF - ZERO
13817: 	short		fcmp_norm	-	tbl_fcmp_op # INF - INF
13818: 	short		fcmp_res_qnan	-	tbl_fcmp_op # INF - QNAN
13819: 	short		fcmp_dnrm_s	-	tbl_fcmp_op # INF - DENORM
13820: 	short		fcmp_res_snan	-	tbl_fcmp_op # INF - SNAN
13821: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13822: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13823: 
13824: 	short		fcmp_res_qnan	-	tbl_fcmp_op # QNAN - NORM
13825: 	short		fcmp_res_qnan	-	tbl_fcmp_op # QNAN - ZERO
13826: 	short		fcmp_res_qnan	-	tbl_fcmp_op # QNAN - INF
13827: 	short		fcmp_res_qnan	-	tbl_fcmp_op # QNAN - QNAN
13828: 	short		fcmp_res_qnan	-	tbl_fcmp_op # QNAN - DENORM
13829: 	short		fcmp_res_snan	-	tbl_fcmp_op # QNAN - SNAN
13830: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13831: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13832: 
13833: 	short		fcmp_dnrm_nrm	-	tbl_fcmp_op # DENORM - NORM
13834: 	short		fcmp_dnrm_d	-	tbl_fcmp_op # DENORM - ZERO
13835: 	short		fcmp_dnrm_d	-	tbl_fcmp_op # DENORM - INF
13836: 	short		fcmp_res_qnan	-	tbl_fcmp_op # DENORM - QNAN
13837: 	short		fcmp_dnrm_sd	-	tbl_fcmp_op # DENORM - DENORM
13838: 	short		fcmp_res_snan	-	tbl_fcmp_op # DENORM - SNAN
13839: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13840: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13841: 
13842: 	short		fcmp_res_snan	-	tbl_fcmp_op # SNAN - NORM
13843: 	short		fcmp_res_snan	-	tbl_fcmp_op # SNAN - ZERO
13844: 	short		fcmp_res_snan	-	tbl_fcmp_op # SNAN - INF
13845: 	short		fcmp_res_snan	-	tbl_fcmp_op # SNAN - QNAN
13846: 	short		fcmp_res_snan	-	tbl_fcmp_op # SNAN - DENORM
13847: 	short		fcmp_res_snan	-	tbl_fcmp_op # SNAN - SNAN
13848: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13849: 	short		tbl_fcmp_op	-	tbl_fcmp_op #
13850: 
13851: # unlike all other functions for QNAN and SNAN, fcmp does NOT set the
13852: # 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
13853: fcmp_res_qnan:
13854: 	bsr.l		res_qnan
13855: 	andi.b		&0xf7,FPSR_CC(%a6)	# clear 'N' ccode bit
13856: 	rts
13857: fcmp_res_snan:
13858: 	bsr.l		res_snan
13859: 	andi.b		&0xf7,FPSR_CC(%a6)	# clear 'N' ccode bit
13860: 	rts
13861:
13862: #
13863: # DENORMs are a little more difficult.
13864: # If you have a 2 DENORMs, then you can just force the j-bit to a one
13865: # and use the fcmp_norm routine.
13866: # If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
13867: # and use the fcmp_norm routine.
13868: # If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
13869: # But with a DENORM and a NORM of the same sign, the neg bit is set if the
13870: # (1) signs are (+) and the DENORM is the dst or
13871: # (2) signs are (-) and the DENORM is the src
13872: #
13873: 
13874: fcmp_dnrm_s:
13875: 	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
13876: 	mov.l		SRC_HI(%a0),%d0
13877: 	bset		&31,%d0			# DENORM src; make into small norm
13878: 	mov.l		%d0,FP_SCR0_HI(%a6)
13879: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13880: 	lea		FP_SCR0(%a6),%a0
13881: 	bra.w		fcmp_norm
13882: 
13883: fcmp_dnrm_d:
13884: 	mov.l		DST_EX(%a1),FP_SCR0_EX(%a6)
13885: 	mov.l		DST_HI(%a1),%d0
13886: 	bset		&31,%d0			# DENORM dst; make into small norm
13887: 	mov.l		%d0,FP_SCR0_HI(%a6)
13888: 	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
13889: 	lea		FP_SCR0(%a6),%a1
13890: 	bra.w		fcmp_norm
13891: 
13892: fcmp_dnrm_sd:
13893: 	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
13894: 	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
13895: 	mov.l		DST_HI(%a1),%d0
13896: 	bset		&31,%d0			# DENORM dst; make into small norm
13897: 	mov.l		%d0,FP_SCR1_HI(%a6)
13898: 	mov.l		SRC_HI(%a0),%d0
13899: 	bset		&31,%d0			# DENORM src; make into small norm
13900: 	mov.l		%d0,FP_SCR0_HI(%a6)
13901: 	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
13902: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13903: 	lea		FP_SCR1(%a6),%a1
13904: 	lea		FP_SCR0(%a6),%a0
13905: 	bra.w		fcmp_norm
13906: 
13907: fcmp_nrm_dnrm:
13908: 	mov.b		SRC_EX(%a0),%d0		# determine if like signs
13909: 	mov.b		DST_EX(%a1),%d1
13910: 	eor.b		%d0,%d1
13911: 	bmi.w		fcmp_dnrm_s
13912: 
13913: # signs are the same, so must determine the answer ourselves.
13914: 	tst.b		%d0			# is src op negative?
13915: 	bmi.b		fcmp_nrm_dnrm_m		# yes
13916: 	rts
13917: fcmp_nrm_dnrm_m:
13918: 	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
13919: 	rts
13920: 
13921: fcmp_dnrm_nrm:
13922: 	mov.b		SRC_EX(%a0),%d0		# determine if like signs
13923: 	mov.b		DST_EX(%a1),%d1
13924: 	eor.b		%d0,%d1
13925: 	bmi.w		fcmp_dnrm_d
13926: 
13927: # signs are the same, so must determine the answer ourselves.
13928: 	tst.b		%d0			# is src op negative?
13929: 	bpl.b		fcmp_dnrm_nrm_m		# no
13930: 	rts
13931: fcmp_dnrm_nrm_m:
13932: 	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
13933: 	rts
13935: #########################################################################
13936: # XDEF **************************************************************** #
13937: # fsglmul(): emulates the fsglmul instruction #
13938: # #
13939: # XREF **************************************************************** #
13940: # scale_to_zero_src() - scale src exponent to zero #
13941: # scale_to_zero_dst() - scale dst exponent to zero #
13942: # unf_res4() - return default underflow result for sglop #
13943: # ovf_res() - return default overflow result #
13944: # res_qnan() - return QNAN result #
13945: # res_snan() - return SNAN result #
13946: # #
13947: # INPUT *************************************************************** #
13948: # a0 = pointer to extended precision source operand #
13949: # a1 = pointer to extended precision destination operand #
13950: # d0 rnd prec,mode #
13951: # #
13952: # OUTPUT ************************************************************** #
13953: # fp0 = result #
13954: # fp1 = EXOP (if exception occurred) #
13955: # #
13956: # ALGORITHM *********************************************************** #
13957: # Handle NANs, infinities, and zeroes as special cases. Divide #
13958: # norms/denorms into ext/sgl/dbl precision. #
13959: # For norms/denorms, scale the exponents such that a multiply #
13960: # instruction won't cause an exception. Use the regular fsglmul to #
13961: # compute a result. Check if the regular operands would have taken #
13962: # an exception. If so, return the default overflow/underflow result #
13963: # and return the EXOP if exceptions are enabled. Else, scale the #
13964: # result operand to the proper exponent. #
13965: # #
13966: #########################################################################
13967:
13968: 	global		fsglmul
13969: fsglmul:
13970: 	mov.l		%d0,L_SCR3(%a6)		# store rnd info
13971: 
13972: 	clr.w		%d1
13973: 	mov.b		DTAG(%a6),%d1
13974: 	lsl.b		&0x3,%d1
13975: 	or.b		STAG(%a6),%d1		# %d1 = (DTAG << 3) | STAG
13976: 
13977: 	bne.w		fsglmul_not_norm	# optimize on non-norm input
13978: 
13979: fsglmul_norm:
13980: 	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
13981: 	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
13982: 	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
13983: 
13984: 	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
13985: 	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
13986: 	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
13987: 
13988: 	bsr.l		scale_to_zero_src	# scale exponent
13989: 	mov.l		%d0,-(%sp)		# save scale factor 1
13990: 
13991: 	bsr.l		scale_to_zero_dst	# scale dst exponent
13992: 
13993: 	add.l		(%sp)+,%d0		# SCALE_FACTOR = scale1 + scale2
13994: 
13995: 	cmpi.l		%d0,&0x3fff-0x7ffe	# would result ovfl?
13996: 	beq.w		fsglmul_may_ovfl	# result may rnd to overflow
13997: 	blt.w		fsglmul_ovfl		# result will overflow
13998: 
13999: 	cmpi.l		%d0,&0x3fff+0x0001	# would result unfl?
14000: 	beq.w		fsglmul_may_unfl	# result may rnd to no unfl
14001: 	bgt.w		fsglmul_unfl		# result will underflow
14002: 
14003: fsglmul_normal:
14004: 	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
14005: 
14006: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
14007: 	fmov.l		&0x0,%fpsr		# clear FPSR
14008: 
14009: 	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
14010: 
14011: 	fmov.l		%fpsr,%d1		# save status
14012: 	fmov.l		&0x0,%fpcr		# clear FPCR
14013: 
14014: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
14015: 
14016: fsglmul_normal_exit:
14017: 	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
14018: 	mov.l		%d2,-(%sp)		# save d2
14019: 	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
14020: 	mov.l		%d1,%d2			# make a copy
14021: 	andi.l		&0x7fff,%d1		# strip sign
14022: 	andi.w		&0x8000,%d2		# keep old sign
14023: 	sub.l		%d0,%d1			# subtract scale factor
14024: 	or.w		%d2,%d1			# concat old sign,new exp
14025: 	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
14026: 	mov.l		(%sp)+,%d2		# restore d2
14027: 	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
14028: 	rts
14029:
14030: fsglmul_ovfl:
14031: 	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
14032: 
14033: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
14034: 	fmov.l		&0x0,%fpsr		# clear FPSR
14035: 
14036: 	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
14037: 
14038: 	fmov.l		%fpsr,%d1		# save status
14039: 	fmov.l		&0x0,%fpcr		# clear FPCR
14040: 
14041: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
14042: 
14043: fsglmul_ovfl_tst:
14044: 
14045: # save setting this until now because this is where fsglmul_may_ovfl may jump in
14046: 	or.l		&ovfl_inx_mask, USER_FPSR(%a6) # set ovfl/aovfl/ainex
14047: 
14048: 	mov.b		FPCR_ENABLE(%a6),%d1
14049: 	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
14050: 	bne.b		fsglmul_ovfl_ena	# yes
14051: 
14052: fsglmul_ovfl_dis:
14053: 	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
14054: 	sne		%d1			# set sign param accordingly
14055: 	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
14056: 	andi.b		&0x30,%d0		# force prec = ext
14057: 	bsr.l		ovf_res			# calculate default result
14058: 	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
14059: 	fmovm.x		(%a0),&0x80		# return default result in fp0
14060: 	rts
14061: 
14062: fsglmul_ovfl_ena:
14063: 	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
14064: 
14065: 	mov.l		%d2,-(%sp)		# save d2
14066: 	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
14067: 	mov.l		%d1,%d2			# make a copy
14068: 	andi.l		&0x7fff,%d1		# strip sign
14069: 	sub.l		%d0,%d1			# subtract scale factor
14070: 	subi.l		&0x6000,%d1		# subtract bias
14071: 	andi.w		&0x7fff,%d1
14072: 	andi.w		&0x8000,%d2		# keep old sign
14073: 	or.w		%d2,%d1			# concat old sign,new exp
14074: 	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
14075: 	mov.l		(%sp)+,%d2		# restore d2
14076: 	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
14077: 	bra.b		fsglmul_ovfl_dis
14078: 
14079: fsglmul_may_ovfl:
14080: 	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
14081: 
14082: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
14083: 	fmov.l		&0x0,%fpsr		# clear FPSR
14084: 
14085: 	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
14086: 
14087: 	fmov.l		%fpsr,%d1		# save status
14088: 	fmov.l		&0x0,%fpcr		# clear FPCR
14089: 
14090: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
14091: 
14092: 	fabs.x		%fp0,%fp1		# make a copy of result
14093: 	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
14094: 	fbge.w		fsglmul_ovfl_tst	# yes; overflow has occurred
14095: 
14096: # no, it didn't overflow; we have correct result
14097: 	bra.w		fsglmul_normal_exit
14098:
14099: fsglmul_unfl:
14100: 	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
14101: 
14102: 	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
14103: 
14104: 	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR (round-to-zero)
14105: 	fmov.l		&0x0,%fpsr		# clear FPSR
14106: 
14107: 	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
14108: 
14109: 	fmov.l		%fpsr,%d1		# save status
14110: 	fmov.l		&0x0,%fpcr		# clear FPCR
14111: 
14112: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
14113: 
14114: 	mov.b		FPCR_ENABLE(%a6),%d1
14115: 	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
14116: 	bne.b		fsglmul_unfl_ena	# yes
14117: 
14118: fsglmul_unfl_dis:
14119: 	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
14120: 
14121: 	lea		FP_SCR0(%a6),%a0	# pass: result addr
14122: 	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
14123: 	bsr.l		unf_res4		# calculate default result
14124: 	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
14125: 	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
14126: 	rts
14127: 
14128: #
14129: # UNFL is enabled.
14130: #
14131: fsglmul_unfl_ena:
14132: 	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
14133: 
14134: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
14135: 	fmov.l		&0x0,%fpsr		# clear FPSR
14136: 
14137: 	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
14138: 
14139: 	fmov.l		&0x0,%fpcr		# clear FPCR
14140: 
14141: 	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
14142: 	mov.l		%d2,-(%sp)		# save d2
14143: 	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
14144: 	mov.l		%d1,%d2			# make a copy
14145: 	andi.l		&0x7fff,%d1		# strip sign
14146: 	andi.w		&0x8000,%d2		# keep old sign
14147: 	sub.l		%d0,%d1			# subtract scale factor
14148: 	addi.l		&0x6000,%d1		# add bias
14149: 	andi.w		&0x7fff,%d1
14150: 	or.w		%d2,%d1			# concat old sign,new exp
14151: 	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
14152: 	mov.l		(%sp)+,%d2		# restore d2
14153: 	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
14154: 	bra.w		fsglmul_unfl_dis
14155: 
14156: fsglmul_may_unfl:
14157: 	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
14158: 
14159: 	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
14160: 	fmov.l		&0x0,%fpsr		# clear FPSR
14161: 
14162: 	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
14163: 
14164: 	fmov.l		%fpsr,%d1		# save status
14165: 	fmov.l		&0x0,%fpcr		# clear FPCR
14166: 
14167: 	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
14168: 
14169: 	fabs.x		%fp0,%fp1		# make a copy of result
14170: 	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
14171: 	fbgt.w		fsglmul_normal_exit	# no; no underflow occurred
14172: 	fblt.w		fsglmul_unfl		# yes; underflow occurred
14173: 
14174: #
14175: # we still don't know if underflow occurred. result is ~ equal to 2. but,
14176: # we don't know if the result was an underflow that rounded up to a 2 or
14177: # a normalized number that rounded down to a 2. so, redo the entire operation
14178: # using RZ as the rounding mode to see what the pre-rounded result is.
14179: # this case should be relatively rare.
14180: #
14181: 	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
14182: 
14183: 	mov.l		L_SCR3(%a6),%d1
14184: 	andi.b		&0xc0,%d1		# keep rnd prec
14185: 	ori.b		&rz_mode*0x10,%d1	# insert RZ
14186: 
14187: 	fmov.l		%d1,%fpcr		# set FPCR
14188: 	fmov.l		&0x0,%fpsr		# clear FPSR
14189: 
14190: 	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
14191: 
14192: 	fmov.l		&0x0,%fpcr		# clear FPCR
14193: 	fabs.x		%fp1			# make absolute value
14194: 	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
14195: 	fbge.w		fsglmul_normal_exit	# no; no underflow occurred
14196: 	bra.w		fsglmul_unfl		# yes, underflow occurred
14197:
14198: ##############################################################################
14199: 
14200: #
14201: # Single Precision Multiply: inputs are not both normalized; what are they?
14202: #
14203: fsglmul_not_norm:
14204: 	mov.w		(tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
14205: 	jmp		(tbl_fsglmul_op.b,%pc,%d1.w*1)
14206: 
14207: 	swbeg		&48
14208: tbl_fsglmul_op: # row = DTAG (dst operand), column = STAG (src operand)
14209: 	short		fsglmul_norm		-	tbl_fsglmul_op # NORM x NORM
14210: 	short		fsglmul_zero		-	tbl_fsglmul_op # NORM x ZERO
14211: 	short		fsglmul_inf_src		-	tbl_fsglmul_op # NORM x INF
14212: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # NORM x QNAN
14213: 	short		fsglmul_norm		-	tbl_fsglmul_op # NORM x DENORM
14214: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # NORM x SNAN
14215: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14216: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14217: 
14218: 	short		fsglmul_zero		-	tbl_fsglmul_op # ZERO x NORM
14219: 	short		fsglmul_zero		-	tbl_fsglmul_op # ZERO x ZERO
14220: 	short		fsglmul_res_operr	-	tbl_fsglmul_op # ZERO x INF
14221: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # ZERO x QNAN
14222: 	short		fsglmul_zero		-	tbl_fsglmul_op # ZERO x DENORM
14223: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # ZERO x SNAN
14224: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14225: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14226: 
14227: 	short		fsglmul_inf_dst		-	tbl_fsglmul_op # INF x NORM
14228: 	short		fsglmul_res_operr	-	tbl_fsglmul_op # INF x ZERO
14229: 	short		fsglmul_inf_dst		-	tbl_fsglmul_op # INF x INF
14230: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # INF x QNAN
14231: 	short		fsglmul_inf_dst		-	tbl_fsglmul_op # INF x DENORM
14232: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # INF x SNAN
14233: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14234: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14235: 
14236: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # QNAN x NORM
14237: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # QNAN x ZERO
14238: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # QNAN x INF
14239: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # QNAN x QNAN
14240: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # QNAN x DENORM
14241: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # QNAN x SNAN
14242: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14243: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14244: 
14245: 	short		fsglmul_norm		-	tbl_fsglmul_op # DENORM x NORM
14246: 	short		fsglmul_zero		-	tbl_fsglmul_op # DENORM x ZERO
14247: 	short		fsglmul_inf_src		-	tbl_fsglmul_op # DENORM x INF
14248: 	short		fsglmul_res_qnan	-	tbl_fsglmul_op # DENORM x QNAN
14249: 	short		fsglmul_norm		-	tbl_fsglmul_op # DENORM x DENORM
14250: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # DENORM x SNAN
14251: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14252: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14253: 
14254: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # SNAN x NORM
14255: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # SNAN x ZERO
14256: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # SNAN x INF
14257: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # SNAN x QNAN
14258: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # SNAN x DENORM
14259: 	short		fsglmul_res_snan	-	tbl_fsglmul_op # SNAN x SNAN
14260: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14261: 	short		tbl_fsglmul_op		-	tbl_fsglmul_op #
14262: 
14263: fsglmul_res_operr:
14264: 	bra.l		res_operr
14265: fsglmul_res_snan:
14266: 	bra.l		res_snan
14267: fsglmul_res_qnan:
14268: 	bra.l		res_qnan
14269: fsglmul_zero:
14270: 	bra.l		fmul_zero
14271: fsglmul_inf_src:
14272: 	bra.l		fmul_inf_src
14273: fsglmul_inf_dst:
14274: 	bra.l		fmul_inf_dst
14275:
14276: #########################################################################
14277: # XDEF **************************************************************** #
14278: # fsgldiv(): emulates the fsgldiv instruction #
14279: # #
14280: # XREF **************************************************************** #
14281: # scale_to_zero_src() - scale src exponent to zero #
14282: # scale_to_zero_dst() - scale dst exponent to zero #
14283: # unf_res4() - return default underflow result for sglop #
14284: # ovf_res() - return default overflow result #
14285: # res_qnan() - return QNAN result #
14286: # res_snan() - return SNAN result #
14287: # #
14288: # INPUT *************************************************************** #
14289: # a0 = pointer to extended precision source operand #
14290: # a1 = pointer to extended precision destination operand #
14291: # d0 rnd prec,mode #
14292: # #
14293: # OUTPUT ************************************************************** #
14294: # fp0 = result #
14295: # fp1 = EXOP (if exception occurred) #
14296: # #
14297: # ALGORITHM *********************************************************** #
14298: # Handle NANs, infinities, and zeroes as special cases. Divide #
14299: # norms/denorms into ext/sgl/dbl precision. #
14300: # For norms/denorms, scale the exponents such that a divide #
14301: # instruction won't cause an exception. Use the regular fsgldiv to #
14302: # compute a result. Check if the regular operands would have taken #
14303: # an exception. If so, return the default overflow/underflow result #
14304: # and return the EXOP if exceptions are enabled. Else, scale the #
14305: # result operand to the proper exponent. #
14306: # #
14307: #########################################################################
14308:
14309: global fsgldiv
14310: fsgldiv:
14311: mov.l %d0,L_SCR3(%a6) # store rnd info
14312:
14313: clr.w %d1 # build combined operand tag:
14314: mov.b DTAG(%a6),%d1 # d1 = (dst tag << 3) | src tag
14315: lsl.b &0x3,%d1
14316: or.b STAG(%a6),%d1 # combine src tags
14317:
14318: bne.w fsgldiv_not_norm # optimize on non-norm input
14319:
14320: #
14321: # DIVIDE: NORMs and DENORMs ONLY!
14322: #
14323: fsgldiv_norm:
14324: mov.w DST_EX(%a1),FP_SCR1_EX(%a6) # copy dst op to FP_SCR1
14325: mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
14326: mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
14327:
14328: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy src op to FP_SCR0
14329: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
14330: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
14331:
14332: bsr.l scale_to_zero_src # calculate scale factor 1
14333: mov.l %d0,-(%sp) # save scale factor 1
14334:
14335: bsr.l scale_to_zero_dst # calculate scale factor 2
14336:
14337: neg.l (%sp) # S.F. = scale1 - scale2
14338: add.l %d0,(%sp)
14339:
14340: mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
14341: lsr.b &0x6,%d1 # shift prec into low bits
14342: mov.l (%sp)+,%d0 # d0 = scale factor
14343: cmpi.l %d0,&0x3fff-0x7ffe # will result overflow?
14344: ble.w fsgldiv_may_ovfl # maybe; go find out
14345:
14346: cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
14347: beq.w fsgldiv_may_unfl # maybe
14348: bgt.w fsgldiv_unfl # yes; go handle underflow
14349:
14350: fsgldiv_normal:
14351: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14352:
14353: fmov.l L_SCR3(%a6),%fpcr # set FPCR
14354: fmov.l &0x0,%fpsr # clear FPSR
14355:
14356: fsgldiv.x FP_SCR0(%a6),%fp0 # perform sgl divide
14357:
14358: fmov.l %fpsr,%d1 # save FPSR
14359: fmov.l &0x0,%fpcr # clear FPCR
14360:
14361: or.l %d1,USER_FPSR(%a6) # save INEX2,N
14362:
14363: fsgldiv_normal_exit:
14364: fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
14365: mov.l %d2,-(%sp) # save d2
14366: mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
14367: mov.l %d1,%d2 # make a copy
14368: andi.l &0x7fff,%d1 # strip sign
14369: andi.w &0x8000,%d2 # keep old sign
14370: sub.l %d0,%d1 # apply (subtract) scale factor
14371: or.w %d2,%d1 # concat old sign,new exp
14372: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14373: mov.l (%sp)+,%d2 # restore d2
14374: fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
14375: rts
14376:
14377: fsgldiv_may_ovfl:
14378: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14379:
14380: fmov.l L_SCR3(%a6),%fpcr # set FPCR
14381: fmov.l &0x0,%fpsr # clear FPSR
14382:
14383: fsgldiv.x FP_SCR0(%a6),%fp0 # execute divide
14384:
14385: fmov.l %fpsr,%d1 # save FPSR
14386: fmov.l &0x0,%fpcr # clear FPCR
14387:
14388: or.l %d1,USER_FPSR(%a6) # save INEX,N
14389:
14390: fmovm.x &0x01,-(%sp) # save result to stack
14391: mov.w (%sp),%d1 # fetch new exponent
14392: add.l &0xc,%sp # pop result off stack
14393: andi.l &0x7fff,%d1 # strip sign
14394: sub.l %d0,%d1 # apply (subtract) scale factor
14395: cmp.l %d1,&0x7fff # did divide overflow?
14396: blt.b fsgldiv_normal_exit # no; finish normally
14397:
14398: fsgldiv_ovfl_tst:
14399: or.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
14400:
14401: mov.b FPCR_ENABLE(%a6),%d1
14402: andi.b &0x13,%d1 # is OVFL or INEX enabled?
14403: bne.b fsgldiv_ovfl_ena # yes
14404:
14405: fsgldiv_ovfl_dis:
14406: btst &neg_bit,FPSR_CC(%a6) # is result negative?
14407: sne %d1 # set sign param accordingly
14408: mov.l L_SCR3(%a6),%d0 # pass prec:rnd
14409: andi.b &0x30,%d0 # kill precision
14410: bsr.l ovf_res # calculate default result
14411: or.b %d0,FPSR_CC(%a6) # set INF if applicable
14412: fmovm.x (%a0),&0x80 # return default result in fp0
14413: rts
14414:
14415: fsgldiv_ovfl_ena:
14416: fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
14417:
14418: mov.l %d2,-(%sp) # save d2
14419: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14420: mov.l %d1,%d2 # make a copy
14421: andi.l &0x7fff,%d1 # strip sign
14422: andi.w &0x8000,%d2 # keep old sign
14423: sub.l %d0,%d1 # apply (subtract) scale factor
14424: subi.l &0x6000,%d1 # subtract new bias
14425: andi.w &0x7fff,%d1 # clear ms bit
14426: or.w %d2,%d1 # concat old sign,new exp
14427: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14428: mov.l (%sp)+,%d2 # restore d2
14429: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14430: bra.b fsgldiv_ovfl_dis
14431:
14432: fsgldiv_unfl:
14433: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
14434:
14435: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14436:
14437: fmov.l &rz_mode*0x10,%fpcr # set FPCR: RZ so unf_res4 can fix up
14438: fmov.l &0x0,%fpsr # clear FPSR
14439:
14440: fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
14441:
14442: fmov.l %fpsr,%d1 # save status
14443: fmov.l &0x0,%fpcr # clear FPCR
14444:
14445: or.l %d1,USER_FPSR(%a6) # save INEX2,N
14446:
14447: mov.b FPCR_ENABLE(%a6),%d1
14448: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14449: bne.b fsgldiv_unfl_ena # yes
14450:
14451: fsgldiv_unfl_dis:
14452: fmovm.x &0x80,FP_SCR0(%a6) # store out result
14453:
14454: lea FP_SCR0(%a6),%a0 # pass: result addr
14455: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14456: bsr.l unf_res4 # calculate default result
14457: or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
14458: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
14459: rts
14460:
14461: #
14462: # UNFL (or INEX) is enabled; compute the EXOP as well.
14463: #
14464: fsgldiv_unfl_ena:
14465: fmovm.x FP_SCR1(%a6),&0x40 # load dst op
14466:
14467: fmov.l L_SCR3(%a6),%fpcr # set FPCR
14468: fmov.l &0x0,%fpsr # clear FPSR
14469:
14470: fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
14471:
14472: fmov.l &0x0,%fpcr # clear FPCR
14473:
14474: fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
14475: mov.l %d2,-(%sp) # save d2
14476: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14477: mov.l %d1,%d2 # make a copy
14478: andi.l &0x7fff,%d1 # strip sign
14479: andi.w &0x8000,%d2 # keep old sign
14480: sub.l %d0,%d1 # apply (subtract) scale factor
14481: addi.l &0x6000,%d1 # add bias
14482: andi.w &0x7fff,%d1 # clear top bit
14483: or.w %d2,%d1 # concat old sign, new exp
14484: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14485: mov.l (%sp)+,%d2 # restore d2
14486: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14487: bra.b fsgldiv_unfl_dis
14488:
14489: #
14490: # the divide operation MAY underflow:
14491: #
14492: fsgldiv_may_unfl:
14493: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14494:
14495: fmov.l L_SCR3(%a6),%fpcr # set FPCR
14496: fmov.l &0x0,%fpsr # clear FPSR
14497:
14498: fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
14499:
14500: fmov.l %fpsr,%d1 # save status
14501: fmov.l &0x0,%fpcr # clear FPCR
14502:
14503: or.l %d1,USER_FPSR(%a6) # save INEX2,N
14504:
14505: fabs.x %fp0,%fp1 # make a copy of result
14506: fcmp.b %fp1,&0x1 # is |result| < 1.b?
14507: fbgt.w fsgldiv_normal_exit # no; no underflow occurred
14508: fblt.w fsgldiv_unfl # yes; underflow occurred
14509:
14510: #
14511: # we still don't know if underflow occurred. result is ~ equal to 1. but,
14512: # we don't know if the result was an underflow that rounded up to a 1
14513: # or a normalized number that rounded down to a 1. so, redo the entire
14514: # operation using RZ as the rounding mode to see what the pre-rounded
14515: # result is. this case should be relatively rare.
14516: #
14517: fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
14518:
14519: clr.l %d1 # clear scratch register
14520: ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
14521:
14522: fmov.l %d1,%fpcr # set FPCR
14523: fmov.l &0x0,%fpsr # clear FPSR
14524:
14525: fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
14526:
14527: fmov.l &0x0,%fpcr # clear FPCR
14528: fabs.x %fp1 # make absolute value
14529: fcmp.b %fp1,&0x1 # is |result| < 1.b?
14530: fbge.w fsgldiv_normal_exit # no; no underflow occurred
14531: bra.w fsgldiv_unfl # yes; underflow occurred
14532:
14533: ############################################################################
14534:
14535: #
14536: # Divide: inputs are not both normalized; what are they?
14537: #
14538: fsgldiv_not_norm:
14539: mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1 # fetch 16-bit table offset
14540: jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1) # jump to operand-type handler
14541:
14542: swbeg &48
14543: tbl_fsgldiv_op:
14544: short fsgldiv_norm - tbl_fsgldiv_op # NORM / NORM
14545: short fsgldiv_inf_load - tbl_fsgldiv_op # NORM / ZERO
14546: short fsgldiv_zero_load - tbl_fsgldiv_op # NORM / INF
14547: short fsgldiv_res_qnan - tbl_fsgldiv_op # NORM / QNAN
14548: short fsgldiv_norm - tbl_fsgldiv_op # NORM / DENORM
14549: short fsgldiv_res_snan - tbl_fsgldiv_op # NORM / SNAN
14550: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14551: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14552:
14553: short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / NORM
14554: short fsgldiv_res_operr - tbl_fsgldiv_op # ZERO / ZERO
14555: short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / INF
14556: short fsgldiv_res_qnan - tbl_fsgldiv_op # ZERO / QNAN
14557: short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / DENORM
14558: short fsgldiv_res_snan - tbl_fsgldiv_op # ZERO / SNAN
14559: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14560: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14561:
14562: short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / NORM
14563: short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / ZERO
14564: short fsgldiv_res_operr - tbl_fsgldiv_op # INF / INF
14565: short fsgldiv_res_qnan - tbl_fsgldiv_op # INF / QNAN
14566: short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / DENORM
14567: short fsgldiv_res_snan - tbl_fsgldiv_op # INF / SNAN
14568: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14569: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14570:
14571: short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / NORM
14572: short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / ZERO
14573: short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / INF
14574: short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / QNAN
14575: short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / DENORM
14576: short fsgldiv_res_snan - tbl_fsgldiv_op # QNAN / SNAN
14577: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14578: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14579:
14580: short fsgldiv_norm - tbl_fsgldiv_op # DENORM / NORM
14581: short fsgldiv_inf_load - tbl_fsgldiv_op # DENORM / ZERO
14582: short fsgldiv_zero_load - tbl_fsgldiv_op # DENORM / INF
14583: short fsgldiv_res_qnan - tbl_fsgldiv_op # DENORM / QNAN
14584: short fsgldiv_norm - tbl_fsgldiv_op # DENORM / DENORM
14585: short fsgldiv_res_snan - tbl_fsgldiv_op # DENORM / SNAN
14586: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14587: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14588:
14589: short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / NORM
14590: short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / ZERO
14591: short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / INF
14592: short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / QNAN
14593: short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / DENORM
14594: short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / SNAN
14595: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14596: short tbl_fsgldiv_op - tbl_fsgldiv_op #
14597:
14598: fsgldiv_res_qnan:
14599: bra.l res_qnan # tail-jump to shared QNAN handler
14600: fsgldiv_res_snan:
14601: bra.l res_snan # tail-jump to shared SNAN handler
14602: fsgldiv_res_operr:
14603: bra.l res_operr # tail-jump to shared OPERR handler
14604: fsgldiv_inf_load:
14605: bra.l fdiv_inf_load # divide-by-zero; same handling as fdiv
14606: fsgldiv_zero_load:
14607: bra.l fdiv_zero_load # ZERO result; same handling as fdiv
14608: fsgldiv_inf_dst:
14609: bra.l fdiv_inf_dst # dst INF; same handling as fdiv
14610:
14611: #########################################################################
14612: # XDEF **************************************************************** #
14613: # fadd(): emulates the fadd instruction #
14614: # fsadd(): emulates the fsadd instruction #
14615: # fdadd(): emulates the fdadd instruction #
14616: # #
14617: # XREF **************************************************************** #
14618: # addsub_scaler2() - scale the operands so they won't take exc #
14619: # ovf_res() - return default overflow result #
14620: # unf_res() - return default underflow result #
14621: # res_qnan() - set QNAN result #
14622: # res_snan() - set SNAN result #
14623: # res_operr() - set OPERR result #
14624: # scale_to_zero_src() - set src operand exponent equal to zero #
14625: # scale_to_zero_dst() - set dst operand exponent equal to zero #
14626: # #
14627: # INPUT *************************************************************** #
14628: # a0 = pointer to extended precision source operand #
14629: # a1 = pointer to extended precision destination operand #
14630: # #
14631: # OUTPUT ************************************************************** #
14632: # fp0 = result #
14633: # fp1 = EXOP (if exception occurred) #
14634: # #
14635: # ALGORITHM *********************************************************** #
14636: # Handle NANs, infinities, and zeroes as special cases. Divide #
14637: # norms into extended, single, and double precision. #
14638: # Do addition after scaling exponents such that exception won't #
14639: # occur. Then, check result exponent to see if exception would have #
14640: # occurred. If so, return default result and maybe EXOP. Else, insert #
14641: # the correct result exponent and return. Set FPSR bits as appropriate. #
14642: # #
14643: #########################################################################
14644:
14645: global fsadd
14646: fsadd:
14647: andi.b &0x30,%d0 # clear rnd prec
14648: ori.b &s_mode*0x10,%d0 # insert sgl prec
14649: bra.b fadd
14650:
14651: global fdadd
14652: fdadd:
14653: andi.b &0x30,%d0 # clear rnd prec
14654: ori.b &d_mode*0x10,%d0 # insert dbl prec; fall through to fadd
14655:
14656: global fadd
14657: fadd:
14658: mov.l %d0,L_SCR3(%a6) # store rnd info
14659:
14660: clr.w %d1 # build combined operand tag:
14661: mov.b DTAG(%a6),%d1 # d1 = (dst tag << 3) | src tag
14662: lsl.b &0x3,%d1
14663: or.b STAG(%a6),%d1 # combine src tags
14664:
14665: bne.w fadd_not_norm # optimize on non-norm input
14666:
14667: #
14668: # ADD: norms and denorms
14669: #
14670: fadd_norm:
14671: bsr.l addsub_scaler2 # scale exponents
14672:
14673: fadd_zero_entry:
14674: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14675:
14676: fmov.l &0x0,%fpsr # clear FPSR
14677: fmov.l L_SCR3(%a6),%fpcr # set FPCR
14678:
14679: fadd.x FP_SCR0(%a6),%fp0 # execute add
14680:
14681: fmov.l &0x0,%fpcr # clear FPCR
14682: fmov.l %fpsr,%d1 # fetch INEX2,N,Z
14683:
14684: or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
14685:
14686: fbeq.w fadd_zero_exit # if result is zero, end now
14687:
14688: mov.l %d2,-(%sp) # save d2
14689:
14690: fmovm.x &0x01,-(%sp) # save result to stack
14691:
14692: mov.w 2+L_SCR3(%a6),%d1 # fetch rnd prec
14693: lsr.b &0x6,%d1 # shift prec to low bits (table index)
14694:
14695: mov.w (%sp),%d2 # fetch new sign, exp
14696: andi.l &0x7fff,%d2 # strip sign
14697: sub.l %d0,%d2 # apply (subtract) scale factor
14698:
14699: cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
14700: bge.b fadd_ovfl # yes
14701:
14702: cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
14703: blt.w fadd_unfl # yes
14704: beq.w fadd_may_unfl # maybe; go find out
14705:
14706: fadd_normal:
14707: mov.w (%sp),%d1 # fetch {sgn,exp} word
14708: andi.w &0x8000,%d1 # keep sign
14709: or.w %d2,%d1 # concat sign,new exp
14710: mov.w %d1,(%sp) # insert new exponent
14711:
14712: fmovm.x (%sp)+,&0x80 # return result in fp0
14713:
14714: mov.l (%sp)+,%d2 # restore d2
14715: rts
14716:
14717: fadd_zero_exit:
14718: # fmov.s &0x00000000,%fp0 # return zero in fp0
14719: rts
14720:
14721: tbl_fadd_ovfl:
14722: long 0x7fff # ext ovfl
14723: long 0x407f # sgl ovfl
14724: long 0x43ff # dbl ovfl
14725:
14726: tbl_fadd_unfl:
14727: long 0x0000 # ext unfl
14728: long 0x3f81 # sgl unfl
14729: long 0x3c01 # dbl unfl
14730:
14731: fadd_ovfl:
14732: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
14733:
14734: mov.b FPCR_ENABLE(%a6),%d1
14735: andi.b &0x13,%d1 # is OVFL or INEX enabled?
14736: bne.b fadd_ovfl_ena # yes
14737:
14738: add.l &0xc,%sp # pop result off stack
14739: fadd_ovfl_dis:
14740: btst &neg_bit,FPSR_CC(%a6) # is result negative?
14741: sne %d1 # set sign param accordingly
14742: mov.l L_SCR3(%a6),%d0 # pass prec:rnd
14743: bsr.l ovf_res # calculate default result
14744: or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
14745: fmovm.x (%a0),&0x80 # return default result in fp0
14746: mov.l (%sp)+,%d2 # restore d2
14747: rts
14748:
14749: fadd_ovfl_ena:
14750: mov.b L_SCR3(%a6),%d1
14751: andi.b &0xc0,%d1 # is precision extended?
14752: bne.b fadd_ovfl_ena_sd # no; prec = sgl or dbl
14753:
14754: fadd_ovfl_ena_cont:
14755: mov.w (%sp),%d1 # fetch {sgn,exp} word
14756: andi.w &0x8000,%d1 # keep sign
14757: subi.l &0x6000,%d2 # subtract bias adjustment for EXOP
14758: andi.w &0x7fff,%d2 # clear top bit
14759: or.w %d2,%d1 # concat sign,new exp
14760: mov.w %d1,(%sp) # insert new exponent
14761:
14762: fmovm.x (%sp)+,&0x40 # return EXOP in fp1
14763: bra.b fadd_ovfl_dis
14764:
14765: fadd_ovfl_ena_sd:
14766: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14767:
14768: mov.l L_SCR3(%a6),%d1
14769: andi.b &0x30,%d1 # keep rnd mode
14770: fmov.l %d1,%fpcr # set FPCR
14771:
14772: fadd.x FP_SCR0(%a6),%fp0 # execute add
14773:
14774: fmov.l &0x0,%fpcr # clear FPCR
14775:
14776: add.l &0xc,%sp # pop old result off stack
14777: fmovm.x &0x01,-(%sp) # push re-computed (ext prec) result
14778: bra.b fadd_ovfl_ena_cont
14779:
14780: fadd_unfl:
14781: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
14782:
14783: add.l &0xc,%sp # pop result off stack
14784:
14785: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
14786:
14787: fmov.l &rz_mode*0x10,%fpcr # set FPCR: RZ so unf_res can fix up
14788: fmov.l &0x0,%fpsr # clear FPSR
14789:
14790: fadd.x FP_SCR0(%a6),%fp0 # execute add
14791:
14792: fmov.l &0x0,%fpcr # clear FPCR
14793: fmov.l %fpsr,%d1 # save status
14794:
14795: or.l %d1,USER_FPSR(%a6) # save INEX,N
14796:
14797: mov.b FPCR_ENABLE(%a6),%d1
14798: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
14799: bne.b fadd_unfl_ena # yes
14800:
14801: fadd_unfl_dis:
14802: fmovm.x &0x80,FP_SCR0(%a6) # store out result
14803:
14804: lea FP_SCR0(%a6),%a0 # pass: result addr
14805: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
14806: bsr.l unf_res # calculate default result
14807: or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
14808: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
14809: mov.l (%sp)+,%d2 # restore d2
14810: rts
14811:
14812: fadd_unfl_ena:
14813: fmovm.x FP_SCR1(%a6),&0x40 # load dst op
14814:
14815: mov.l L_SCR3(%a6),%d1
14816: andi.b &0xc0,%d1 # is precision extended?
14817: bne.b fadd_unfl_ena_sd # no; sgl or dbl
14818:
14819: fmov.l L_SCR3(%a6),%fpcr # set FPCR
14820:
14821: fadd_unfl_ena_cont:
14822: fmov.l &0x0,%fpsr # clear FPSR
14823:
14824: fadd.x FP_SCR0(%a6),%fp1 # execute add
14825:
14826: fmov.l &0x0,%fpcr # clear FPCR
14827:
14828: fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
14829: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
14830: mov.l %d1,%d2 # make a copy
14831: andi.l &0x7fff,%d1 # strip sign
14832: andi.w &0x8000,%d2 # keep old sign
14833: sub.l %d0,%d1 # apply (subtract) scale factor
14834: addi.l &0x6000,%d1 # add new bias
14835: andi.w &0x7fff,%d1 # clear top bit
14836: or.w %d2,%d1 # concat sign,new exp
14837: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
14838: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
14839: bra.w fadd_unfl_dis
14840:
14841: fadd_unfl_ena_sd:
14842: mov.l L_SCR3(%a6),%d1
14843: andi.b &0x30,%d1 # use only rnd mode
14844: fmov.l %d1,%fpcr # set FPCR
14845:
14846: bra.b fadd_unfl_ena_cont
14847:
14848: #
14849: # result is equal to the smallest normalized number in the selected precision.
14850: # if the precision is extended, this result could not have come from an
14851: # underflow that rounded up.
14852: #
14853: fadd_may_unfl:
14854: mov.l L_SCR3(%a6),%d1
14855: andi.b &0xc0,%d1 # is precision extended?
14856: beq.w fadd_normal # yes; no underflow occurred
14857:
14858: mov.l 0x4(%sp),%d1 # extract hi(man)
14859: cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
14860: bne.w fadd_normal # no; no underflow occurred
14861:
14862: tst.l 0x8(%sp) # is lo(man) = 0x0?
14863: bne.w fadd_normal # no; no underflow occurred
14864:
14865: btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
14866: beq.w fadd_normal # no; no underflow occurred
14867:
14868: #
14869: # ok, so now the result has a exponent equal to the smallest normalized
14870: # exponent for the selected precision. also, the mantissa is equal to
14871: # 0x8000000000000000 and this mantissa is the result of rounding non-zero
14872: # g,r,s.
14873: # now, we must determine whether the pre-rounded result was an underflow
14874: # rounded "up" or a normalized number rounded "down".
14875: # so, we do this by re-executing the add using RZ as the rounding mode and
14876: # seeing if the new result is smaller or equal to the current result.
14877: #
14878: fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
14879:
14880: mov.l L_SCR3(%a6),%d1
14881: andi.b &0xc0,%d1 # keep rnd prec
14882: ori.b &rz_mode*0x10,%d1 # insert rnd mode
14883: fmov.l %d1,%fpcr # set FPCR
14884: fmov.l &0x0,%fpsr # clear FPSR
14885:
14886: fadd.x FP_SCR0(%a6),%fp1 # execute add
14887:
14888: fmov.l &0x0,%fpcr # clear FPCR
14889:
14890: fabs.x %fp0 # compare absolute values
14891: fabs.x %fp1
14892: fcmp.x %fp0,%fp1 # is first result > second?
14893:
14894: fbgt.w fadd_unfl # yes; it's an underflow
14895: bra.w fadd_normal # no; it's not an underflow
14896:
14897: ##########################################################################
14898:
14899: #
14900: # Add: inputs are not both normalized; what are they?
14901: #
14902: fadd_not_norm:
14903: mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1 # fetch 16-bit table offset
14904: jmp (tbl_fadd_op.b,%pc,%d1.w*1) # jump to operand-type handler
14905:
14906: swbeg &48
14907: tbl_fadd_op:
14908: short fadd_norm - tbl_fadd_op # NORM + NORM
14909: short fadd_zero_src - tbl_fadd_op # NORM + ZERO
14910: short fadd_inf_src - tbl_fadd_op # NORM + INF
14911: short fadd_res_qnan - tbl_fadd_op # NORM + QNAN
14912: short fadd_norm - tbl_fadd_op # NORM + DENORM
14913: short fadd_res_snan - tbl_fadd_op # NORM + SNAN
14914: short tbl_fadd_op - tbl_fadd_op #
14915: short tbl_fadd_op - tbl_fadd_op #
14916:
14917: short fadd_zero_dst - tbl_fadd_op # ZERO + NORM
14918: short fadd_zero_2 - tbl_fadd_op # ZERO + ZERO
14919: short fadd_inf_src - tbl_fadd_op # ZERO + INF
14920: short fadd_res_qnan - tbl_fadd_op # ZERO + QNAN
14921: short fadd_zero_dst - tbl_fadd_op # ZERO + DENORM
14922: short fadd_res_snan - tbl_fadd_op # ZERO + SNAN
14923: short tbl_fadd_op - tbl_fadd_op #
14924: short tbl_fadd_op - tbl_fadd_op #
14925:
14926: short fadd_inf_dst - tbl_fadd_op # INF + NORM
14927: short fadd_inf_dst - tbl_fadd_op # INF + ZERO
14928: short fadd_inf_2 - tbl_fadd_op # INF + INF
14929: short fadd_res_qnan - tbl_fadd_op # INF + QNAN
14930: short fadd_inf_dst - tbl_fadd_op # INF + DENORM
14931: short fadd_res_snan - tbl_fadd_op # INF + SNAN
14932: short tbl_fadd_op - tbl_fadd_op #
14933: short tbl_fadd_op - tbl_fadd_op #
14934:
14935: short fadd_res_qnan - tbl_fadd_op # QNAN + NORM
14936: short fadd_res_qnan - tbl_fadd_op # QNAN + ZERO
14937: short fadd_res_qnan - tbl_fadd_op # QNAN + INF
14938: short fadd_res_qnan - tbl_fadd_op # QNAN + QNAN
14939: short fadd_res_qnan - tbl_fadd_op # QNAN + DENORM
14940: short fadd_res_snan - tbl_fadd_op # QNAN + SNAN
14941: short tbl_fadd_op - tbl_fadd_op #
14942: short tbl_fadd_op - tbl_fadd_op #
14943:
14944: short fadd_norm - tbl_fadd_op # DENORM + NORM
14945: short fadd_zero_src - tbl_fadd_op # DENORM + ZERO
14946: short fadd_inf_src - tbl_fadd_op # DENORM + INF
14947: short fadd_res_qnan - tbl_fadd_op # DENORM + QNAN
14948: short fadd_norm - tbl_fadd_op # DENORM + DENORM
14949: short fadd_res_snan - tbl_fadd_op # DENORM + SNAN
14950: short tbl_fadd_op - tbl_fadd_op #
14951: short tbl_fadd_op - tbl_fadd_op #
14952:
14953: short fadd_res_snan - tbl_fadd_op # SNAN + NORM
14954: short fadd_res_snan - tbl_fadd_op # SNAN + ZERO
14955: short fadd_res_snan - tbl_fadd_op # SNAN + INF
14956: short fadd_res_snan - tbl_fadd_op # SNAN + QNAN
14957: short fadd_res_snan - tbl_fadd_op # SNAN + DENORM
14958: short fadd_res_snan - tbl_fadd_op # SNAN + SNAN
14959: short tbl_fadd_op - tbl_fadd_op #
14960: short tbl_fadd_op - tbl_fadd_op #
14961:
14962: fadd_res_qnan:
14963: bra.l res_qnan # tail-jump to shared QNAN handler
14964: fadd_res_snan:
14965: bra.l res_snan # tail-jump to shared SNAN handler
14966:
14967: #
14968: # both operands are ZEROes
14969: #
14970: fadd_zero_2:
14971: mov.b SRC_EX(%a0),%d0 # are the signs opposite?
14972: mov.b DST_EX(%a1),%d1
14973: eor.b %d0,%d1 # sign is msb; N set if signs differ
14974: bmi.w fadd_zero_2_chk_rm # weed out (-ZERO)+(+ZERO)
14975:
14976: # the signs are the same. so determine whether they are positive or negative
14977: # and return the appropriately signed zero.
14978: tst.b %d0 # are ZEROes positive or negative?
14979: bmi.b fadd_zero_rm # negative
14980: fmov.s &0x00000000,%fp0 # return +ZERO
14981: mov.b &z_bmask,FPSR_CC(%a6) # set Z
14982: rts
14983:
14984: #
14985: # the ZEROes have opposite signs:
14986: # - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
14987: # - -ZERO is returned in the case of RM.
14988: #
14989: fadd_zero_2_chk_rm:
14990: mov.b 3+L_SCR3(%a6),%d1
14991: andi.b &0x30,%d1 # extract rnd mode
14992: cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
14993: beq.b fadd_zero_rm # yes
14994: fmov.s &0x00000000,%fp0 # return +ZERO
14995: mov.b &z_bmask,FPSR_CC(%a6) # set Z
14996: rts
14997:
14998: fadd_zero_rm:
14999: fmov.s &0x80000000,%fp0 # return -ZERO
15000: mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
15001: rts
15002:
15003: #
15004: # one operand is a ZERO and the other is a DENORM or NORM. scale
15005: # the DENORM or NORM and jump to the regular fadd routine.
15006: #
15007: fadd_zero_dst:
15008: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy non-zero src op to FP_SCR0
15009: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
15010: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
15011: bsr.l scale_to_zero_src # scale the operand
15012: clr.w FP_SCR1_EX(%a6) # set dst operand = +ZERO
15013: clr.l FP_SCR1_HI(%a6)
15014: clr.l FP_SCR1_LO(%a6)
15015: bra.w fadd_zero_entry # go execute fadd
15016:
15017: fadd_zero_src:
15018: mov.w DST_EX(%a1),FP_SCR1_EX(%a6) # copy non-zero dst op to FP_SCR1
15019: mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
15020: mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
15021: bsr.l scale_to_zero_dst # scale the operand
15022: clr.w FP_SCR0_EX(%a6) # set src operand = +ZERO
15023: clr.l FP_SCR0_HI(%a6)
15024: clr.l FP_SCR0_LO(%a6)
15025: bra.w fadd_zero_entry # go execute fadd
15026:
15027: #
15028: # both operands are INFs. an OPERR will result if the INFs have
15029: # different signs. else, an INF of the same sign is returned
15030: #
15031: fadd_inf_2:
15032: mov.b SRC_EX(%a0),%d0 # exclusive or the signs
15033: mov.b DST_EX(%a1),%d1
15034: eor.b %d1,%d0 # sign is msb; N set if signs differ
15035: bmi.l res_operr # weed out (-INF)+(+INF)
15036:
15037: # ok, so it's not an OPERR. but, we do have to remember to return the
15038: # src INF since that's where the 881/882 gets the j-bit from...
15039:
15040: #
15041: # src operand is an INF; return it (dst is ZERO, DENORM, NORM, or same-sign INF)
15042: #
15043: fadd_inf_src:
15044: fmovm.x SRC(%a0),&0x80 # return src INF
15045: tst.b SRC_EX(%a0) # is INF positive?
15046: bpl.b fadd_inf_done # yes; we're done
15047: mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
15048: rts
15049:
15050: #
15051: # dst operand is an INF; return it (src is ZERO, DENORM, or NORM)
15052: #
15053: fadd_inf_dst:
15054: fmovm.x DST(%a1),&0x80 # return dst INF
15055: tst.b DST_EX(%a1) # is INF positive?
15056: bpl.b fadd_inf_done # yes; we're done
15057: mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
15058: rts
15059:
15060: fadd_inf_done:
15061: mov.b &inf_bmask,FPSR_CC(%a6) # set INF
15062: rts
15063:
15064: #########################################################################
15065: # XDEF **************************************************************** #
15066: # fsub(): emulates the fsub instruction #
15067: # fssub(): emulates the fssub instruction #
15068: # fdsub(): emulates the fdsub instruction #
15069: # #
15070: # XREF **************************************************************** #
15071: # addsub_scaler2() - scale the operands so they won't take exc #
15072: # ovf_res() - return default overflow result #
15073: # unf_res() - return default underflow result #
15074: # res_qnan() - set QNAN result #
15075: # res_snan() - set SNAN result #
15076: # res_operr() - set OPERR result #
15077: # scale_to_zero_src() - set src operand exponent equal to zero #
15078: # scale_to_zero_dst() - set dst operand exponent equal to zero #
15079: # #
15080: # INPUT *************************************************************** #
15081: # a0 = pointer to extended precision source operand #
15082: # a1 = pointer to extended precision destination operand #
15083: # #
15084: # OUTPUT ************************************************************** #
15085: # fp0 = result #
15086: # fp1 = EXOP (if exception occurred) #
15087: # #
15088: # ALGORITHM *********************************************************** #
15089: # Handle NANs, infinities, and zeroes as special cases. Divide #
15090: # norms into extended, single, and double precision. #
15091: # Do subtraction after scaling exponents such that exception won't#
15092: # occur. Then, check result exponent to see if exception would have #
15093: # occurred. If so, return default result and maybe EXOP. Else, insert #
15094: # the correct result exponent and return. Set FPSR bits as appropriate. #
15095: # #
15096: #########################################################################
15097:
15098: global fssub
15099: fssub:
15100: andi.b &0x30,%d0 # clear rnd prec
15101: ori.b &s_mode*0x10,%d0 # insert sgl prec
15102: bra.b fsub
15103:
15104: global fdsub
15105: fdsub:
15106: andi.b &0x30,%d0 # clear rnd prec
15107: ori.b &d_mode*0x10,%d0 # insert dbl prec; fall through to fsub
15108:
15109: global fsub
15110: fsub:
15111: mov.l %d0,L_SCR3(%a6) # store rnd info
15112:
15113: clr.w %d1
15114: mov.b DTAG(%a6),%d1
15115: lsl.b &0x3,%d1
15116: or.b STAG(%a6),%d1 # combine src tags
15117:
15118: bne.w fsub_not_norm # optimize on non-norm input
15119:
15120: #
15121: # SUB: norms and denorms
15122: #
15123: fsub_norm:
15124: bsr.l addsub_scaler2 # scale exponents
15125:
15126: fsub_zero_entry:
15127: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
15128:
15129: fmov.l &0x0,%fpsr # clear FPSR
15130: fmov.l L_SCR3(%a6),%fpcr # set FPCR
15131:
15132: fsub.x FP_SCR0(%a6),%fp0 # execute subtract
15133:
15134: fmov.l &0x0,%fpcr # clear FPCR
15135: fmov.l %fpsr,%d1 # fetch INEX2, N, Z
15136:
15137: or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
15138:
15139: fbeq.w fsub_zero_exit # if result zero, end now
15140:
15141: mov.l %d2,-(%sp) # save d2
15142:
15143: fmovm.x &0x01,-(%sp) # save result to stack
15144:
15145: mov.w 2+L_SCR3(%a6),%d1 # fetch rnd prec
15146: lsr.b &0x6,%d1 # prec index in bits 1:0 (0=ext,1=sgl,2=dbl)
15147:
15148: mov.w (%sp),%d2 # fetch new exponent
15149: andi.l &0x7fff,%d2 # strip sign
15150: sub.l %d0,%d2 # subtract scale factor
15151:
15152: cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
15153: bge.b fsub_ovfl # yes
15154:
15155: cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
15156: blt.w fsub_unfl # yes
15157: beq.w fsub_may_unfl # maybe; go find out
15158:
15159: fsub_normal:
15160: mov.w (%sp),%d1
15161: andi.w &0x8000,%d1 # keep sign
15162: or.w %d2,%d1 # insert new exponent
15163: mov.w %d1,(%sp) # insert new exponent
15164:
15165: fmovm.x (%sp)+,&0x80 # return result in fp0
15166:
15167: mov.l (%sp)+,%d2 # restore d2
15168: rts
15169:
15170: fsub_zero_exit:
15171: # fmov.s &0x00000000,%fp0 # return zero in fp0
15172: rts
15173:
15174: tbl_fsub_ovfl:
15175: long 0x7fff # ext ovfl
15176: long 0x407f # sgl ovfl
15177: long 0x43ff # dbl ovfl
15178:
15179: tbl_fsub_unfl:
15180: long 0x0000 # ext unfl
15181: long 0x3f81 # sgl unfl
15182: long 0x3c01 # dbl unfl
15183:
15184: fsub_ovfl:
15185: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
15186:
15187: mov.b FPCR_ENABLE(%a6),%d1
15188: andi.b &0x13,%d1 # is OVFL or INEX enabled?
15189: bne.b fsub_ovfl_ena # yes
15190:
15191: add.l &0xc,%sp # clear result from stack
15192: fsub_ovfl_dis:
15193: btst &neg_bit,FPSR_CC(%a6) # is result negative?
15194: sne %d1 # set sign param accordingly
15195: mov.l L_SCR3(%a6),%d0 # pass prec:rnd
15196: bsr.l ovf_res # calculate default result
15197: or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
15198: fmovm.x (%a0),&0x80 # return default result in fp0
15199: mov.l (%sp)+,%d2 # restore d2
15200: rts
15201:
15202: fsub_ovfl_ena:
15203: mov.b L_SCR3(%a6),%d1
15204: andi.b &0xc0,%d1 # is precision extended?
15205: bne.b fsub_ovfl_ena_sd # no; sgl or dbl prec
15206:
15207: fsub_ovfl_ena_cont:
15208: mov.w (%sp),%d1 # fetch {sgn,exp}
15209: andi.w &0x8000,%d1 # keep sign
15210: subi.l &0x6000,%d2 # subtract new bias
15211: andi.w &0x7fff,%d2 # clear top bit
15212: or.w %d2,%d1 # concat sign,exp
15213: mov.w %d1,(%sp) # insert new exponent
15214:
15215: fmovm.x (%sp)+,&0x40 # return EXOP in fp1
15216: bra.b fsub_ovfl_dis
15217:
15218: fsub_ovfl_ena_sd:
15219: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
15220:
15221: mov.l L_SCR3(%a6),%d1
15222: andi.b &0x30,%d1 # clear rnd prec
15223: fmov.l %d1,%fpcr # set FPCR
15224:
15225: fsub.x FP_SCR0(%a6),%fp0 # execute subtract
15226:
15227: fmov.l &0x0,%fpcr # clear FPCR
15228:
15229: add.l &0xc,%sp # clear old result from stack
15230: fmovm.x &0x01,-(%sp) # save new result to stack
15231: bra.b fsub_ovfl_ena_cont
15232:
15233: fsub_unfl:
15234: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
15235:
15236: add.l &0xc,%sp # clear result from stack
15237:
15238: fmovm.x FP_SCR1(%a6),&0x80 # load dst op
15239:
15240: fmov.l &rz_mode*0x10,%fpcr # set FPCR
15241: fmov.l &0x0,%fpsr # clear FPSR
15242:
15243: fsub.x FP_SCR0(%a6),%fp0 # execute subtract
15244:
15245: fmov.l &0x0,%fpcr # clear FPCR
15246: fmov.l %fpsr,%d1 # save status
15247:
15248: or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
15249:
15250: mov.b FPCR_ENABLE(%a6),%d1
15251: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15252: bne.b fsub_unfl_ena # yes
15253:
15254: fsub_unfl_dis:
15255: fmovm.x &0x80,FP_SCR0(%a6) # store out result
15256:
15257: lea FP_SCR0(%a6),%a0 # pass: result addr
15258: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
15259: bsr.l unf_res # calculate default result
15260: or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
15261: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
15262: mov.l (%sp)+,%d2 # restore d2
15263: rts
15264:
15265: fsub_unfl_ena:
15266: fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
15267:
15268: mov.l L_SCR3(%a6),%d1
15269: andi.b &0xc0,%d1 # is precision extended?
15270: bne.b fsub_unfl_ena_sd # no; sgl or dbl prec
15271:
15272: fmov.l L_SCR3(%a6),%fpcr # set FPCR
15273:
15274: fsub_unfl_ena_cont:
15275: fmov.l &0x0,%fpsr # clear FPSR
15276:
15277: fsub.x FP_SCR0(%a6),%fp1 # execute subtract
15278:
15279: fmov.l &0x0,%fpcr # clear FPCR
15280:
15281: fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
15282: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
15283: mov.l %d1,%d2 # make a copy
15284: andi.l &0x7fff,%d1 # strip sign
15285: andi.w &0x8000,%d2 # keep old sign
15286: sub.l %d0,%d1 # subtract scale factor
15287: addi.l &0x6000,%d1 # add new bias
15288: andi.w &0x7fff,%d1 # clear top bit
15289: or.w %d2,%d1 # concat sgn,exp
15290: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
15291: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
15292: bra.w fsub_unfl_dis
15293:
15294: fsub_unfl_ena_sd:
15295: mov.l L_SCR3(%a6),%d1
15296: andi.b &0x30,%d1 # clear rnd prec
15297: fmov.l %d1,%fpcr # set FPCR
15298:
15299: bra.b fsub_unfl_ena_cont
15300:
15301: #
15302: # result is equal to the smallest normalized number in the selected precision
15303: # if the precision is extended, this result could not have come from an
15304: # underflow that rounded up.
15305: #
15306: fsub_may_unfl:
15307: mov.l L_SCR3(%a6),%d1
15308: andi.b &0xc0,%d1 # fetch rnd prec
15309: beq.w fsub_normal # yes; no underflow occurred
15310:
15311: mov.l 0x4(%sp),%d1
15312: cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
15313: bne.w fsub_normal # no; no underflow occurred
15314:
15315: tst.l 0x8(%sp) # is lo(man) = 0x0?
15316: bne.w fsub_normal # no; no underflow occurred
15317:
15318: btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
15319: beq.w fsub_normal # no; no underflow occurred
15320:
15321: #
15322: # ok, so now the result has a exponent equal to the smallest normalized
15323: # exponent for the selected precision. also, the mantissa is equal to
15324: # 0x8000000000000000 and this mantissa is the result of rounding non-zero
15325: # g,r,s.
15326: # now, we must determine whether the pre-rounded result was an underflow
15327: # rounded "up" or a normalized number rounded "down".
15328: # so, we do this by re-executing the subtract using RZ as the rounding mode
15329: # and seeing if the new result is smaller or equal to the current result.
15330: #
15331: fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
15332:
15333: mov.l L_SCR3(%a6),%d1
15334: andi.b &0xc0,%d1 # keep rnd prec
15335: ori.b &rz_mode*0x10,%d1 # insert rnd mode
15336: fmov.l %d1,%fpcr # set FPCR
15337: fmov.l &0x0,%fpsr # clear FPSR
15338:
15339: fsub.x FP_SCR0(%a6),%fp1 # execute subtract
15340:
15341: fmov.l &0x0,%fpcr # clear FPCR
15342:
15343: fabs.x %fp0 # compare absolute values
15344: fabs.x %fp1
15345: fcmp.x %fp0,%fp1 # is first result > second?
15346:
15347: fbgt.w fsub_unfl # yes; it's an underflow
15348: bra.w fsub_normal # no; it's not an underflow
15349:
15350: ##########################################################################
15351:
15352: #
15353: # Sub: inputs are not both normalized; what are they?
15354: #
15355: fsub_not_norm: # d1 = (DTAG<<3)|STAG indexes the 48-entry table
15356: mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
15357: jmp (tbl_fsub_op.b,%pc,%d1.w*1)
15358:
15359: swbeg &48
15360: tbl_fsub_op:
15361: short fsub_norm - tbl_fsub_op # NORM - NORM
15362: short fsub_zero_src - tbl_fsub_op # NORM - ZERO
15363: short fsub_inf_src - tbl_fsub_op # NORM - INF
15364: short fsub_res_qnan - tbl_fsub_op # NORM - QNAN
15365: short fsub_norm - tbl_fsub_op # NORM - DENORM
15366: short fsub_res_snan - tbl_fsub_op # NORM - SNAN
15367: short tbl_fsub_op - tbl_fsub_op #
15368: short tbl_fsub_op - tbl_fsub_op #
15369:
15370: short fsub_zero_dst - tbl_fsub_op # ZERO - NORM
15371: short fsub_zero_2 - tbl_fsub_op # ZERO - ZERO
15372: short fsub_inf_src - tbl_fsub_op # ZERO - INF
15373: short fsub_res_qnan - tbl_fsub_op # ZERO - QNAN
15374: short fsub_zero_dst - tbl_fsub_op # ZERO - DENORM
15375: short fsub_res_snan - tbl_fsub_op # ZERO - SNAN
15376: short tbl_fsub_op - tbl_fsub_op #
15377: short tbl_fsub_op - tbl_fsub_op #
15378:
15379: short fsub_inf_dst - tbl_fsub_op # INF - NORM
15380: short fsub_inf_dst - tbl_fsub_op # INF - ZERO
15381: short fsub_inf_2 - tbl_fsub_op # INF - INF
15382: short fsub_res_qnan - tbl_fsub_op # INF - QNAN
15383: short fsub_inf_dst - tbl_fsub_op # INF - DENORM
15384: short fsub_res_snan - tbl_fsub_op # INF - SNAN
15385: short tbl_fsub_op - tbl_fsub_op #
15386: short tbl_fsub_op - tbl_fsub_op #
15387:
15388: short fsub_res_qnan - tbl_fsub_op # QNAN - NORM
15389: short fsub_res_qnan - tbl_fsub_op # QNAN - ZERO
15390: short fsub_res_qnan - tbl_fsub_op # QNAN - INF
15391: short fsub_res_qnan - tbl_fsub_op # QNAN - QNAN
15392: short fsub_res_qnan - tbl_fsub_op # QNAN - DENORM
15393: short fsub_res_snan - tbl_fsub_op # QNAN - SNAN
15394: short tbl_fsub_op - tbl_fsub_op #
15395: short tbl_fsub_op - tbl_fsub_op #
15396:
15397: short fsub_norm - tbl_fsub_op # DENORM - NORM
15398: short fsub_zero_src - tbl_fsub_op # DENORM - ZERO
15399: short fsub_inf_src - tbl_fsub_op # DENORM - INF
15400: short fsub_res_qnan - tbl_fsub_op # DENORM - QNAN
15401: short fsub_norm - tbl_fsub_op # DENORM - DENORM
15402: short fsub_res_snan - tbl_fsub_op # DENORM - SNAN
15403: short tbl_fsub_op - tbl_fsub_op #
15404: short tbl_fsub_op - tbl_fsub_op #
15405:
15406: short fsub_res_snan - tbl_fsub_op # SNAN - NORM
15407: short fsub_res_snan - tbl_fsub_op # SNAN - ZERO
15408: short fsub_res_snan - tbl_fsub_op # SNAN - INF
15409: short fsub_res_snan - tbl_fsub_op # SNAN - QNAN
15410: short fsub_res_snan - tbl_fsub_op # SNAN - DENORM
15411: short fsub_res_snan - tbl_fsub_op # SNAN - SNAN
15412: short tbl_fsub_op - tbl_fsub_op #
15413: short tbl_fsub_op - tbl_fsub_op #
15414:
15415: fsub_res_qnan:
15416: bra.l res_qnan
15417: fsub_res_snan:
15418: bra.l res_snan
15419:
15420: #
15421: # both operands are ZEROes
15422: #
15423: fsub_zero_2:
15424: mov.b SRC_EX(%a0),%d0
15425: mov.b DST_EX(%a1),%d1
15426: eor.b %d1,%d0 # d0 = sign(src) ^ sign(dst)
15427: bpl.b fsub_zero_2_chk_rm # signs are the same
15428:
15429: # the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
15430: tst.b %d1 # is dst negative? (d1 still holds DST_EX;
15431: # d0 is the eor result and is always
15432: # negative on this path)
15433: bmi.b fsub_zero_2_rm # yes
15434: fmov.s &0x00000000,%fp0 # no; return +ZERO
15435: mov.b &z_bmask,FPSR_CC(%a6) # set Z
15436: rts
15437:
15438: #
15439: # the ZEROes have the same signs:
15440: # - therefore, we return +ZERO if the rounding mode is RN,RZ, or RP
15441: # - -ZERO is returned in the case of RM.
15442: #
15443: fsub_zero_2_chk_rm:
15444: mov.b 3+L_SCR3(%a6),%d1
15445: andi.b &0x30,%d1 # extract rnd mode
15446: cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
15447: beq.b fsub_zero_2_rm # yes
15448: fmov.s &0x00000000,%fp0 # no; return +ZERO
15449: mov.b &z_bmask,FPSR_CC(%a6) # set Z
15450: rts
15451:
15452: fsub_zero_2_rm:
15453: fmov.s &0x80000000,%fp0 # return -ZERO
15454: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/NEG
15455: rts
15454:
15455: #
15456: # one operand is a ZERO and the other is a DENORM or a NORM.
15457: # scale the DENORM or NORM and jump to the regular fsub routine.
15458: #
15459: fsub_zero_dst:
15460: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
15461: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
15462: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
15463: bsr.l scale_to_zero_src # scale the operand
15464: clr.w FP_SCR1_EX(%a6) # dst is a +ZERO
15465: clr.l FP_SCR1_HI(%a6)
15466: clr.l FP_SCR1_LO(%a6)
15467: bra.w fsub_zero_entry # go execute fsub
15468:
15469: fsub_zero_src:
15470: mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
15471: mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
15472: mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
15473: bsr.l scale_to_zero_dst # scale the operand
15474: clr.w FP_SCR0_EX(%a6) # src is a +ZERO
15475: clr.l FP_SCR0_HI(%a6)
15476: clr.l FP_SCR0_LO(%a6)
15477: bra.w fsub_zero_entry # go execute fsub
15478:
15479: #
15480: # both operands are INFs. an OPERR will result if the INFs have the
15481: # same signs. else,
15482: #
15483: fsub_inf_2:
15484: mov.b SRC_EX(%a0),%d0 # exclusive or the signs
15485: mov.b DST_EX(%a1),%d1
15486: eor.b %d1,%d0
15487: bpl.l res_operr # same signs => OPERR (INF - INF)
15488:
15489: # ok, so it's not an OPERR. but we do have to remember to return
15490: # the src INF since that's where the 881/882 gets the j-bit.
15491:
15492: fsub_inf_src:
15493: fmovm.x SRC(%a0),&0x80 # return src INF
15494: fneg.x %fp0 # invert sign
15495: fbge.w fsub_inf_done # sign is now positive
15496: mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
15497: rts
15498:
15499: fsub_inf_dst:
15500: fmovm.x DST(%a1),&0x80 # return dst INF
15501: tst.b DST_EX(%a1) # is INF negative?
15502: bpl.b fsub_inf_done # no
15503: mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
15504: rts
15505:
15506: fsub_inf_done:
15507: mov.b &inf_bmask,FPSR_CC(%a6) # set INF
15508: rts
15509:
15510: #########################################################################
15511: # XDEF **************************************************************** #
15512: # fsqrt(): emulates the fsqrt instruction #
15513: # fssqrt(): emulates the fssqrt instruction #
15514: # fdsqrt(): emulates the fdsqrt instruction #
15515: # #
15516: # XREF **************************************************************** #
15517: # scale_sqrt() - scale the source operand #
15518: # unf_res() - return default underflow result #
15519: # ovf_res() - return default overflow result #
15520: # res_qnan_1op() - return QNAN result #
15521: # res_snan_1op() - return SNAN result #
15522: # #
15523: # INPUT *************************************************************** #
15524: # a0 = pointer to extended precision source operand #
15525: # d0 rnd prec,mode #
15526: # #
15527: # OUTPUT ************************************************************** #
15528: # fp0 = result #
15529: # fp1 = EXOP (if exception occurred) #
15530: # #
15531: # ALGORITHM *********************************************************** #
15532: # Handle NANs, infinities, and zeroes as special cases. Divide #
15533: # norms/denorms into ext/sgl/dbl precision. #
15534: # For norms/denorms, scale the exponents such that a sqrt #
15535: # instruction won't cause an exception. Use the regular fsqrt to #
15536: # compute a result. Check if the regular operands would have taken #
15537: # an exception. If so, return the default overflow/underflow result #
15538: # and return the EXOP if exceptions are enabled. Else, scale the #
15539: # result operand to the proper exponent. #
15540: # #
15541: #########################################################################
15542:
15543: global fssqrt
15544: fssqrt:
15545: andi.b &0x30,%d0 # clear rnd prec
15546: ori.b &s_mode*0x10,%d0 # insert sgl precision
15547: bra.b fsqrt
15548:
15549: global fdsqrt
15550: fdsqrt:
15551: andi.b &0x30,%d0 # clear rnd prec
15552: ori.b &d_mode*0x10,%d0 # insert dbl precision
15553: # fall through to fsqrt
15554: global fsqrt
15555: fsqrt:
15556: mov.l %d0,L_SCR3(%a6) # store rnd info
15557: clr.w %d1
15558: mov.b STAG(%a6),%d1
15559: bne.w fsqrt_not_norm # optimize on non-norm input
15560:
15561: #
15562: # SQUARE ROOT: norms and denorms ONLY!
15563: #
15564: fsqrt_norm:
15565: tst.b SRC_EX(%a0) # is operand negative?
15566: bmi.l res_operr # yes
15567:
15568: andi.b &0xc0,%d0 # is precision extended?
15569: bne.b fsqrt_not_ext # no; go handle sgl or dbl
15570:
15571: fmov.l L_SCR3(%a6),%fpcr # set FPCR
15572: fmov.l &0x0,%fpsr # clear FPSR
15573:
15574: fsqrt.x (%a0),%fp0 # execute square root
15575:
15576: fmov.l %fpsr,%d1 # fetch status
15577: or.l %d1,USER_FPSR(%a6) # set N,INEX
15578:
15579: rts
15580:
15581: fsqrt_denorm:
15582: tst.b SRC_EX(%a0) # is operand negative?
15583: bmi.l res_operr # yes
15584:
15585: andi.b &0xc0,%d0 # is precision extended?
15586: bne.b fsqrt_not_ext # no; go handle sgl or dbl
15587:
15588: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
15589: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
15590: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
15591:
15592: bsr.l scale_sqrt # calculate scale factor
15593:
15594: bra.w fsqrt_sd_normal
15595:
15596: #
15597: # operand is either single or double
15598: #
15599: fsqrt_not_ext:
15600: cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
15601: bne.w fsqrt_dbl
15602:
15603: #
15604: # operand is to be rounded to single precision
15605: #
15606: fsqrt_sgl:
15607: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
15608: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
15609: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
15610:
15611: bsr.l scale_sqrt # calculate scale factor
15612:
15613: cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
15614: beq.w fsqrt_sd_may_unfl
15615: bgt.w fsqrt_sd_unfl # yes; go handle underflow
15616: cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
15617: beq.w fsqrt_sd_may_ovfl # maybe; go check
15618: blt.w fsqrt_sd_ovfl # yes; go handle overflow
15619:
15620: #
15621: # operand will NOT overflow or underflow when moved in to the fp reg file
15622: #
15623: fsqrt_sd_normal:
15624: fmov.l &0x0,%fpsr # clear FPSR
15625: fmov.l L_SCR3(%a6),%fpcr # set FPCR
15626:
15627: fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
15628:
15629: fmov.l %fpsr,%d1 # save FPSR
15630: fmov.l &0x0,%fpcr # clear FPCR
15631:
15632: or.l %d1,USER_FPSR(%a6) # save INEX2,N
15633:
15634: fsqrt_sd_normal_exit:
15635: mov.l %d2,-(%sp) # save d2
15636: fmovm.x &0x80,FP_SCR0(%a6) # store out result
15637: mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
15638: mov.l %d1,%d2 # make a copy
15639: andi.l &0x7fff,%d1 # strip sign
15640: sub.l %d0,%d1 # subtract scale factor
15641: andi.w &0x8000,%d2 # keep old sign
15642: or.w %d1,%d2 # concat old sign,new exp
15643: mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
15644: mov.l (%sp)+,%d2 # restore d2
15645: fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
15646: rts
15647:
15648: #
15649: # operand is to be rounded to double precision
15650: #
15651: fsqrt_dbl:
15652: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
15653: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
15654: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
15655:
15656: bsr.l scale_sqrt # calculate scale factor
15657:
15658: cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
15659: beq.w fsqrt_sd_may_unfl
15660: bgt.b fsqrt_sd_unfl # yes; go handle underflow
15661: cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
15662: beq.w fsqrt_sd_may_ovfl # maybe; go check
15663: blt.w fsqrt_sd_ovfl # yes; go handle overflow
15664: bra.w fsqrt_sd_normal # no; go handle normalized op
15665:
15666: # we're on the line here and the distinguishing characteristic is whether
15667: # the exponent is 3fff or 3ffe. if it's 3ffe, then it's a safe number
15668: # otherwise fall through to underflow.
15669: fsqrt_sd_may_unfl:
15670: btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
15671: bne.w fsqrt_sd_normal # yes, so no underflow
15672:
15673: #
15674: # operand WILL underflow when moved in to the fp register file
15675: #
15676: fsqrt_sd_unfl:
15677: bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
15678:
15679: fmov.l &rz_mode*0x10,%fpcr # set FPCR
15680: fmov.l &0x0,%fpsr # clear FPSR
15681:
15682: fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
15683:
15684: fmov.l %fpsr,%d1 # save status
15685: fmov.l &0x0,%fpcr # clear FPCR
15686:
15687: or.l %d1,USER_FPSR(%a6) # save INEX2,N
15688:
15689: # if underflow or inexact is enabled, go calculate EXOP first.
15690: mov.b FPCR_ENABLE(%a6),%d1
15691: andi.b &0x0b,%d1 # is UNFL or INEX enabled?
15692: bne.b fsqrt_sd_unfl_ena # yes
15693:
15694: fsqrt_sd_unfl_dis:
15695: fmovm.x &0x80,FP_SCR0(%a6) # store out result
15696:
15697: lea FP_SCR0(%a6),%a0 # pass: result addr
15698: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
15699: bsr.l unf_res # calculate default result
15700: or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
15701: fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
15702: rts
15703:
15704: #
15705: # operand will underflow AND underflow is enabled.
15706: # therefore, we must return the result rounded to extended precision.
15707: #
15708: fsqrt_sd_unfl_ena:
15709: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
15710: mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
15711: mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
15712:
15713: mov.l %d2,-(%sp) # save d2
15714: mov.l %d1,%d2 # make a copy
15715: andi.l &0x7fff,%d1 # strip sign
15716: andi.w &0x8000,%d2 # keep old sign
15717: sub.l %d0,%d1 # subtract scale factor
15718: addi.l &0x6000,%d1 # add new bias
15719: andi.w &0x7fff,%d1 # clear top bit
15720: or.w %d2,%d1 # concat new sign,new exp
15721: mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
15722: fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
15723: mov.l (%sp)+,%d2 # restore d2
15724: bra.b fsqrt_sd_unfl_dis
15725:
15726: #
15727: # operand WILL overflow.
15728: #
15729: fsqrt_sd_ovfl:
15730: fmov.l &0x0,%fpsr # clear FPSR
15731: fmov.l L_SCR3(%a6),%fpcr # set FPCR
15732:
15733: fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
15734:
15735: fmov.l &0x0,%fpcr # clear FPCR
15736: fmov.l %fpsr,%d1 # save FPSR
15737:
15738: or.l %d1,USER_FPSR(%a6) # save INEX2,N
15739:
15740: fsqrt_sd_ovfl_tst:
15741: or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
15742:
15743: mov.b FPCR_ENABLE(%a6),%d1
15744: andi.b &0x13,%d1 # is OVFL or INEX enabled?
15745: bne.b fsqrt_sd_ovfl_ena # yes
15746:
15747: #
15748: # OVFL is not enabled; therefore, we must create the default result by
15749: # calling ovf_res().
15750: #
15751: fsqrt_sd_ovfl_dis:
15752: btst &neg_bit,FPSR_CC(%a6) # is result negative?
15753: sne %d1 # set sign param accordingly
15754: mov.l L_SCR3(%a6),%d0 # pass: prec,mode
15755: bsr.l ovf_res # calculate default result
15756: or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
15757: fmovm.x (%a0),&0x80 # return default result in fp0
15758: rts
15759:
15760: #
15761: # OVFL is enabled.
15762: # the INEX2 bit has already been updated by the round to the correct precision.
15763: # now, round to extended(and don't alter the FPSR).
15764: #
15765: fsqrt_sd_ovfl_ena:
15766: mov.l %d2,-(%sp) # save d2
15767: mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
15768: mov.l %d1,%d2 # make a copy
15769: andi.l &0x7fff,%d1 # strip sign
15770: andi.w &0x8000,%d2 # keep old sign
15771: sub.l %d0,%d1 # subtract scale factor
15772: subi.l &0x6000,%d1 # subtract bias
15773: andi.w &0x7fff,%d1 # clear top bit
15774: or.w %d2,%d1 # concat sign,exp
15775: mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
15776: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
15777: mov.l (%sp)+,%d2 # restore d2
15778: bra.b fsqrt_sd_ovfl_dis
15779:
15780: #
15781: # the move in MAY overflow. so...
15782: #
15783: fsqrt_sd_may_ovfl:
15784: btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
15785: bne.w fsqrt_sd_ovfl # yes, so overflow
15786:
15787: fmov.l &0x0,%fpsr # clear FPSR
15788: fmov.l L_SCR3(%a6),%fpcr # set FPCR
15789:
15790: fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
15791:
15792: fmov.l %fpsr,%d1 # save status
15793: fmov.l &0x0,%fpcr # clear FPCR
15794:
15795: or.l %d1,USER_FPSR(%a6) # save INEX2,N
15796:
15797: fmov.x %fp0,%fp1 # make a copy of result
15798: fcmp.b %fp1,&0x1 # is |result| >= 1.b?
15799: fbge.w fsqrt_sd_ovfl_tst # yes; overflow has occurred
15800:
15801: # no, it didn't overflow; we have correct result
15802: bra.w fsqrt_sd_normal_exit
15803:
15804: ##########################################################################
15805:
15806: #
15807: # input is not normalized; what is it?
15808: #
15809: fsqrt_not_norm: # dispatch on the source tag in d1
15810: cmpi.b %d1,&DENORM # weed out DENORM
15811: beq.w fsqrt_denorm
15812: cmpi.b %d1,&ZERO # weed out ZERO
15813: beq.b fsqrt_zero
15814: cmpi.b %d1,&INF # weed out INF
15815: beq.b fsqrt_inf
15816: cmpi.b %d1,&SNAN # weed out SNAN
15817: beq.l res_snan_1op
15818: bra.l res_qnan_1op
15819:
15820: #
15821: # fsqrt(+0) = +0
15822: # fsqrt(-0) = -0
15823: # fsqrt(+INF) = +INF
15824: # fsqrt(-INF) = OPERR
15825: #
15826: fsqrt_zero:
15827: tst.b SRC_EX(%a0) # is ZERO positive or negative?
15828: bmi.b fsqrt_zero_m # negative
15829: fsqrt_zero_p:
15830: fmov.s &0x00000000,%fp0 # return +ZERO
15831: mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
15832: rts
15833: fsqrt_zero_m:
15834: fmov.s &0x80000000,%fp0 # return -ZERO
15835: mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
15836: rts
15837:
15838: fsqrt_inf:
15839: tst.b SRC_EX(%a0) # is INF positive or negative?
15840: bmi.l res_operr # negative
15841: fsqrt_inf_p:
15842: fmovm.x SRC(%a0),&0x80 # return +INF in fp0
15843: mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
15844: rts
15845:
15846: ##########################################################################
15847:
15848: #########################################################################
15849: # XDEF **************************************************************** #
15850: # addsub_scaler2(): scale inputs to fadd/fsub such that no #
15851: # OVFL/UNFL exceptions will result #
15852: # #
15853: # XREF **************************************************************** #
15854: # norm() - normalize mantissa after adjusting exponent #
15855: # #
15856: # INPUT *************************************************************** #
15857: # FP_SRC(a6) = fp op1(src) #
15858: # FP_DST(a6) = fp op2(dst) #
15859: # #
15860: # OUTPUT ************************************************************** #
15861: # FP_SRC(a6) = fp op1 scaled(src) #
15862: # FP_DST(a6) = fp op2 scaled(dst) #
15863: # d0 = scale amount #
15864: # #
15865: # ALGORITHM *********************************************************** #
15866: # If the DST exponent is > the SRC exponent, set the DST exponent #
15867: # equal to 0x3fff and scale the SRC exponent by the value that the #
15868: # DST exponent was scaled by. If the SRC exponent is greater or equal, #
15869: # do the opposite. Return this scale factor in d0. #
15870: # If the two exponents differ by > the number of mantissa bits #
15871: # plus two, then set the smallest exponent to a very small value as a #
15872: # quick shortcut. #
15873: # #
15874: #########################################################################
15875:
15876: global addsub_scaler2
15877: addsub_scaler2:
15878: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
15879: mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
15880: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
15881: mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
15882: mov.w SRC_EX(%a0),%d0
15883: mov.w DST_EX(%a1),%d1
15884: mov.w %d0,FP_SCR0_EX(%a6)
15885: mov.w %d1,FP_SCR1_EX(%a6)
15886:
15887: andi.w &0x7fff,%d0
15888: andi.w &0x7fff,%d1
15889: mov.w %d0,L_SCR1(%a6) # store src exponent
15890: mov.w %d1,2+L_SCR1(%a6) # store dst exponent
15891:
15892: cmp.w %d0, %d1 # is src exp >= dst exp?
15893: bge.l src_exp_ge2
15894:
15895: # dst exp is > src exp; scale dst to exp = 0x3fff
15896: dst_exp_gt2:
15897: bsr.l scale_to_zero_dst
15898: mov.l %d0,-(%sp) # save scale factor
15899:
15900: cmpi.b STAG(%a6),&DENORM # is src denormalized?
15901: bne.b cmpexp12
15902:
15903: lea FP_SCR0(%a6),%a0
15904: bsr.l norm # normalize the denorm; result is new exp
15905: neg.w %d0 # new exp = -(shft val)
15906: mov.w %d0,L_SCR1(%a6) # insert new exp
15907:
15908: cmpexp12:
15909: mov.w 2+L_SCR1(%a6),%d0
15910: subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
15911:
15912: cmp.w %d0,L_SCR1(%a6) # is difference >= len(mantissa)+2?
15913: bge.b quick_scale12
15914:
15915: mov.w L_SCR1(%a6),%d0
15916: add.w 0x2(%sp),%d0 # scale src exponent by scale factor
15917: mov.w FP_SCR0_EX(%a6),%d1
15918: and.w &0x8000,%d1
15919: or.w %d1,%d0 # concat {sgn,new exp}
15920: mov.w %d0,FP_SCR0_EX(%a6) # insert new src exponent
15921:
15922: mov.l (%sp)+,%d0 # return SCALE factor
15923: rts
15924:
15925: quick_scale12:
15926: andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
15927: bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
15928:
15929: mov.l (%sp)+,%d0 # return SCALE factor
15930: rts
15931:
15932: # src exp is >= dst exp; scale src to exp = 0x3fff
15933: src_exp_ge2:
15934: bsr.l scale_to_zero_src
15935: mov.l %d0,-(%sp) # save scale factor
15936:
15937: cmpi.b DTAG(%a6),&DENORM # is dst denormalized?
15938: bne.b cmpexp22
15939: lea FP_SCR1(%a6),%a0
15940: bsr.l norm # normalize the denorm; result is new exp
15941: neg.w %d0 # new exp = -(shft val)
15942: mov.w %d0,2+L_SCR1(%a6) # insert new exp
15943:
15944: cmpexp22:
15945: mov.w L_SCR1(%a6),%d0
15946: subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
15947:
15948: cmp.w %d0,2+L_SCR1(%a6) # is difference >= len(mantissa)+2?
15949: bge.b quick_scale22
15950:
15951: mov.w 2+L_SCR1(%a6),%d0
15952: add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
15953: mov.w FP_SCR1_EX(%a6),%d1
15954: andi.w &0x8000,%d1
15955: or.w %d1,%d0 # concat {sgn,new exp}
15956: mov.w %d0,FP_SCR1_EX(%a6) # insert new dst exponent
15957:
15958: mov.l (%sp)+,%d0 # return SCALE factor
15959: rts
15960:
15961: quick_scale22:
15962: andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
15963: bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
15964:
15965: mov.l (%sp)+,%d0 # return SCALE factor
15966: rts
15967:
15968: ##########################################################################
15969:
15970: #########################################################################
15971: # XDEF **************************************************************** #
15972: # scale_to_zero_src(): scale the exponent of extended precision #
15973: # value at FP_SCR0(a6). #
15974: # #
15975: # XREF **************************************************************** #
15976: # norm() - normalize the mantissa if the operand was a DENORM #
15977: # #
15978: # INPUT *************************************************************** #
15979: # FP_SCR0(a6) = extended precision operand to be scaled #
15980: # #
15981: # OUTPUT ************************************************************** #
15982: # FP_SCR0(a6) = scaled extended precision operand #
15983: # d0 = scale value #
15984: # #
15985: # ALGORITHM *********************************************************** #
15986: # Set the exponent of the input operand to 0x3fff. Save the value #
15987: # of the difference between the original and new exponent. Then, #
15988: # normalize the operand if it was a DENORM. Add this normalization #
15989: # value to the previous value. Return the result. #
15990: # #
15991: #########################################################################
15992:
15993: global scale_to_zero_src
15994: scale_to_zero_src:
15995: mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
15996: mov.w %d1,%d0 # make a copy
15997:
15998: andi.l &0x7fff,%d1 # extract operand's exponent
15999:
16000: andi.w &0x8000,%d0 # extract operand's sgn
16001: or.w &0x3fff,%d0 # insert new operand's exponent(=0)
16002:
16003: mov.w %d0,FP_SCR0_EX(%a6) # insert biased exponent
16004:
16005: cmpi.b STAG(%a6),&DENORM # is operand a DENORM?
16006: beq.b stzs_denorm # normalize the DENORM
16007:
16008: stzs_norm:
16009: mov.l &0x3fff,%d0
16010: sub.l %d1,%d0 # scale = BIAS + (-exp)
16011:
16012: rts
16013:
16014: stzs_denorm:
16015: lea FP_SCR0(%a6),%a0 # pass ptr to src op
16016: bsr.l norm # normalize denorm
16017: neg.l %d0 # new exponent = -(shft val)
16018: mov.l %d0,%d1 # pass new exp to stzs_norm
16019: bra.b stzs_norm # finish scaling
16020:
16021: ###
16022:
16023: #########################################################################
16024: # XDEF **************************************************************** #
16025: # scale_sqrt(): scale the input operand exponent so a subsequent #
16026: # fsqrt operation won't take an exception. #
16027: # #
16028: # XREF **************************************************************** #
16029: # norm() - normalize the mantissa if the operand was a DENORM #
16030: # #
16031: # INPUT *************************************************************** #
16032: # FP_SCR0(a6) = extended precision operand to be scaled #
16033: # #
16034: # OUTPUT ************************************************************** #
16035: # FP_SCR0(a6) = scaled extended precision operand #
16036: # d0 = scale value #
16037: # #
16038: # ALGORITHM *********************************************************** #
16039: # If the input operand is a DENORM, normalize it. #
16040: # If the exponent of the input operand is even, set the exponent #
16041: # to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the #
16042: # exponent of the input operand is off, set the exponent to ox3fff and #
16043: # return a scale factor of "(exp-0x3fff)/2". #
16044: # #
16045: #########################################################################
16046:
16047: global scale_sqrt
16048: scale_sqrt:
16049: cmpi.b STAG(%a6),&DENORM # is operand a DENORM?
16050: beq.b ss_denorm # normalize the DENORM
16051:
16052: mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
16053: andi.l &0x7fff,%d1 # extract operand's exponent
16054:
16055: andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
16056:
16057: btst &0x0,%d1 # is exp even or odd?
16058: beq.b ss_norm_even
16059:
16060: ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
16061:
16062: mov.l &0x3fff,%d0
16063: sub.l %d1,%d0 # scale = BIAS + (-exp)
16064: asr.l &0x1,%d0 # divide scale factor by 2
16065: rts
16066:
16067: ss_norm_even:
16068: ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(= -1)
16069:
16070: mov.l &0x3ffe,%d0
16071: sub.l %d1,%d0 # scale = BIAS + (-exp)
16072: asr.l &0x1,%d0 # divide scale factor by 2
16073: rts
16074:
16075: ss_denorm:
16076: lea FP_SCR0(%a6),%a0 # pass ptr to src op
16077: bsr.l norm # normalize denorm
16078:
16079: btst &0x0,%d0 # is exp even or odd?
16080: beq.b ss_denorm_even
16081:
16082: ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
16083:
16084: add.l &0x3fff,%d0
16085: asr.l &0x1,%d0 # divide scale factor by 2
16086: rts
16087:
16088: ss_denorm_even:
16089: ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(= -1)
16090:
16091: add.l &0x3ffe,%d0
16092: asr.l &0x1,%d0 # divide scale factor by 2
16093: rts
16094:
16095: ###
16096:
16097: #########################################################################
16098: # XDEF **************************************************************** #
16099: # scale_to_zero_dst(): scale the exponent of extended precision #
16100: # value at FP_SCR1(a6). #
16101: # #
16102: # XREF **************************************************************** #
16103: # norm() - normalize the mantissa if the operand was a DENORM #
16104: # #
16105: # INPUT *************************************************************** #
16106: # FP_SCR1(a6) = extended precision operand to be scaled #
16107: # #
16108: # OUTPUT ************************************************************** #
16109: # FP_SCR1(a6) = scaled extended precision operand #
16110: # d0 = scale value #
16111: # #
16112: # ALGORITHM *********************************************************** #
16113: # Set the exponent of the input operand to 0x3fff. Save the value #
16114: # of the difference between the original and new exponent. Then, #
16115: # normalize the operand if it was a DENORM. Add this normalization #
16116: # value to the previous value. Return the result. #
16117: # #
16118: #########################################################################
16119:
16120: 	global		scale_to_zero_dst
16121: scale_to_zero_dst:
16122: 	mov.w		FP_SCR1_EX(%a6),%d1	# extract operand's {sgn,exp}
16123: 	mov.w		%d1,%d0			# make a copy
16124: 
16125: 	andi.l		&0x7fff,%d1		# extract operand's exponent
16126: 
16127: 	andi.w		&0x8000,%d0		# extract operand's sgn
16128: 	or.w		&0x3fff,%d0		# insert new biased exponent of 0x3fff
16129: 
16130: 	mov.w		%d0,FP_SCR1_EX(%a6)	# insert biased exponent
16131: 
16132: 	cmpi.b		DTAG(%a6),&DENORM	# is operand a DENORM?
16133: 	beq.b		stzd_denorm		# yes; normalize the DENORM
16134: 
16135: stzd_norm:
16136: 	mov.l		&0x3fff,%d0
16137: 	sub.l		%d1,%d0			# scale = BIAS + (-exp)
16138: 	rts
16139: 
16140: stzd_denorm:
16141: 	lea		FP_SCR1(%a6),%a0	# pass ptr to dst op
16142: 	bsr.l		norm			# normalize denorm
16143: 	neg.l		%d0			# new exponent = -(shft val)
16144: 	mov.l		%d0,%d1			# prepare for op_norm call
16145: 	bra.b		stzd_norm		# finish scaling
16146:
16147: ##########################################################################
16148:
16149: #########################################################################
16150: # XDEF **************************************************************** #
16151: # res_qnan(): return default result w/ QNAN operand for dyadic #
16152: # res_snan(): return default result w/ SNAN operand for dyadic #
16153: # res_qnan_1op(): return dflt result w/ QNAN operand for monadic #
16154: # res_snan_1op(): return dflt result w/ SNAN operand for monadic #
16155: # #
16156: # XREF **************************************************************** #
16157: # None #
16158: # #
16159: # INPUT *************************************************************** #
16160: # FP_SRC(a6) = pointer to extended precision src operand #
16161: # FP_DST(a6) = pointer to extended precision dst operand #
16162: # #
16163: # OUTPUT ************************************************************** #
16164: # fp0 = default result #
16165: # #
16166: # ALGORITHM *********************************************************** #
16167: # If either operand (but not both operands) of an operation is a #
16168: # nonsignalling NAN, then that NAN is returned as the result. If both #
16169: # operands are nonsignalling NANs, then the destination operand #
16170: # nonsignalling NAN is returned as the result. #
16171: # If either operand to an operation is a signalling NAN (SNAN), #
16172: # then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap #
16173: # enable bit is set in the FPCR, then the trap is taken and the #
16174: # destination is not modified. If the SNAN trap enable bit is not set, #
16175: # then the SNAN is converted to a nonsignalling NAN (by setting the #
16176: # SNAN bit in the operand to one), and the operation continues as #
16177: # described in the preceding paragraph, for nonsignalling NANs. #
16178: # Make sure the appropriate FPSR bits are set before exiting. #
16179: # #
16180: #########################################################################
16181:
16182: 	global		res_qnan
16183: 	global		res_snan
16184: res_qnan:
16185: res_snan:
16186: 	cmp.b		DTAG(%a6), &SNAN	# is the dst an SNAN?
16187: 	beq.b		dst_snan2
16188: 	cmp.b		DTAG(%a6), &QNAN	# is the dst a QNAN?
16189: 	beq.b		dst_qnan2
16190: src_nan:
16191: 	cmp.b		STAG(%a6), &QNAN	# is the src a QNAN?
16192: 	beq.b		src_qnan2
16193: 	global		res_snan_1op
16194: res_snan_1op:
16195: src_snan2:
16196: 	bset		&0x6, FP_SRC_HI(%a6)	# set SNAN bit (make nonsignalling)
16197: 	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
16198: 	lea		FP_SRC(%a6), %a0	# return src NAN
16199: 	bra.b		nan_comp
16200: 	global		res_qnan_1op
16201: res_qnan_1op:
16202: src_qnan2:
16203: 	or.l		&nan_mask, USER_FPSR(%a6)
16204: 	lea		FP_SRC(%a6), %a0	# return src NAN
16205: 	bra.b		nan_comp
16206: dst_snan2:
16207: 	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
16208: 	bset		&0x6, FP_DST_HI(%a6)	# set SNAN bit (make nonsignalling)
16209: 	lea		FP_DST(%a6), %a0	# return dst NAN
16210: 	bra.b		nan_comp
16211: dst_qnan2:
16212: 	lea		FP_DST(%a6), %a0	# return dst NAN
16213: 	cmp.b		STAG(%a6), &SNAN	# is the src an SNAN, too?
16214: 	bne		nan_done
16215: 	or.l		&aiop_mask+snan_mask, USER_FPSR(%a6)
16216: nan_done:
16217: 	or.l		&nan_mask, USER_FPSR(%a6)
16218: nan_comp:
16219: 	btst		&0x7, FTEMP_EX(%a0)	# is NAN neg?
16220: 	beq.b		nan_not_neg
16221: 	or.l		&neg_mask, USER_FPSR(%a6)
16222: nan_not_neg:
16223: 	fmovm.x		(%a0), &0x80		# return NAN in fp0
16224: 	rts
16225:
16226: #########################################################################
16227: # XDEF **************************************************************** #
16228: # res_operr(): return default result during operand error #
16229: # #
16230: # XREF **************************************************************** #
16231: # None #
16232: # #
16233: # INPUT *************************************************************** #
16234: # None #
16235: # #
16236: # OUTPUT ************************************************************** #
16237: # fp0 = default operand error result #
16238: # #
16239: # ALGORITHM *********************************************************** #
16240: # 	A nonsignalling NAN is returned as the default result when 	#
16241: # an operand error occurs for the following cases: #
16242: # #
16243: # Multiply: (Infinity x Zero) #
16244: # Divide : (Zero / Zero) || (Infinity / Infinity) #
16245: # #
16246: #########################################################################
16247:
16248: 	global		res_operr
16249: res_operr:
16250: 	or.l		&nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
16251: 	fmovm.x		nan_return(%pc), &0x80	# return default QNAN in fp0
16252: 	rts
16253: 
16254: nan_return:
16255: 	long		0x7fff0000, 0xffffffff, 0xffffffff	# default nonsignalling NAN
16256:
16257: #########################################################################
16258: # fdbcc(): routine to emulate the fdbcc instruction #
16259: # #
16260: # XDEF **************************************************************** #
16261: # _fdbcc() #
16262: # #
16263: # XREF **************************************************************** #
16264: # fetch_dreg() - fetch Dn value #
16265: # store_dreg_l() - store updated Dn value #
16266: # #
16267: # INPUT *************************************************************** #
16268: # d0 = displacement #
16269: # #
16270: # OUTPUT ************************************************************** #
16271: # none #
16272: # #
16273: # ALGORITHM *********************************************************** #
16274: # This routine checks which conditional predicate is specified by #
16275: # the stacked fdbcc instruction opcode and then branches to a routine #
16276: # for that predicate. The corresponding fbcc instruction is then used #
16277: # to see whether the condition (specified by the stacked FPSR) is true #
16278: # or false. #
16279: # If a BSUN exception should be indicated, the BSUN and ABSUN #
16280: # bits are set in the stacked FPSR. If the BSUN exception is enabled, #
16281: # the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
16282: # enabled BSUN should not be flagged and the predicate is true, then #
16283: # Dn is fetched and decremented by one. If Dn is not equal to -1, add #
16284: # the displacement value to the stacked PC so that when an "rte" is #
16285: # finally executed, the branch occurs. #
16286: # #
16287: #########################################################################
16288: 	global		_fdbcc
16289: _fdbcc:
16290: 	mov.l		%d0,L_SCR1(%a6)		# save displacement
16291: 
16292: 	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
16293: 
16294: 	clr.l		%d1			# clear scratch reg
16295: 	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
16296: 	ror.l		&0x8,%d1		# rotate to top byte
16297: 	fmov.l		%d1,%fpsr		# insert into FPSR
16298: 
16299: 	mov.w		(tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table offset
16300: 	jmp		(tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
16301: 
16302: tbl_fdbcc:
16303: 	short		fdbcc_f		-	tbl_fdbcc	# 00
16304: 	short		fdbcc_eq	-	tbl_fdbcc	# 01
16305: 	short		fdbcc_ogt	-	tbl_fdbcc	# 02
16306: 	short		fdbcc_oge	-	tbl_fdbcc	# 03
16307: 	short		fdbcc_olt	-	tbl_fdbcc	# 04
16308: 	short		fdbcc_ole	-	tbl_fdbcc	# 05
16309: 	short		fdbcc_ogl	-	tbl_fdbcc	# 06
16310: 	short		fdbcc_or	-	tbl_fdbcc	# 07
16311: 	short		fdbcc_un	-	tbl_fdbcc	# 08
16312: 	short		fdbcc_ueq	-	tbl_fdbcc	# 09
16313: 	short		fdbcc_ugt	-	tbl_fdbcc	# 10
16314: 	short		fdbcc_uge	-	tbl_fdbcc	# 11
16315: 	short		fdbcc_ult	-	tbl_fdbcc	# 12
16316: 	short		fdbcc_ule	-	tbl_fdbcc	# 13
16317: 	short		fdbcc_neq	-	tbl_fdbcc	# 14
16318: 	short		fdbcc_t		-	tbl_fdbcc	# 15
16319: 	short		fdbcc_sf	-	tbl_fdbcc	# 16
16320: 	short		fdbcc_seq	-	tbl_fdbcc	# 17
16321: 	short		fdbcc_gt	-	tbl_fdbcc	# 18
16322: 	short		fdbcc_ge	-	tbl_fdbcc	# 19
16323: 	short		fdbcc_lt	-	tbl_fdbcc	# 20
16324: 	short		fdbcc_le	-	tbl_fdbcc	# 21
16325: 	short		fdbcc_gl	-	tbl_fdbcc	# 22
16326: 	short		fdbcc_gle	-	tbl_fdbcc	# 23
16327: 	short		fdbcc_ngle	-	tbl_fdbcc	# 24
16328: 	short		fdbcc_ngl	-	tbl_fdbcc	# 25
16329: 	short		fdbcc_nle	-	tbl_fdbcc	# 26
16330: 	short		fdbcc_nlt	-	tbl_fdbcc	# 27
16331: 	short		fdbcc_nge	-	tbl_fdbcc	# 28
16332: 	short		fdbcc_ngt	-	tbl_fdbcc	# 29
16333: 	short		fdbcc_sneq	-	tbl_fdbcc	# 30
16334: 	short		fdbcc_st	-	tbl_fdbcc	# 31
16335:
16336: #########################################################################
16337: # #
16338: # IEEE Nonaware tests #
16339: # #
16340: # For the IEEE nonaware tests, only the false branch changes the #
16341: # counter. However, the true branch may set bsun so we check to see #
16342: # if the NAN bit is set, in which case BSUN and AIOP will be set. #
16343: # #
16344: # The cases EQ and NE are shared by the Aware and Nonaware groups #
16345: # and are incapable of setting the BSUN exception bit. #
16346: # #
16347: # Typically, only one of the two possible branch directions could #
16348: # have the NAN bit set. #
16349: # (This is assuming the mutual exclusiveness of FPSR cc bit groupings #
16350: # is preserved.) #
16351: # #
16352: #########################################################################
16353:
16354: #
16355: # equal:
16356: #
16357: # Z
16358: #
16359: fdbcc_eq:
16360: 	fbeq.w		fdbcc_eq_yes		# equal?
16361: fdbcc_eq_no:
16362: 	bra.w		fdbcc_false		# no; go handle counter
16363: fdbcc_eq_yes:
16364: 	rts
16365: 
16366: #
16367: # not equal:
16368: #	_
16369: #	Z
16370: #
16371: fdbcc_neq:
16372: 	fbneq.w		fdbcc_neq_yes		# not equal?
16373: fdbcc_neq_no:
16374: 	bra.w		fdbcc_false		# no; go handle counter
16375: fdbcc_neq_yes:
16376: 	rts
16377: 
16378: #
16379: # greater than:
16380: #	_______
16381: #	NANvZvN
16382: #
16383: fdbcc_gt:
16384: 	fbgt.w		fdbcc_gt_yes		# greater than?
16385: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16386: 	beq.w		fdbcc_false		# no; go handle counter
16387: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16388: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16389: 	bne.w		fdbcc_bsun		# yes; we have an exception
16390: 	bra.w		fdbcc_false		# no; go handle counter
16391: fdbcc_gt_yes:
16392: 	rts					# do nothing
16393: 
16394: #
16395: # not greater than:
16396: #
16397: #	NANvZvN
16398: #
16399: fdbcc_ngt:
16400: 	fbngt.w		fdbcc_ngt_yes		# not greater than?
16401: fdbcc_ngt_no:
16402: 	bra.w		fdbcc_false		# no; go handle counter
16403: fdbcc_ngt_yes:
16404: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16405: 	beq.b		fdbcc_ngt_done		# no; go finish
16406: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16407: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16408: 	bne.w		fdbcc_bsun		# yes; we have an exception
16409: fdbcc_ngt_done:
16410: 	rts					# no; do nothing
16411: 
16412: #
16413: # greater than or equal:
16414: #	      _____
16415: #	Zv(NANvN)
16416: #
16417: fdbcc_ge:
16418: 	fbge.w		fdbcc_ge_yes		# greater than or equal?
16419: fdbcc_ge_no:
16420: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16421: 	beq.w		fdbcc_false		# no; go handle counter
16422: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16423: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16424: 	bne.w		fdbcc_bsun		# yes; we have an exception
16425: 	bra.w		fdbcc_false		# no; go handle counter
16426: fdbcc_ge_yes:
16427: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16428: 	beq.b		fdbcc_ge_yes_done	# no; go do nothing
16429: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16430: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16431: 	bne.w		fdbcc_bsun		# yes; we have an exception
16432: fdbcc_ge_yes_done:
16433: 	rts					# do nothing
16434: 
16435: #
16436: # not (greater than or equal):
16437: #	       _
16438: #	NANv(N^Z)
16439: #
16440: fdbcc_nge:
16441: 	fbnge.w		fdbcc_nge_yes		# not (greater than or equal)?
16442: fdbcc_nge_no:
16443: 	bra.w		fdbcc_false		# no; go handle counter
16444: fdbcc_nge_yes:
16445: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16446: 	beq.b		fdbcc_nge_done		# no; go finish
16447: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16448: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16449: 	bne.w		fdbcc_bsun		# yes; we have an exception
16450: fdbcc_nge_done:
16451: 	rts					# no; do nothing
16452: 
16453: #
16454: # less than:
16455: #	   _____
16456: #	N^(NANvZ)
16457: #
16458: fdbcc_lt:
16459: 	fblt.w		fdbcc_lt_yes		# less than?
16460: fdbcc_lt_no:
16461: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16462: 	beq.w		fdbcc_false		# no; go handle counter
16463: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16464: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16465: 	bne.w		fdbcc_bsun		# yes; we have an exception
16466: 	bra.w		fdbcc_false		# no; go handle counter
16467: fdbcc_lt_yes:
16468: 	rts					# do nothing
16469: 
16470: #
16471: # not less than:
16472: #	       _
16473: #	NANv(ZvN)
16474: #
16475: fdbcc_nlt:
16476: 	fbnlt.w		fdbcc_nlt_yes		# not less than?
16477: fdbcc_nlt_no:
16478: 	bra.w		fdbcc_false		# no; go handle counter
16479: fdbcc_nlt_yes:
16480: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16481: 	beq.b		fdbcc_nlt_done		# no; go finish
16482: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16483: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16484: 	bne.w		fdbcc_bsun		# yes; we have an exception
16485: fdbcc_nlt_done:
16486: 	rts					# no; do nothing
16487: 
16488: #
16489: # less than or equal:
16490: #	     ___
16491: #	Zv(N^NAN)
16492: #
16493: fdbcc_le:
16494: 	fble.w		fdbcc_le_yes		# less than or equal?
16495: fdbcc_le_no:
16496: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16497: 	beq.w		fdbcc_false		# no; go handle counter
16498: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16499: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16500: 	bne.w		fdbcc_bsun		# yes; we have an exception
16501: 	bra.w		fdbcc_false		# no; go handle counter
16502: fdbcc_le_yes:
16503: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16504: 	beq.b		fdbcc_le_yes_done	# no; go do nothing
16505: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16506: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16507: 	bne.w		fdbcc_bsun		# yes; we have an exception
16508: fdbcc_le_yes_done:
16509: 	rts					# do nothing
16510: 
16511: #
16512: # not (less than or equal):
16513: #	         ___
16514: #	NANv(NvZ)
16515: #
16516: fdbcc_nle:
16517: 	fbnle.w		fdbcc_nle_yes		# not (less than or equal)?
16518: fdbcc_nle_no:
16519: 	bra.w		fdbcc_false		# no; go handle counter
16520: fdbcc_nle_yes:
16521: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16522: 	beq.w		fdbcc_nle_done		# no; go finish
16523: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16524: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16525: 	bne.w		fdbcc_bsun		# yes; we have an exception
16526: fdbcc_nle_done:
16527: 	rts					# no; do nothing
16528: 
16529: #
16530: # greater or less than:
16531: #	 _____
16532: #	NANvZ
16533: #
16534: fdbcc_gl:
16535: 	fbgl.w		fdbcc_gl_yes		# greater or less than?
16536: fdbcc_gl_no:
16537: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16538: 	beq.w		fdbcc_false		# no; handle counter
16539: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16540: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16541: 	bne.w		fdbcc_bsun		# yes; we have an exception
16542: 	bra.w		fdbcc_false		# no; go handle counter
16543: fdbcc_gl_yes:
16544: 	rts					# do nothing
16545: 
16546: #
16547: # not (greater or less than):
16548: #
16549: #	NANvZ
16550: #
16551: fdbcc_ngl:
16552: 	fbngl.w		fdbcc_ngl_yes		# not (greater or less than)?
16553: fdbcc_ngl_no:
16554: 	bra.w		fdbcc_false		# no; go handle counter
16555: fdbcc_ngl_yes:
16556: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
16557: 	beq.b		fdbcc_ngl_done		# no; go finish
16558: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16559: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16560: 	bne.w		fdbcc_bsun		# yes; we have an exception
16561: fdbcc_ngl_done:
16562: 	rts					# no; do nothing
16563: 
16564: #
16565: # greater, less, or equal:
16566: #	___
16567: #	NAN
16568: #
16569: fdbcc_gle:
16570: 	fbgle.w		fdbcc_gle_yes		# greater, less, or equal?
16571: fdbcc_gle_no:
16572: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # false => NAN; set BSUN exc bit
16573: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16574: 	bne.w		fdbcc_bsun		# yes; we have an exception
16575: 	bra.w		fdbcc_false		# no; go handle counter
16576: fdbcc_gle_yes:
16577: 	rts					# do nothing
16578: 
16579: #
16580: # not (greater, less, or equal):
16581: #
16582: #	NAN
16583: #
16584: fdbcc_ngle:
16585: 	fbngle.w	fdbcc_ngle_yes		# not (greater, less, or equal)?
16586: fdbcc_ngle_no:
16587: 	bra.w		fdbcc_false		# no; go handle counter
16588: fdbcc_ngle_yes:
16589: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # true => NAN; set BSUN exc bit
16590: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16591: 	bne.w		fdbcc_bsun		# yes; we have an exception
16592: 	rts					# no; do nothing
16593:
16594: #########################################################################
16595: # #
16596: # Miscellaneous tests #
16597: # #
16598: # For the IEEE miscellaneous tests, all but fdbf and fdbt can set bsun. #
16599: # #
16600: #########################################################################
16601:
16602: #
16603: # false:
16604: #
16605: # False
16606: #
16607: fdbcc_f:					# no bsun possible
16608: 	bra.w		fdbcc_false		# go handle counter
16609: 
16610: #
16611: # true:
16612: #
16613: #	True
16614: #
16615: fdbcc_t:					# no bsun possible
16616: 	rts					# do nothing
16617: 
16618: #
16619: # signalling false:
16620: #
16621: #	False
16622: #
16623: fdbcc_sf:
16624: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
16625: 	beq.w		fdbcc_false		# no; go handle counter
16626: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16627: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16628: 	bne.w		fdbcc_bsun		# yes; we have an exception
16629: 	bra.w		fdbcc_false		# go handle counter
16630: 
16631: #
16632: # signalling true:
16633: #
16634: #	True
16635: #
16636: fdbcc_st:
16637: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
16638: 	beq.b		fdbcc_st_done		# no; go finish
16639: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16640: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16641: 	bne.w		fdbcc_bsun		# yes; we have an exception
16642: fdbcc_st_done:
16643: 	rts
16644: 
16645: #
16646: # signalling equal:
16647: #
16648: #	Z
16649: #
16650: fdbcc_seq:
16651: 	fbseq.w		fdbcc_seq_yes		# signalling equal?
16652: fdbcc_seq_no:
16653: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
16654: 	beq.w		fdbcc_false		# no; go handle counter
16655: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16656: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16657: 	bne.w		fdbcc_bsun		# yes; we have an exception
16658: 	bra.w		fdbcc_false		# go handle counter
16659: fdbcc_seq_yes:
16660: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
16661: 	beq.b		fdbcc_seq_yes_done	# no; go do nothing
16662: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16663: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16664: 	bne.w		fdbcc_bsun		# yes; we have an exception
16665: fdbcc_seq_yes_done:
16666: 	rts					# yes; do nothing
16667: 
16668: #
16669: # signalling not equal:
16670: #	_
16671: #	Z
16672: #
16673: fdbcc_sneq:
16674: 	fbsneq.w	fdbcc_sneq_yes		# signalling not equal?
16675: fdbcc_sneq_no:
16676: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
16677: 	beq.w		fdbcc_false		# no; go handle counter
16678: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16679: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16680: 	bne.w		fdbcc_bsun		# yes; we have an exception
16681: 	bra.w		fdbcc_false		# go handle counter
16682: fdbcc_sneq_yes:
16683: 	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
16684: 	beq.w		fdbcc_sneq_done		# no; go finish
16685: 	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
16686: 	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
16687: 	bne.w		fdbcc_bsun		# yes; we have an exception
16688: fdbcc_sneq_done:
16689: 	rts
16690:
16691: #########################################################################
16692: # #
16693: # IEEE Aware tests #
16694: # #
16695: # For the IEEE aware tests, action is only taken if the result is false.#
16696: # Therefore, the opposite branch type is used to jump to the decrement #
16697: # routine. #
16698: # The BSUN exception will not be set for any of these tests. #
16699: # #
16700: #########################################################################
16701:
16702: #
16703: # ordered greater than:
16704: # _______
16705: # NANvZvN
16706: #
16707: fdbcc_ogt:
16708: 	fbogt.w		fdbcc_ogt_yes		# ordered greater than?
16709: fdbcc_ogt_no:
16710: 	bra.w		fdbcc_false		# no; go handle counter
16711: fdbcc_ogt_yes:
16712: 	rts					# yes; do nothing
16713: 
16714: #
16715: # unordered or less or equal:
16716: #	_______
16717: #	NANvZvN
16718: #
16719: fdbcc_ule:
16720: 	fbule.w		fdbcc_ule_yes		# unordered or less or equal?
16721: fdbcc_ule_no:
16722: 	bra.w		fdbcc_false		# no; go handle counter
16723: fdbcc_ule_yes:
16724: 	rts					# yes; do nothing
16725: 
16726: #
16727: # ordered greater than or equal:
16728: #	   _____
16729: #	Zv(NANvN)
16730: #
16731: fdbcc_oge:
16732: 	fboge.w		fdbcc_oge_yes		# ordered greater than or equal?
16733: fdbcc_oge_no:
16734: 	bra.w		fdbcc_false		# no; go handle counter
16735: fdbcc_oge_yes:
16736: 	rts					# yes; do nothing
16737: 
16738: #
16739: # unordered or less than:
16740: #	       _
16741: #	NANv(N^Z)
16742: #
16743: fdbcc_ult:
16744: 	fbult.w		fdbcc_ult_yes		# unordered or less than?
16745: fdbcc_ult_no:
16746: 	bra.w		fdbcc_false		# no; go handle counter
16747: fdbcc_ult_yes:
16748: 	rts					# yes; do nothing
16749: 
16750: #
16751: # ordered less than:
16752: #	   _____
16753: #	N^(NANvZ)
16754: #
16755: fdbcc_olt:
16756: 	fbolt.w		fdbcc_olt_yes		# ordered less than?
16757: fdbcc_olt_no:
16758: 	bra.w		fdbcc_false		# no; go handle counter
16759: fdbcc_olt_yes:
16760: 	rts					# yes; do nothing
16761: 
16762: #
16763: # unordered or greater or equal:
16764: #
16765: #	NANvZvN
16766: #
16767: fdbcc_uge:
16768: 	fbuge.w		fdbcc_uge_yes		# unordered or greater or equal?
16769: fdbcc_uge_no:
16770: 	bra.w		fdbcc_false		# no; go handle counter
16771: fdbcc_uge_yes:
16772: 	rts					# yes; do nothing
16773: 
16774: #
16775: # ordered less than or equal:
16776: #	     ___
16777: #	Zv(N^NAN)
16778: #
16779: fdbcc_ole:
16780: 	fbole.w		fdbcc_ole_yes		# ordered less than or equal?
16781: fdbcc_ole_no:
16782: 	bra.w		fdbcc_false		# no; go handle counter
16783: fdbcc_ole_yes:
16784: 	rts					# yes; do nothing
16785: 
16786: #
16787: # unordered or greater than:
16788: #	         ___
16789: #	NANv(NvZ)
16790: #
16791: fdbcc_ugt:
16792: 	fbugt.w		fdbcc_ugt_yes		# unordered or greater than?
16793: fdbcc_ugt_no:
16794: 	bra.w		fdbcc_false		# no; go handle counter
16795: fdbcc_ugt_yes:
16796: 	rts					# yes; do nothing
16797: 
16798: #
16799: # ordered greater or less than:
16800: #	 _____
16801: #	NANvZ
16802: #
16803: fdbcc_ogl:
16804: 	fbogl.w		fdbcc_ogl_yes		# ordered greater or less than?
16805: fdbcc_ogl_no:
16806: 	bra.w		fdbcc_false		# no; go handle counter
16807: fdbcc_ogl_yes:
16808: 	rts					# yes; do nothing
16809: 
16810: #
16811: # unordered or equal:
16812: #
16813: #	NANvZ
16814: #
16815: fdbcc_ueq:
16816: 	fbueq.w		fdbcc_ueq_yes		# unordered or equal?
16817: fdbcc_ueq_no:
16818: 	bra.w		fdbcc_false		# no; go handle counter
16819: fdbcc_ueq_yes:
16820: 	rts					# yes; do nothing
16821: 
16822: #
16823: # ordered:
16824: #	___
16825: #	NAN
16826: #
16827: fdbcc_or:
16828: 	fbor.w		fdbcc_or_yes		# ordered?
16829: fdbcc_or_no:
16830: 	bra.w		fdbcc_false		# no; go handle counter
16831: fdbcc_or_yes:
16832: 	rts					# yes; do nothing
16833: 
16834: #
16835: # unordered:
16836: #
16837: #	NAN
16838: #
16839: fdbcc_un:
16840: 	fbun.w		fdbcc_un_yes		# unordered?
16841: fdbcc_un_no:
16842: 	bra.w		fdbcc_false		# no; go handle counter
16843: fdbcc_un_yes:
16844: 	rts					# yes; do nothing
16845:
16846: #######################################################################
16847:
16848: #
16849: # the bsun exception bit was not set.
16850: #
16851: # (1) subtract 1 from the count register
16852: # (2) if (cr == -1) then
16853: # pc = pc of next instruction
16854: # else
16855: # pc += sign_ext(16-bit displacement)
16856: #
16857: fdbcc_false:
16858: 	mov.b		1+EXC_OPWORD(%a6), %d1	# fetch lo opword
16859: 	andi.w		&0x7, %d1		# extract count register
16860: 
16861: 	bsr.l		fetch_dreg		# fetch count value
16862: # make sure that d0 isn't corrupted between calls...
16863: 
16864: 	subq.w		&0x1, %d0		# Dn - 1 -> Dn (word-size, per DBcc semantics)
16865: 
16866: 	bsr.l		store_dreg_l		# store new count value
16867: 
16868: 	cmpi.w		%d0, &-0x1		# is (Dn == -1)?
16869: 	bne.b		fdbcc_false_cont	# no; take the branch
16870: 	rts					# yes; fall through to next insn
16871: 
16872: fdbcc_false_cont:
16873: 	mov.l		L_SCR1(%a6),%d0		# fetch displacement
16874: 	add.l		USER_FPIAR(%a6),%d0	# add instruction PC
16875: 	addq.l		&0x4,%d0		# add instruction length
16876: 	mov.l		%d0,EXC_PC(%a6)		# set new PC
16877: 	rts
16878: 
16879: # the emulation routine set bsun and BSUN was enabled. have to
16880: # fix stack and jump to the bsun handler.
16881: # let the caller of this routine shift the stack frame up to
16882: # eliminate the effective address field.
16883: fdbcc_bsun:
16884: 	mov.b		&fbsun_flg,SPCOND_FLG(%a6) # flag bsun for the caller
16885: 	rts
16886:
16887: #########################################################################
16888: # ftrapcc(): routine to emulate the ftrapcc instruction #
16889: # #
16890: # XDEF **************************************************************** #
16891: # _ftrapcc() #
16892: # #
16893: # XREF **************************************************************** #
16894: # none #
16895: # #
16896: # INPUT *************************************************************** #
16897: # none #
16898: # #
16899: # OUTPUT ************************************************************** #
16900: # none #
16901: # #
16902: # ALGORITHM *********************************************************** #
16903: # This routine checks which conditional predicate is specified by #
16904: # the stacked ftrapcc instruction opcode and then branches to a routine #
16905: # for that predicate. The corresponding fbcc instruction is then used #
16906: # to see whether the condition (specified by the stacked FPSR) is true #
16907: # or false. #
16908: # If a BSUN exception should be indicated, the BSUN and ABSUN #
16909: # bits are set in the stacked FPSR. If the BSUN exception is enabled, #
16910: # the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
16911: # enabled BSUN should not be flagged and the predicate is true, then #
16912: # the ftrapcc_flg is set in the SPCOND_FLG location. These special #
16913: # flags indicate to the calling routine to emulate the exceptional #
16914: # condition. #
16915: # #
16916: #########################################################################
16917:
16918: 	global		_ftrapcc
16919: _ftrapcc:
16920: 	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
16921: 
16922: 	clr.l		%d1			# clear scratch reg
16923: 	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
16924: 	ror.l		&0x8,%d1		# rotate to top byte
16925: 	fmov.l		%d1,%fpsr		# insert into FPSR
16926: 
16927: 	mov.w		(tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table offset
16928: 	jmp		(tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine
16929: 
16930: tbl_ftrapcc:
16931: 	short		ftrapcc_f	-	tbl_ftrapcc	# 00
16932: 	short		ftrapcc_eq	-	tbl_ftrapcc	# 01
16933: 	short		ftrapcc_ogt	-	tbl_ftrapcc	# 02
16934: 	short		ftrapcc_oge	-	tbl_ftrapcc	# 03
16935: 	short		ftrapcc_olt	-	tbl_ftrapcc	# 04
16936: 	short		ftrapcc_ole	-	tbl_ftrapcc	# 05
16937: 	short		ftrapcc_ogl	-	tbl_ftrapcc	# 06
16938: 	short		ftrapcc_or	-	tbl_ftrapcc	# 07
16939: 	short		ftrapcc_un	-	tbl_ftrapcc	# 08
16940: 	short		ftrapcc_ueq	-	tbl_ftrapcc	# 09
16941: 	short		ftrapcc_ugt	-	tbl_ftrapcc	# 10
16942: 	short		ftrapcc_uge	-	tbl_ftrapcc	# 11
16943: 	short		ftrapcc_ult	-	tbl_ftrapcc	# 12
16944: 	short		ftrapcc_ule	-	tbl_ftrapcc	# 13
16945: 	short		ftrapcc_neq	-	tbl_ftrapcc	# 14
16946: 	short		ftrapcc_t	-	tbl_ftrapcc	# 15
16947: 	short		ftrapcc_sf	-	tbl_ftrapcc	# 16
16948: 	short		ftrapcc_seq	-	tbl_ftrapcc	# 17
16949: 	short		ftrapcc_gt	-	tbl_ftrapcc	# 18
16950: 	short		ftrapcc_ge	-	tbl_ftrapcc	# 19
16951: 	short		ftrapcc_lt	-	tbl_ftrapcc	# 20
16952: 	short		ftrapcc_le	-	tbl_ftrapcc	# 21
16953: 	short		ftrapcc_gl	-	tbl_ftrapcc	# 22
16954: 	short		ftrapcc_gle	-	tbl_ftrapcc	# 23
16955: 	short		ftrapcc_ngle	-	tbl_ftrapcc	# 24
16956: 	short		ftrapcc_ngl	-	tbl_ftrapcc	# 25
16957: 	short		ftrapcc_nle	-	tbl_ftrapcc	# 26
16958: 	short		ftrapcc_nlt	-	tbl_ftrapcc	# 27
16959: 	short		ftrapcc_nge	-	tbl_ftrapcc	# 28
16960: 	short		ftrapcc_ngt	-	tbl_ftrapcc	# 29
16961: 	short		ftrapcc_sneq	-	tbl_ftrapcc	# 30
16962: 	short		ftrapcc_st	-	tbl_ftrapcc	# 31
16963:
16964: #########################################################################
16965: # #
16966: # IEEE Nonaware tests #
16967: # #
16968: # For the IEEE nonaware tests, we set the result based on the #
16969: # floating point condition codes. In addition, we check to see #
16970: # if the NAN bit is set, in which case BSUN and AIOP will be set. #
16971: # #
16972: # The cases EQ and NE are shared by the Aware and Nonaware groups #
16973: # and are incapable of setting the BSUN exception bit. #
16974: # #
16975: # Typically, only one of the two possible branch directions could #
16976: # have the NAN bit set. #
16977: # #
16978: #########################################################################
16979:
16980: #
16981: # equal:
16982: #
16983: # Z
16984: #
16985: ftrapcc_eq:
16986: fbeq.w ftrapcc_trap # equal?
16987: ftrapcc_eq_no:
16988: rts # do nothing
16989:
16990: #
16991: # not equal:
16992: # _
16993: # Z
16994: #
16995: ftrapcc_neq:
16996: fbneq.w ftrapcc_trap # not equal?
16997: ftrapcc_neq_no:
16998: rts # do nothing
16999:
17000: #
17001: # greater than:
17002: # _______
17003: # NANvZvN
17004: #
17005: ftrapcc_gt:
17006: fbgt.w ftrapcc_trap # greater than?
17007: ftrapcc_gt_no:
17008: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17009: beq.b ftrapcc_gt_done # no
17010: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17011: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17012: bne.w ftrapcc_bsun # yes
17013: ftrapcc_gt_done:
17014: rts # no; do nothing
17015:
17016: #
17017: # not greater than:
17018: #
17019: # NANvZvN
17020: #
17021: ftrapcc_ngt:
17022: fbngt.w ftrapcc_ngt_yes # not greater than?
17023: ftrapcc_ngt_no:
17024: rts # predicate false; do not trap
17025: ftrapcc_ngt_yes:
17026: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17027: beq.w ftrapcc_trap # no; go take trap
17028: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17029: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17030: bne.w ftrapcc_bsun # yes
17031: bra.w ftrapcc_trap # no; go take trap
17032:
17033: #
17034: # greater than or equal:
17035: # _____
17036: # Zv(NANvN)
17037: #
17038: ftrapcc_ge:
17039: fbge.w ftrapcc_ge_yes # greater than or equal?
17040: ftrapcc_ge_no:
17041: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17042: beq.b ftrapcc_ge_done # no; go finish
17043: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17044: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17045: bne.w ftrapcc_bsun # yes
17046: ftrapcc_ge_done:
17047: rts # no; do nothing
17048: ftrapcc_ge_yes:
17049: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17050: beq.w ftrapcc_trap # no; go take trap
17051: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17052: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17053: bne.w ftrapcc_bsun # yes
17054: bra.w ftrapcc_trap # no; go take trap
17055:
17056: #
17057: # not (greater than or equal):
17058: # _
17059: # NANv(N^Z)
17060: #
17061: ftrapcc_nge:
17062: fbnge.w ftrapcc_nge_yes # not (greater than or equal)?
17063: ftrapcc_nge_no:
17064: rts # predicate false; do not trap
17065: ftrapcc_nge_yes:
17066: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17067: beq.w ftrapcc_trap # no; go take trap
17068: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17069: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17070: bne.w ftrapcc_bsun # yes
17071: bra.w ftrapcc_trap # no; go take trap
17072:
17073: #
17074: # less than:
17075: # _____
17076: # N^(NANvZ)
17077: #
17078: ftrapcc_lt:
17079: fblt.w ftrapcc_trap # less than?
17080: ftrapcc_lt_no:
17081: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17082: beq.b ftrapcc_lt_done # no; go finish
17083: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17084: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17085: bne.w ftrapcc_bsun # yes
17086: ftrapcc_lt_done:
17087: rts # no; do nothing
17088:
17089: #
17090: # not less than:
17091: # _
17092: # NANv(ZvN)
17093: #
17094: ftrapcc_nlt:
17095: fbnlt.w ftrapcc_nlt_yes # not less than?
17096: ftrapcc_nlt_no:
17097: rts # predicate false; do not trap
17098: ftrapcc_nlt_yes:
17099: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17100: beq.w ftrapcc_trap # no; go take trap
17101: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17102: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17103: bne.w ftrapcc_bsun # yes
17104: bra.w ftrapcc_trap # no; go take trap
17105:
17106: #
17107: # less than or equal:
17108: # ___
17109: # Zv(N^NAN)
17110: #
17111: ftrapcc_le:
17112: fble.w ftrapcc_le_yes # less than or equal?
17113: ftrapcc_le_no:
17114: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17115: beq.b ftrapcc_le_done # no; go finish
17116: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17117: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17118: bne.w ftrapcc_bsun # yes
17119: ftrapcc_le_done:
17120: rts # no; do nothing
17121: ftrapcc_le_yes:
17122: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17123: beq.w ftrapcc_trap # no; go take trap
17124: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17125: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17126: bne.w ftrapcc_bsun # yes
17127: bra.w ftrapcc_trap # no; go take trap
17128:
17129: #
17130: # not (less than or equal):
17131: # ___
17132: # NANv(NvZ)
17133: #
17134: ftrapcc_nle:
17135: fbnle.w ftrapcc_nle_yes # not (less than or equal)?
17136: ftrapcc_nle_no:
17137: rts # predicate false; do not trap
17138: ftrapcc_nle_yes:
17139: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17140: beq.w ftrapcc_trap # no; go take trap
17141: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17142: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17143: bne.w ftrapcc_bsun # yes
17144: bra.w ftrapcc_trap # no; go take trap
17145:
17146: #
17147: # greater or less than:
17148: # _____
17149: # NANvZ
17150: #
17151: ftrapcc_gl:
17152: fbgl.w ftrapcc_trap # greater or less than?
17153: ftrapcc_gl_no:
17154: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17155: beq.b ftrapcc_gl_done # no; go finish
17156: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17157: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17158: bne.w ftrapcc_bsun # yes
17159: ftrapcc_gl_done:
17160: rts # no; do nothing
17161:
17162: #
17163: # not (greater or less than):
17164: #
17165: # NANvZ
17166: #
17167: ftrapcc_ngl:
17168: fbngl.w ftrapcc_ngl_yes # not (greater or less than)?
17169: ftrapcc_ngl_no:
17170: rts # predicate false; do not trap
17171: ftrapcc_ngl_yes:
17172: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17173: beq.w ftrapcc_trap # no; go take trap
17174: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17175: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17176: bne.w ftrapcc_bsun # yes
17177: bra.w ftrapcc_trap # no; go take trap
17178:
17179: #
17180: # greater, less, or equal:
17181: # ___
17182: # NAN
17183: #
17184: ftrapcc_gle:
17185: fbgle.w ftrapcc_trap # greater, less, or equal?
17186: ftrapcc_gle_no:
17187: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # GLE false => NAN set; set BSUN exc bit
17188: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17189: bne.w ftrapcc_bsun # yes
17190: rts # no; do nothing
17191:
17192: #
17193: # not (greater, less, or equal):
17194: #
17195: # NAN
17196: #
17197: ftrapcc_ngle:
17198: fbngle.w ftrapcc_ngle_yes # not (greater, less, or equal)?
17199: ftrapcc_ngle_no:
17200: rts # predicate false; do not trap
17201: ftrapcc_ngle_yes:
17202: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # NGLE true => NAN set; set BSUN exc bit
17203: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17204: bne.w ftrapcc_bsun # yes
17205: bra.w ftrapcc_trap # no; go take trap
17206:
17207: #########################################################################
17208: # #
17209: # Miscellaneous tests #
17210: # #
17211: # These are the nonaware tests F, T, SF, ST, SEQ, and SNEQ.  F and T	#
17212: # never set BSUN; the signalling tests (SF, ST, SEQ, SNEQ) set the	#
17213: # BSUN and AIOP exception bits whenever the NAN condition code is set.	#
17214: # #
17215: #########################################################################
17216:
17217: #
17218: # false:
17219: #
17220: # False
17221: #
17222: ftrapcc_f:
17223: rts # never true; never traps
17224:
17225: #
17226: # true:
17227: #
17228: # True
17229: #
17230: ftrapcc_t:
17231: bra.w ftrapcc_trap # always true; always traps
17232:
17233: #
17234: # signalling false:
17235: #
17236: # False
17237: #
17238: ftrapcc_sf:
17239: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17240: beq.b ftrapcc_sf_done # no; go finish
17241: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17242: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17243: bne.w ftrapcc_bsun # yes
17244: ftrapcc_sf_done:
17245: rts # no; do nothing
17246:
17247: #
17248: # signalling true:
17249: #
17250: # True
17251: #
17252: ftrapcc_st:
17253: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17254: beq.w ftrapcc_trap # no; go take trap
17255: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17256: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17257: bne.w ftrapcc_bsun # yes
17258: bra.w ftrapcc_trap # no; go take trap
17259:
17260: #
17261: # signalling equal:
17262: #
17263: # Z
17264: #
17265: ftrapcc_seq:
17266: fbseq.w ftrapcc_seq_yes # signalling equal?
17267: ftrapcc_seq_no:
17268: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17269: beq.w ftrapcc_seq_done # no; go finish
17270: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17271: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17272: bne.w ftrapcc_bsun # yes
17273: ftrapcc_seq_done:
17274: rts # no; do nothing
17275: ftrapcc_seq_yes:
17276: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17277: beq.w ftrapcc_trap # no; go take trap
17278: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17279: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17280: bne.w ftrapcc_bsun # yes
17281: bra.w ftrapcc_trap # no; go take trap
17282:
17283: #
17284: # signalling not equal:
17285: # _
17286: # Z
17287: #
17288: ftrapcc_sneq:
17289: fbsneq.w ftrapcc_sneq_yes # signalling not equal?
17290: ftrapcc_sneq_no:
17291: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17292: beq.w ftrapcc_sneq_no_done # no; go finish
17293: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17294: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17295: bne.w ftrapcc_bsun # yes
17296: ftrapcc_sneq_no_done:
17297: rts # do nothing
17298: ftrapcc_sneq_yes:
17299: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17300: beq.w ftrapcc_trap # no; go take trap
17301: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17302: btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
17303: bne.w ftrapcc_bsun # yes
17304: bra.w ftrapcc_trap # no; go take trap
17305:
17306: #########################################################################
17307: # #
17308: # IEEE Aware tests #
17309: # #
17310: # For the IEEE aware tests, we only have to set the result based on the #
17311: # floating point condition codes. The BSUN exception will not be #
17312: # set for any of these tests. #
17313: # #
17314: #########################################################################
17315:
17316: #
17317: # ordered greater than:
17318: # _______
17319: # NANvZvN
17320: #
17321: ftrapcc_ogt:
17322: fbogt.w ftrapcc_trap # ordered greater than?
17323: ftrapcc_ogt_no:
17324: rts # do nothing
17325:
17326: #
17327: # unordered or less or equal:
17328: # _______
17329: # NANvZvN
17330: #
17331: ftrapcc_ule:
17332: fbule.w ftrapcc_trap # unordered or less or equal?
17333: ftrapcc_ule_no:
17334: rts # do nothing
17335:
17336: #
17337: # ordered greater than or equal:
17338: # _____
17339: # Zv(NANvN)
17340: #
17341: ftrapcc_oge:
17342: fboge.w ftrapcc_trap # ordered greater than or equal?
17343: ftrapcc_oge_no:
17344: rts # do nothing
17345:
17346: #
17347: # unordered or less than:
17348: # _
17349: # NANv(N^Z)
17350: #
17351: ftrapcc_ult:
17352: fbult.w ftrapcc_trap # unordered or less than?
17353: ftrapcc_ult_no:
17354: rts # do nothing
17355:
17356: #
17357: # ordered less than:
17358: # _____
17359: # N^(NANvZ)
17360: #
17361: ftrapcc_olt:
17362: fbolt.w ftrapcc_trap # ordered less than?
17363: ftrapcc_olt_no:
17364: rts # do nothing
17365:
17366: #
17367: # unordered or greater or equal:
17368: #
17369: # NANvZvN
17370: #
17371: ftrapcc_uge:
17372: fbuge.w ftrapcc_trap # unordered or greater or equal?
17373: ftrapcc_uge_no:
17374: rts # do nothing
17375:
17376: #
17377: # ordered less than or equal:
17378: # ___
17379: # Zv(N^NAN)
17380: #
17381: ftrapcc_ole:
17382: fbole.w ftrapcc_trap # ordered less than or equal?
17383: ftrapcc_ole_no:
17384: rts # do nothing
17385:
17386: #
17387: # unordered or greater than:
17388: # ___
17389: # NANv(NvZ)
17390: #
17391: ftrapcc_ugt:
17392: fbugt.w ftrapcc_trap # unordered or greater than?
17393: ftrapcc_ugt_no:
17394: rts # do nothing
17395:
17396: #
17397: # ordered greater or less than:
17398: # _____
17399: # NANvZ
17400: #
17401: ftrapcc_ogl:
17402: fbogl.w ftrapcc_trap # ordered greater or less than?
17403: ftrapcc_ogl_no:
17404: rts # do nothing
17405:
17406: #
17407: # unordered or equal:
17408: #
17409: # NANvZ
17410: #
17411: ftrapcc_ueq:
17412: fbueq.w ftrapcc_trap # unordered or equal?
17413: ftrapcc_ueq_no:
17414: rts # do nothing
17415:
17416: #
17417: # ordered:
17418: # ___
17419: # NAN
17420: #
17421: ftrapcc_or:
17422: fbor.w ftrapcc_trap # ordered?
17423: ftrapcc_or_no:
17424: rts # do nothing
17425:
17426: #
17427: # unordered:
17428: #
17429: # NAN
17430: #
17431: ftrapcc_un:
17432: fbun.w ftrapcc_trap # unordered?
17433: ftrapcc_un_no:
17434: rts # do nothing
17435:
17436: #######################################################################
17437:
17438: # the bsun exception bit was not set.
17439: # we will need to jump to the ftrapcc vector. the stack frame
17440: # is the same size as that of the fp unimp instruction. the
17441: # only difference is that the <ea> field should hold the PC
17442: # of the ftrapcc instruction and the vector offset field
17443: # should denote the ftrapcc trap.
17444: ftrapcc_trap:
17445: mov.b &ftrapcc_flg,SPCOND_FLG(%a6) # flag special condition: take ftrapcc trap
17446: rts
17447:
17448: # the emulation routine set bsun and BSUN was enabled. have to
17449: # fix stack and jump to the bsun handler.
17450: # let the caller of this routine shift the stack frame up to
17451: # eliminate the effective address field.
17452: ftrapcc_bsun:
17453: mov.b &fbsun_flg,SPCOND_FLG(%a6) # flag special condition: bsun exception
17454: rts
17455:
17456: #########################################################################
17457: # fscc(): routine to emulate the fscc instruction #
17458: # #
17459: # XDEF **************************************************************** #
17460: # _fscc() #
17461: # #
17462: # XREF **************************************************************** #
17463: # store_dreg_b() - store result to data register file #
17464: # dec_areg() - decrement an areg for -(an) mode #
17465: # inc_areg() - increment an areg for (an)+ mode #
17466: # _dmem_write_byte() - store result to memory #
17467: # #
17468: # INPUT *************************************************************** #
17469: # none #
17470: # #
17471: # OUTPUT ************************************************************** #
17472: # none #
17473: # #
17474: # ALGORITHM *********************************************************** #
17475: # This routine checks which conditional predicate is specified by #
17476: # the stacked fscc instruction opcode and then branches to a routine #
17477: # for that predicate. The corresponding fbcc instruction is then used #
17478: # to see whether the condition (specified by the stacked FPSR) is true #
17479: # or false. #
17480: # If a BSUN exception should be indicated, the BSUN and AIOP #
17481: # bits are set in the stacked FPSR. If the BSUN exception is enabled, #
17482: # the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
17483: # enabled BSUN should not be flagged and the predicate is true, then #
17484: # the result is stored to the data register file or memory #
17485: # #
17486: #########################################################################
17487:
17488: global _fscc
17489: _fscc:
17490: mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
17491:
17492: clr.l %d1 # clear scratch reg
17493: mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
17494: ror.l &0x8,%d1 # rotate to top byte
17495: fmov.l %d1,%fpsr # insert into FPSR
17496:
17497: mov.w (tbl_fscc.b,%pc,%d0.w*2),%d1 # load table offset for predicate
17498: jmp (tbl_fscc.b,%pc,%d1.w) # jump to fscc routine
17499:
17500: tbl_fscc:
17501: short fscc_f - tbl_fscc # 00
17502: short fscc_eq - tbl_fscc # 01
17503: short fscc_ogt - tbl_fscc # 02
17504: short fscc_oge - tbl_fscc # 03
17505: short fscc_olt - tbl_fscc # 04
17506: short fscc_ole - tbl_fscc # 05
17507: short fscc_ogl - tbl_fscc # 06
17508: short fscc_or - tbl_fscc # 07
17509: short fscc_un - tbl_fscc # 08
17510: short fscc_ueq - tbl_fscc # 09
17511: short fscc_ugt - tbl_fscc # 10
17512: short fscc_uge - tbl_fscc # 11
17513: short fscc_ult - tbl_fscc # 12
17514: short fscc_ule - tbl_fscc # 13
17515: short fscc_neq - tbl_fscc # 14
17516: short fscc_t - tbl_fscc # 15
17517: short fscc_sf - tbl_fscc # 16
17518: short fscc_seq - tbl_fscc # 17
17519: short fscc_gt - tbl_fscc # 18
17520: short fscc_ge - tbl_fscc # 19
17521: short fscc_lt - tbl_fscc # 20
17522: short fscc_le - tbl_fscc # 21
17523: short fscc_gl - tbl_fscc # 22
17524: short fscc_gle - tbl_fscc # 23
17525: short fscc_ngle - tbl_fscc # 24
17526: short fscc_ngl - tbl_fscc # 25
17527: short fscc_nle - tbl_fscc # 26
17528: short fscc_nlt - tbl_fscc # 27
17529: short fscc_nge - tbl_fscc # 28
17530: short fscc_ngt - tbl_fscc # 29
17531: short fscc_sneq - tbl_fscc # 30
17532: short fscc_st - tbl_fscc # 31
17533:
17534: #########################################################################
17535: # #
17536: # IEEE Nonaware tests #
17537: # #
17538: # For the IEEE nonaware tests, we set the result based on the #
17539: # floating point condition codes. In addition, we check to see #
17540: # if the NAN bit is set, in which case BSUN and AIOP will be set. #
17541: # #
17542: # The cases EQ and NE are shared by the Aware and Nonaware groups #
17543: # and are incapable of setting the BSUN exception bit. #
17544: # #
17545: # Typically, only one of the two possible branch directions could #
17546: # have the NAN bit set. #
17547: # #
17548: #########################################################################
17549:
17550: #
17551: # equal:
17552: #
17553: # Z
17554: #
17555: fscc_eq:
17556: fbeq.w fscc_eq_yes # equal?
17557: fscc_eq_no:
17558: clr.b %d0 # set false
17559: bra.w fscc_done # go finish
17560: fscc_eq_yes:
17561: st %d0 # set true (0xff)
17562: bra.w fscc_done # go finish
17563:
17564: #
17565: # not equal:
17566: # _
17567: # Z
17568: #
17569: fscc_neq:
17570: fbneq.w fscc_neq_yes # not equal?
17571: fscc_neq_no:
17572: clr.b %d0 # set false
17573: bra.w fscc_done # go finish
17574: fscc_neq_yes:
17575: st %d0 # set true
17576: bra.w fscc_done # go finish
17577:
17578: #
17579: # greater than:
17580: # _______
17581: # NANvZvN
17582: #
17583: fscc_gt:
17584: fbgt.w fscc_gt_yes # greater than?
17585: fscc_gt_no:
17586: clr.b %d0 # set false
17587: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17588: beq.w fscc_done # no;go finish
17589: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17590: bra.w fscc_chk_bsun # go finish
17591: fscc_gt_yes:
17592: st %d0 # set true
17593: bra.w fscc_done # go finish
17594:
17595: #
17596: # not greater than:
17597: #
17598: # NANvZvN
17599: #
17600: fscc_ngt:
17601: fbngt.w fscc_ngt_yes # not greater than?
17602: fscc_ngt_no:
17603: clr.b %d0 # set false
17604: bra.w fscc_done # go finish
17605: fscc_ngt_yes:
17606: st %d0 # set true (0xff)
17607: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17608: beq.w fscc_done # no;go finish
17609: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17610: bra.w fscc_chk_bsun # go finish
17611:
17612: #
17613: # greater than or equal:
17614: # _____
17615: # Zv(NANvN)
17616: #
17617: fscc_ge:
17618: fbge.w fscc_ge_yes # greater than or equal?
17619: fscc_ge_no:
17620: clr.b %d0 # set false
17621: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17622: beq.w fscc_done # no;go finish
17623: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17624: bra.w fscc_chk_bsun # go finish
17625: fscc_ge_yes:
17626: st %d0 # set true
17627: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17628: beq.w fscc_done # no;go finish
17629: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17630: bra.w fscc_chk_bsun # go finish
17631:
17632: #
17633: # not (greater than or equal):
17634: # _
17635: # NANv(N^Z)
17636: #
17637: fscc_nge:
17638: fbnge.w fscc_nge_yes # not (greater than or equal)?
17639: fscc_nge_no:
17640: clr.b %d0 # set false
17641: bra.w fscc_done # go finish
17642: fscc_nge_yes:
17643: st %d0 # set true
17644: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17645: beq.w fscc_done # no;go finish
17646: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17647: bra.w fscc_chk_bsun # go finish
17648:
17649: #
17650: # less than:
17651: # _____
17652: # N^(NANvZ)
17653: #
17654: fscc_lt:
17655: fblt.w fscc_lt_yes # less than?
17656: fscc_lt_no:
17657: clr.b %d0 # set false
17658: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17659: beq.w fscc_done # no;go finish
17660: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17661: bra.w fscc_chk_bsun # go finish
17662: fscc_lt_yes:
17663: st %d0 # set true (0xff)
17664: bra.w fscc_done # go finish
17665:
17666: #
17667: # not less than:
17668: # _
17669: # NANv(ZvN)
17670: #
17671: fscc_nlt:
17672: fbnlt.w fscc_nlt_yes # not less than?
17673: fscc_nlt_no:
17674: clr.b %d0 # set false
17675: bra.w fscc_done # go finish
17676: fscc_nlt_yes:
17677: st %d0 # set true
17678: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17679: beq.w fscc_done # no;go finish
17680: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17681: bra.w fscc_chk_bsun # go finish
17682:
17683: #
17684: # less than or equal:
17685: # ___
17686: # Zv(N^NAN)
17687: #
17688: fscc_le:
17689: fble.w fscc_le_yes # less than or equal?
17690: fscc_le_no:
17691: clr.b %d0 # set false
17692: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17693: beq.w fscc_done # no;go finish
17694: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17695: bra.w fscc_chk_bsun # go finish
17696: fscc_le_yes:
17697: st %d0 # set true
17698: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17699: beq.w fscc_done # no;go finish
17700: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17701: bra.w fscc_chk_bsun # go finish
17702:
17703: #
17704: # not (less than or equal):
17705: # ___
17706: # NANv(NvZ)
17707: #
17708: fscc_nle:
17709: fbnle.w fscc_nle_yes # not (less than or equal)?
17710: fscc_nle_no:
17711: clr.b %d0 # set false
17712: bra.w fscc_done # go finish
17713: fscc_nle_yes:
17714: st %d0 # set true (0xff)
17715: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17716: beq.w fscc_done # no;go finish
17717: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17718: bra.w fscc_chk_bsun # go finish
17719:
17720: #
17721: # greater or less than:
17722: # _____
17723: # NANvZ
17724: #
17725: fscc_gl:
17726: fbgl.w fscc_gl_yes # greater or less than?
17727: fscc_gl_no:
17728: clr.b %d0 # set false
17729: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17730: beq.w fscc_done # no;go finish
17731: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17732: bra.w fscc_chk_bsun # go finish
17733: fscc_gl_yes:
17734: st %d0 # set true
17735: bra.w fscc_done # go finish
17736:
17737: #
17738: # not (greater or less than):
17739: #
17740: # NANvZ
17741: #
17742: fscc_ngl:
17743: fbngl.w fscc_ngl_yes # not (greater or less than)?
17744: fscc_ngl_no:
17745: clr.b %d0 # set false
17746: bra.w fscc_done # go finish
17747: fscc_ngl_yes:
17748: st %d0 # set true
17749: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17750: beq.w fscc_done # no;go finish
17751: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17752: bra.w fscc_chk_bsun # go finish
17753:
17754: #
17755: # greater, less, or equal:
17756: # ___
17757: # NAN
17758: #
17759: fscc_gle:
17760: fbgle.w fscc_gle_yes # greater, less, or equal?
17761: fscc_gle_no:
17762: clr.b %d0 # set false
17763: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # GLE false => NAN set; set BSUN exc bit
17764: bra.w fscc_chk_bsun # go finish
17765: fscc_gle_yes:
17766: st %d0 # set true (0xff)
17767: bra.w fscc_done # go finish
17768:
17769: #
17770: # not (greater, less, or equal):
17771: #
17772: # NAN
17773: #
17774: fscc_ngle:
17775: fbngle.w fscc_ngle_yes # not (greater, less, or equal)?
17776: fscc_ngle_no:
17777: clr.b %d0 # set false
17778: bra.w fscc_done # go finish
17779: fscc_ngle_yes:
17780: st %d0 # set true
17781: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # NGLE true => NAN set; set BSUN exc bit
17782: bra.w fscc_chk_bsun # go finish
17783:
17784: #########################################################################
17785: # #
17786: # Miscellaneous tests #
17787: # #
17788: # These are the nonaware tests F, T, SF, ST, SEQ, and SNEQ.  F and T	#
17789: # never set BSUN; the signalling tests (SF, ST, SEQ, SNEQ) set the	#
17790: # BSUN and AIOP exception bits whenever the NAN condition code is set.	#
17791: # #
17792: #########################################################################
17793:
17794: #
17795: # false:
17796: #
17797: # False
17798: #
17799: fscc_f:
17800: clr.b %d0 # set false
17801: bra.w fscc_done # go finish
17802:
17803: #
17804: # true:
17805: #
17806: # True
17807: #
17808: fscc_t:
17809: st %d0 # set true
17810: bra.w fscc_done # go finish
17811:
17812: #
17813: # signalling false:
17814: #
17815: # False
17816: #
17817: fscc_sf:
17818: clr.b %d0 # set false
17819: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17820: beq.w fscc_done # no;go finish
17821: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17822: bra.w fscc_chk_bsun # go finish
17823:
17824: #
17825: # signalling true:
17826: #
17827: # True
17828: #
17829: fscc_st:
17830: st %d0 # set true
17831: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17832: beq.w fscc_done # no;go finish
17833: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17834: bra.w fscc_chk_bsun # go finish
17835:
17836: #
17837: # signalling equal:
17838: #
17839: # Z
17840: #
17841: fscc_seq:
17842: fbseq.w fscc_seq_yes # signalling equal?
17843: fscc_seq_no:
17844: clr.b %d0 # set false
17845: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17846: beq.w fscc_done # no;go finish
17847: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17848: bra.w fscc_chk_bsun # go finish
17849: fscc_seq_yes:
17850: st %d0 # set true
17851: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17852: beq.w fscc_done # no;go finish
17853: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17854: bra.w fscc_chk_bsun # go finish
17855:
17856: #
17857: # signalling not equal:
17858: # _
17859: # Z
17860: #
17861: fscc_sneq:
17862: fbsneq.w fscc_sneq_yes # signalling not equal?
17863: fscc_sneq_no:
17864: clr.b %d0 # set false
17865: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17866: beq.w fscc_done # no;go finish
17867: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17868: bra.w fscc_chk_bsun # go finish
17869: fscc_sneq_yes:
17870: st %d0 # set true
17871: btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
17872: beq.w fscc_done # no;go finish
17873: ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
17874: bra.w fscc_chk_bsun # go finish
17875:
17876: #########################################################################
17877: # #
17878: # IEEE Aware tests #
17879: # #
17880: # For the IEEE aware tests, we only have to set the result based on the #
17881: # floating point condition codes. The BSUN exception will not be #
17882: # set for any of these tests. #
17883: # #
17884: #########################################################################
17885:
17886: #
17887: # ordered greater than:
17888: # _______
17889: # NANvZvN
17890: #
17891: fscc_ogt:
17892: fbogt.w fscc_ogt_yes # ordered greater than?
17893: fscc_ogt_no:
17894: clr.b %d0 # set false
17895: bra.w fscc_done # go finish
17896: fscc_ogt_yes:
17897: st %d0 # set true (0xff)
17898: bra.w fscc_done # go finish
17899:
17900: #
17901: # unordered or less or equal:
17902: # _______
17903: # NANvZvN
17904: #
17905: fscc_ule:
17906: fbule.w fscc_ule_yes # unordered or less or equal?
17907: fscc_ule_no:
17908: clr.b %d0 # set false
17909: bra.w fscc_done # go finish
17910: fscc_ule_yes:
17911: st %d0 # set true
17912: bra.w fscc_done # go finish
17913:
17914: #
17915: # ordered greater than or equal:
17916: # _____
17917: # Zv(NANvN)
17918: #
17919: fscc_oge:
17920: fboge.w fscc_oge_yes # ordered greater than or equal?
17921: fscc_oge_no:
17922: clr.b %d0 # set false
17923: bra.w fscc_done # go finish
17924: fscc_oge_yes:
17925: st %d0 # set true
17926: bra.w fscc_done # go finish
17927:
17928: #
17929: # unordered or less than:
17930: # _
17931: # NANv(N^Z)
17932: #
17933: fscc_ult:
17934: fbult.w fscc_ult_yes # unordered or less than?
17935: fscc_ult_no:
17936: clr.b %d0 # set false
17937: bra.w fscc_done # go finish
17938: fscc_ult_yes:
17939: st %d0 # set true
17940: bra.w fscc_done # go finish
17941:
17942: #
17943: # ordered less than:
17944: # _____
17945: # N^(NANvZ)
17946: #
17947: fscc_olt:
17948: fbolt.w fscc_olt_yes # ordered less than?
17949: fscc_olt_no:
17950: clr.b %d0 # set false
17951: bra.w fscc_done # go finish
17952: fscc_olt_yes:
17953: st %d0 # set true
17954: bra.w fscc_done # go finish
17955:
17956: #
17957: # unordered or greater or equal:
17958: #
17959: # NANvZvN
17960: #
17961: fscc_uge:
17962: fbuge.w fscc_uge_yes # unordered or greater or equal?
17963: fscc_uge_no:
17964: clr.b %d0 # set false
17965: bra.w fscc_done # go finish
17966: fscc_uge_yes:
17967: st %d0 # set true
17968: bra.w fscc_done # go finish
17969:
17970: #
17971: # ordered less than or equal:
17972: # ___
17973: # Zv(N^NAN)
17974: #
17975: fscc_ole:
17976: fbole.w fscc_ole_yes # ordered less than or equal?
17977: fscc_ole_no:
17978: clr.b %d0 # set false
17979: bra.w fscc_done # go finish
17980: fscc_ole_yes:
17981: st %d0 # set true
17982: bra.w fscc_done # go finish
17983:
17984: #
17985: # unordered or greater than:
17986: # ___
17987: # NANv(NvZ)
17988: #
17989: fscc_ugt:
17990: fbugt.w fscc_ugt_yes # unordered or greater than?
17991: fscc_ugt_no:
17992: clr.b %d0 # set false
17993: bra.w fscc_done # go finish
17994: fscc_ugt_yes:
17995: st %d0 # set true
17996: bra.w fscc_done # go finish
17997:
17998: #
17999: # ordered greater or less than:
18000: # _____
18001: # NANvZ
18002: #
18003: fscc_ogl:
18004: fbogl.w fscc_ogl_yes # ordered greater or less than?
18005: fscc_ogl_no:
18006: clr.b %d0 # set false
18007: bra.w fscc_done # go finish
18008: fscc_ogl_yes:
18009: st %d0 # set true
18010: bra.w fscc_done # go finish
18011:
18012: #
18013: # unordered or equal:
18014: #
18015: # NANvZ
18016: #
18017: fscc_ueq:
18018: fbueq.w fscc_ueq_yes # unordered or equal?
18019: fscc_ueq_no:
18020: clr.b %d0 # set false
18021: bra.w fscc_done # go finish
18022: fscc_ueq_yes:
18023: st %d0 # set true
18024: bra.w fscc_done # go finish
18025:
18026: #
18027: # ordered:
18028: # ___
18029: # NAN
18030: #
18031: fscc_or:
18032: fbor.w fscc_or_yes # ordered?
18033: fscc_or_no:
18034: clr.b %d0 # set false
18035: bra.w fscc_done # go finish
18036: fscc_or_yes:
18037: st %d0 # set true
18038: bra.w fscc_done # go finish
18039:
18040: #
18041: # unordered:
18042: #
18043: # NAN
18044: #
18045: fscc_un:
18046: fbun.w fscc_un_yes # unordered?
18047: fscc_un_no:
18048: clr.b %d0 # set false
18049: bra.w fscc_done # go finish
18050: fscc_un_yes:
18051: st %d0 # set true
18052: bra.w fscc_done # go finish
18053:
18054: #######################################################################
18055:
18056: #
18057: # the bsun exception bit was set. now, check to see if BSUN
18058: # is enabled. if so, don't store result and correct stack frame
18059: # for a bsun exception.
18060: #
18061: fscc_chk_bsun:
18062: btst &bsun_bit,FPCR_ENABLE(%a6) # is BSUN enabled?
18063: bne.w fscc_bsun
18064:
18065: #
18066: # the bsun exception bit was not set.
18067: # the result has been selected.
18068: # now, check to see if the result is to be stored in the data register
18069: # file or in memory.
18070: #
18071: fscc_done:
18072: mov.l %d0,%a0 # save result for a moment
18073:
18074: mov.b 1+EXC_OPWORD(%a6),%d1 # fetch lo opword
18075: mov.l %d1,%d0 # make a copy
18076: andi.b &0x38,%d1 # extract src mode
18077:
18078: bne.b fscc_mem_op # it's a memory operation
18079:
18080: mov.l %d0,%d1 # opword copy; mode is Dn
18081: andi.w &0x7,%d1 # pass index in d1
18082: mov.l %a0,%d0 # pass result in d0
18083: bsr.l store_dreg_b # save result in regfile
18084: rts
18085:
18086: #
18087: # the stacked <ea> is correct with the exception of:
18088: # -> Dn : <ea> is garbage
18089: #
18090: # if the addressing mode is post-increment or pre-decrement,
18091: # then the address registers have not been updated.
18092: #
# fscc_mem_op: store the fscc result byte to a memory <ea>.
# (An)+ and -(An) need post-write address-register updates, so they
# branch to their own handlers; all other modes write directly using
# the already-stacked effective address.
18093: fscc_mem_op:
18094: cmpi.b %d1,&0x18 # is <ea> (An)+ ?
18095: beq.b fscc_mem_inc # yes
18096: cmpi.b %d1,&0x20 # is <ea> -(An) ?
18097: beq.b fscc_mem_dec # yes
18098:
18099: mov.l %a0,%d0 # pass result in d0
18100: mov.l EXC_EA(%a6),%a0 # fetch <ea>
18101: bsr.l _dmem_write_byte # write result byte
18102:
18103: tst.l %d1 # did dstore fail?
18104: bne.w fscc_err # yes
18105:
18106: rts
18107:
18108: # addressing mode is post-increment. write the result byte. if the write
18109: # fails then don't update the address register. if write passes then
18110: # call inc_areg() to update the address register.
# fscc_mem_inc: fscc result store for (An)+ mode.
# The byte is written first; An is only incremented (via inc_areg)
# after the write is known to have succeeded.
18111: fscc_mem_inc:
18112: mov.l %a0,%d0 # pass result in d0
18113: mov.l EXC_EA(%a6),%a0 # fetch <ea>
18114: bsr.l _dmem_write_byte # write result byte
18115:
18116: tst.l %d1 # did dstore fail?
18117: bne.w fscc_err # yes
18118:
18119: mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18120: andi.w &0x7,%d1 # pass index in d1
18121: movq.l &0x1,%d0 # pass amt to inc by
18122: bsr.l inc_areg # increment address register
18123:
18124: rts
18125:
18126: # addressing mode is pre-decrement. write the result byte. if the write
18127: # fails then don't update the address register. if the write passes then
18128: # call dec_areg() to update the address register.
# fscc_mem_dec: fscc result store for -(An) mode.
# Mirrors fscc_mem_inc: write first, then decrement An (via dec_areg)
# only if the write succeeded.
18129: fscc_mem_dec:
18130: mov.l %a0,%d0 # pass result in d0
18131: mov.l EXC_EA(%a6),%a0 # fetch <ea>
18132: bsr.l _dmem_write_byte # write result byte
18133:
18134: tst.l %d1 # did dstore fail?
18135: bne.w fscc_err # yes
18136:
18137: mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
18138: andi.w &0x7,%d1 # pass index in d1
18139: movq.l &0x1,%d0 # pass amt to dec by
18140: bsr.l dec_areg # decrement address register
18141:
18142: rts
18143:
18144: # the emulation routine set bsun and BSUN was enabled. have to
18145: # fix stack and jump to the bsun handler.
18146: # let the caller of this routine shift the stack frame up to
18147: # eliminate the effective address field.
# fscc_bsun: BSUN trap is enabled; just record the special-condition
# flag and return -- the caller fixes the stack frame and dispatches
# the bsun exception.
18148: fscc_bsun:
18149: mov.b &fbsun_flg,SPCOND_FLG(%a6)
18150: rts
18151:
18152: # the byte write to memory has failed. pass the failing effective address
18153: # and a FSLW to funimp_dacc().
# fscc_err: the result byte write faulted. Stash 0x00a1 in the frame's
# vector-offset field (presumably the FSLW/vector encoding expected by
# facc_finish -- confirm against facc_finish) and take the common
# access-error exit.
18154: fscc_err:
18155: mov.w &0x00a1,EXC_VOFF(%a6)
18156: bra.l facc_finish
18157:
18158: #########################################################################
18159: # XDEF **************************************************************** #
18160: # fmovm_dynamic(): emulate "fmovm" dynamic instruction #
18161: # #
18162: # XREF **************************************************************** #
18163: # fetch_dreg() - fetch data register #
18164: # {i,d,}mem_read() - fetch data from memory #
18165: # _mem_write() - write data to memory #
18166: # iea_iacc() - instruction memory access error occurred #
18167: # iea_dacc() - data memory access error occurred #
18168: # restore() - restore An index regs if access error occurred #
18169: # #
18170: # INPUT *************************************************************** #
18171: # None #
18172: # #
18173: # OUTPUT ************************************************************** #
18174: # If instr is "fmovm Dn,-(A7)" from supervisor mode, #
18175: # d0 = size of dump #
18176: # d1 = Dn #
18177: # Else if instruction access error, #
18178: # d0 = FSLW #
18179: # Else if data access error, #
18180: # d0 = FSLW #
18181: # a0 = address of fault #
18182: # Else #
18183: # none. #
18184: # #
18185: # ALGORITHM *********************************************************** #
18186: # The effective address must be calculated since this is entered #
18187: # from an "Unimplemented Effective Address" exception handler. So, we #
18188: # have our own fcalc_ea() routine here. If an access error is flagged #
18189: # by a _{i,d,}mem_read() call, we must exit through the special #
18190: # handler. #
18191: # The data register is determined and its value loaded to get the #
18192: # string of FP registers affected. This value is used as an index into #
18193: # a lookup table such that we can determine the number of bytes #
18194: # involved. #
18195: # If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used #
18196: # to read in all FP values. Again, _mem_read() may fail and require a #
18197: # special exit. #
18198: # If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used #
18199: # to write all FP values. _mem_write() may also fail. #
18200: # If the instruction is "fmovm.x DN,-(a7)" from supervisor mode, #
18201: # then we return the size of the dump and the string to the caller #
18202: # so that the move can occur outside of this routine. This special #
18203: # case is required so that moves to the system stack are handled #
18204: # correctly. #
18205: # #
18206: # DYNAMIC: #
18207: # fmovm.x dn, <ea> #
18208: # fmovm.x <ea>, dn #
18209: # #
18210: # <WORD 1> <WORD2> #
18211: # 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
18212: # #
18213: # & = (0): predecrement addressing mode #
18214: # (1): postincrement or control addressing mode #
18215: # @ = (0): move listed regs from memory to the FPU #
18216: # (1): move listed regs from the FPU to memory #
18217: # $$$ : index of data register holding reg select mask #
18218: # #
18219: # NOTES: #
18220: # If the data register holds a zero, then the #
18221: # instruction is a nop. #
18222: # #
18223: #########################################################################
18224:
18225: 	global		fmovm_dynamic
# fmovm_dynamic: emulate the dynamic-register-list form of fmovm.
# The register select mask lives in a data register named by the
# extension word; a zero mask makes the instruction a no-op (but the
# <ea> calculation must still run to advance the opword pointer and
# update any (An)+/-(An) register).
18226: fmovm_dynamic:
18227:
18228: # extract the data register in which the bit string resides...
18229: mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword
18230: andi.w &0x70,%d1 # extract reg bits
18231: lsr.b &0x4,%d1 # shift into lo bits
18232:
18233: # fetch the bit string into d0...
18234: bsr.l fetch_dreg # fetch reg string
18235:
18236: andi.l &0x000000ff,%d0 # keep only lo byte
18237:
18238: mov.l %d0,-(%sp) # save strg
18239: mov.b (tbl_fmovm_size.w,%pc,%d0),%d0
18240: mov.l %d0,-(%sp) # save size
18241: bsr.l fmovm_calc_ea # calculate <ea>
18242: mov.l (%sp)+,%d0 # restore size
18243: mov.l (%sp)+,%d1 # restore strg
18244:
18245: # if the bit string is a zero, then the operation is a no-op
18246: # but, make sure that we've calculated ea and advanced the opword pointer
# (the mov.l into d1 above set the Z flag from the bit string)
18247: beq.w fmovm_data_done
18248:
18249: # separate move ins from move outs...
18250: btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
18251: beq.w fmovm_data_in # bit clear -> it's a move in
18252:
18253: #############
18254: # MOVE OUT: #
18255: #############
# fmovm_data_out: FPU -> memory direction.
# Control/postincrement modes use the straight-line bit string; the
# predecrement mask is bit-reversed first via tbl_fmovm_convert so one
# common store loop (fmovm_out_ctrl) serves all modes.
18256: fmovm_data_out:
18257: btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
18258: bne.w fmovm_out_ctrl # control
18259:
18260: ############################
18261: fmovm_out_predec:
18262: # for predecrement mode, the bit string is the opposite of both control
18263: # operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
18264: # here, we convert it to be just like the others...
18265: mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
18266:
18267: btst &0x5,EXC_SR(%a6) # user or supervisor mode?
18268: beq.b fmovm_out_ctrl # user
18269:
18270: fmovm_out_predec_s:
18271: cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
18272: bne.b fmovm_out_ctrl
18273:
18274: # the operation was unfortunately an: fmovm.x dn,-(sp)
18275: # called from supervisor mode.
18276: # we're also passing "size" and "strg" back to the calling routine
18277: rts
18278:
18279: ############################
# fmovm_out_ctrl: build the FP register dump on the supervisor stack,
# then copy it to the user <ea> in one _dmem_write.
# In:  a0 = <ea>, d0 = byte count, d1 = normalized mask (bit7 = FP0).
# FP0/FP1 are taken from their exception-frame images (EXC_FP0/EXC_FP1);
# FP2-FP7 are still live in the FPU and dumped with fmovm.x.
# Each lsl.b shifts the next register's bit into the sign position for
# the bpl test.
18280: fmovm_out_ctrl:
18281: mov.l %a0,%a1 # move <ea> to a1
18282:
18283: sub.l %d0,%sp # subtract size of dump
18284: lea (%sp),%a0
18285:
18286: tst.b %d1 # should FP0 be moved?
18287: bpl.b fmovm_out_ctrl_fp1 # no
18288:
18289: mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
18290: mov.l 0x4+EXC_FP0(%a6),(%a0)+
18291: mov.l 0x8+EXC_FP0(%a6),(%a0)+
18292:
18293: fmovm_out_ctrl_fp1:
18294: lsl.b &0x1,%d1 # should FP1 be moved?
18295: bpl.b fmovm_out_ctrl_fp2 # no
18296:
18297: mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
18298: mov.l 0x4+EXC_FP1(%a6),(%a0)+
18299: mov.l 0x8+EXC_FP1(%a6),(%a0)+
18300:
18301: fmovm_out_ctrl_fp2:
18302: lsl.b &0x1,%d1 # should FP2 be moved?
18303: bpl.b fmovm_out_ctrl_fp3 # no
18304:
18305: fmovm.x &0x20,(%a0) # yes
18306: add.l &0xc,%a0
18307:
18308: fmovm_out_ctrl_fp3:
18309: lsl.b &0x1,%d1 # should FP3 be moved?
18310: bpl.b fmovm_out_ctrl_fp4 # no
18311:
18312: fmovm.x &0x10,(%a0) # yes
18313: add.l &0xc,%a0
18314:
18315: fmovm_out_ctrl_fp4:
18316: lsl.b &0x1,%d1 # should FP4 be moved?
18317: bpl.b fmovm_out_ctrl_fp5 # no
18318:
18319: fmovm.x &0x08,(%a0) # yes
18320: add.l &0xc,%a0
18321:
18322: fmovm_out_ctrl_fp5:
18323: lsl.b &0x1,%d1 # should FP5 be moved?
18324: bpl.b fmovm_out_ctrl_fp6 # no
18325:
18326: fmovm.x &0x04,(%a0) # yes
18327: add.l &0xc,%a0
18328:
18329: fmovm_out_ctrl_fp6:
18330: lsl.b &0x1,%d1 # should FP6 be moved?
18331: bpl.b fmovm_out_ctrl_fp7 # no
18332:
18333: fmovm.x &0x02,(%a0) # yes
18334: add.l &0xc,%a0
18335:
18336: fmovm_out_ctrl_fp7:
18337: lsl.b &0x1,%d1 # should FP7 be moved?
18338: bpl.b fmovm_out_ctrl_done # no
18339:
18340: fmovm.x &0x01,(%a0) # yes
18341: add.l &0xc,%a0
18342:
18343: fmovm_out_ctrl_done:
# destination <ea> saved for _dmem_write (a1 also still holds it)
18344: mov.l %a1,L_SCR1(%a6)
18345:
18346: lea (%sp),%a0 # pass: supervisor src
18347: mov.l %d0,-(%sp) # save size
18348: bsr.l _dmem_write # copy data to user mem
18349:
18350: mov.l (%sp)+,%d0
18351: add.l %d0,%sp # clear fpreg data from stack
18352:
18353: tst.l %d1 # did dstore err?
18354: bne.w fmovm_out_err # yes
18355:
18356: rts
18357:
18358: ############
18359: # MOVE IN: #
18360: ############
# fmovm_data_in: memory -> FPU direction.
# Reads the whole dump from the user <ea> into a stack buffer with one
# _dmem_read, then scatters it: FP0/FP1 into their exception-frame
# images, FP2-FP7 straight into the live FPU registers via fmovm.x.
# In:  a0 = <ea>, d0 = byte count, d1 = mask (bit7 = FP0).
18361: fmovm_data_in:
18362: mov.l %a0,L_SCR1(%a6)
18363:
18364: sub.l %d0,%sp # make room for fpregs
18365: lea (%sp),%a1
18366:
18367: mov.l %d1,-(%sp) # save bit string for later
18368: mov.l %d0,-(%sp) # save # of bytes
18369:
18370: bsr.l _dmem_read # copy data from user mem
18371:
18372: mov.l (%sp)+,%d0 # retrieve # of bytes
18373:
18374: tst.l %d1 # did dfetch fail?
18375: bne.w fmovm_in_err # yes
18376:
18377: mov.l (%sp)+,%d1 # load bit string
18378:
18379: lea (%sp),%a0 # addr of stack
18380:
18381: tst.b %d1 # should FP0 be moved?
18382: bpl.b fmovm_data_in_fp1 # no
18383:
18384: mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
18385: mov.l (%a0)+,0x4+EXC_FP0(%a6)
18386: mov.l (%a0)+,0x8+EXC_FP0(%a6)
18387:
18388: fmovm_data_in_fp1:
18389: lsl.b &0x1,%d1 # should FP1 be moved?
18390: bpl.b fmovm_data_in_fp2 # no
18391:
18392: mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
18393: mov.l (%a0)+,0x4+EXC_FP1(%a6)
18394: mov.l (%a0)+,0x8+EXC_FP1(%a6)
18395:
18396: fmovm_data_in_fp2:
18397: lsl.b &0x1,%d1 # should FP2 be moved?
18398: bpl.b fmovm_data_in_fp3 # no
18399:
18400: fmovm.x (%a0)+,&0x20 # yes
18401:
18402: fmovm_data_in_fp3:
18403: lsl.b &0x1,%d1 # should FP3 be moved?
18404: bpl.b fmovm_data_in_fp4 # no
18405:
18406: fmovm.x (%a0)+,&0x10 # yes
18407:
18408: fmovm_data_in_fp4:
18409: lsl.b &0x1,%d1 # should FP4 be moved?
18410: bpl.b fmovm_data_in_fp5 # no
18411:
18412: fmovm.x (%a0)+,&0x08 # yes
18413:
18414: fmovm_data_in_fp5:
18415: lsl.b &0x1,%d1 # should FP5 be moved?
18416: bpl.b fmovm_data_in_fp6 # no
18417:
18418: fmovm.x (%a0)+,&0x04 # yes
18419:
18420: fmovm_data_in_fp6:
18421: lsl.b &0x1,%d1 # should FP6 be moved?
18422: bpl.b fmovm_data_in_fp7 # no
18423:
18424: fmovm.x (%a0)+,&0x02 # yes
18425:
18426: fmovm_data_in_fp7:
18427: lsl.b &0x1,%d1 # should FP7 be moved?
18428: bpl.b fmovm_data_in_done # no
18429:
18430: fmovm.x (%a0)+,&0x01 # yes
18431:
18432: fmovm_data_in_done:
18433: add.l %d0,%sp # remove fpregs from stack
18434: rts
18435:
18436: #####################################
18437:
# fmovm_data_done: no-op exit (zero bit string); <ea> side effects
# were already performed by fmovm_calc_ea.
18438: fmovm_data_done:
18439: rts
18440:
18441: ##############################################################################
18442:
18443: #
18444: # table indexed by the operation's bit string that gives the number
18445: # of bytes that will be moved.
18446: #
18447: # number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
18448: #
18449: tbl_fmovm_size:
# entry[i] = (number of set bits in i) * 0x0c -- one 12-byte
# extended-precision register image per selected register.
18450: byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
18451: byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18452: byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18453: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18454: byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18455: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18456: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18457: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18458: byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18459: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18460: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18461: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18462: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18463: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18464: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18465: byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18466: byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
18467: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18468: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18469: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18470: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18471: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18472: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18473: byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18474: byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
18475: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18476: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18477: byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18478: byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
18479: byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18480: byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
18481: byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
18482:
18483: #
18484: # table to convert a pre-decrement bit string into a post-increment
18485: # or control bit string.
18486: # ex: 0x00 ==> 0x00
18487: # 0x01 ==> 0x80
18488: # 0x02 ==> 0x40
18489: # .
18490: # .
18491: # 0xfd ==> 0xbf
18492: # 0xfe ==> 0x7f
18493: # 0xff ==> 0xff
18494: #
18495: tbl_fmovm_convert:
# entry[i] = i with its 8 bits mirrored (bit0<->bit7, bit1<->bit6, ...),
# converting a predecrement-order mask to control/postincrement order.
18496: byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
18497: byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
18498: byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
18499: byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
18500: byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
18501: byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
18502: byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
18503: byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
18504: byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
18505: byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
18506: byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
18507: byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
18508: byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
18509: byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
18510: byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
18511: byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
18512: byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
18513: byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
18514: byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
18515: byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
18516: byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
18517: byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
18518: byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
18519: byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
18520: byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
18521: byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
18522: byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
18523: byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
18524: byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
18525: byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
18526: byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
18527: byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
18528:
18529: 	global		fmovm_calc_ea
18530: ###############################################
18531: # _fmovm_calc_ea: calculate effective address #
18532: ###############################################
# In:  d0 = # of bytes to transfer (needed by (An)+/-(An) handlers,
#      passed along in a0); a6 = exception frame ptr.
# Out: a0 = effective address.
# Dispatches through tbl_fea_mode on the opword's {mode,reg} pair.
18533: fmovm_calc_ea:
18534: mov.l %d0,%a0 # move # bytes to a0
18535:
18536: # currently, MODE and REG are taken from the EXC_OPWORD. this could be
18537: # easily changed if they were inputs passed in registers.
18538: mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
18539: mov.w %d0,%d1 # make a copy
18540:
18541: andi.w &0x3f,%d0 # extract mode field
18542: andi.l &0x7,%d1 # extract reg field
18543:
18544: # jump to the corresponding function for each {MODE,REG} pair.
18545: mov.w (tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
18546: jmp (tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
18547:
# tbl_fea_mode: 64-entry jump-offset table indexed by {mode,reg};
# entries of "tbl_fea_mode - tbl_fea_mode" (offset 0) mark invalid
# addressing modes and jump back to the table itself.
18548: 	swbeg		&64
18549: tbl_fea_mode:
# mode 0: Dn -- invalid for fmovm
18550: short tbl_fea_mode - tbl_fea_mode
18551: short tbl_fea_mode - tbl_fea_mode
18552: short tbl_fea_mode - tbl_fea_mode
18553: short tbl_fea_mode - tbl_fea_mode
18554: short tbl_fea_mode - tbl_fea_mode
18555: short tbl_fea_mode - tbl_fea_mode
18556: short tbl_fea_mode - tbl_fea_mode
18557: short tbl_fea_mode - tbl_fea_mode
18558:
# mode 1: An -- invalid for fmovm
18559: short tbl_fea_mode - tbl_fea_mode
18560: short tbl_fea_mode - tbl_fea_mode
18561: short tbl_fea_mode - tbl_fea_mode
18562: short tbl_fea_mode - tbl_fea_mode
18563: short tbl_fea_mode - tbl_fea_mode
18564: short tbl_fea_mode - tbl_fea_mode
18565: short tbl_fea_mode - tbl_fea_mode
18566: short tbl_fea_mode - tbl_fea_mode
18567:
# mode 2: (An)
18568: short faddr_ind_a0 - tbl_fea_mode
18569: short faddr_ind_a1 - tbl_fea_mode
18570: short faddr_ind_a2 - tbl_fea_mode
18571: short faddr_ind_a3 - tbl_fea_mode
18572: short faddr_ind_a4 - tbl_fea_mode
18573: short faddr_ind_a5 - tbl_fea_mode
18574: short faddr_ind_a6 - tbl_fea_mode
18575: short faddr_ind_a7 - tbl_fea_mode
18576:
# mode 3: (An)+
18577: short faddr_ind_p_a0 - tbl_fea_mode
18578: short faddr_ind_p_a1 - tbl_fea_mode
18579: short faddr_ind_p_a2 - tbl_fea_mode
18580: short faddr_ind_p_a3 - tbl_fea_mode
18581: short faddr_ind_p_a4 - tbl_fea_mode
18582: short faddr_ind_p_a5 - tbl_fea_mode
18583: short faddr_ind_p_a6 - tbl_fea_mode
18584: short faddr_ind_p_a7 - tbl_fea_mode
18585:
# mode 4: -(An)
18586: short faddr_ind_m_a0 - tbl_fea_mode
18587: short faddr_ind_m_a1 - tbl_fea_mode
18588: short faddr_ind_m_a2 - tbl_fea_mode
18589: short faddr_ind_m_a3 - tbl_fea_mode
18590: short faddr_ind_m_a4 - tbl_fea_mode
18591: short faddr_ind_m_a5 - tbl_fea_mode
18592: short faddr_ind_m_a6 - tbl_fea_mode
18593: short faddr_ind_m_a7 - tbl_fea_mode
18594:
# mode 5: (d16,An)
18595: short faddr_ind_disp_a0 - tbl_fea_mode
18596: short faddr_ind_disp_a1 - tbl_fea_mode
18597: short faddr_ind_disp_a2 - tbl_fea_mode
18598: short faddr_ind_disp_a3 - tbl_fea_mode
18599: short faddr_ind_disp_a4 - tbl_fea_mode
18600: short faddr_ind_disp_a5 - tbl_fea_mode
18601: short faddr_ind_disp_a6 - tbl_fea_mode
18602: short faddr_ind_disp_a7 - tbl_fea_mode
18603:
# mode 6: indexed / memory-indirect forms (one handler for all An)
18604: short faddr_ind_ext - tbl_fea_mode
18605: short faddr_ind_ext - tbl_fea_mode
18606: short faddr_ind_ext - tbl_fea_mode
18607: short faddr_ind_ext - tbl_fea_mode
18608: short faddr_ind_ext - tbl_fea_mode
18609: short faddr_ind_ext - tbl_fea_mode
18610: short faddr_ind_ext - tbl_fea_mode
18611: short faddr_ind_ext - tbl_fea_mode
18612:
# mode 7: (xxx).W, (xxx).L, (d16,PC), PC-indexed; reg 4-7 invalid
18613: short fabs_short - tbl_fea_mode
18614: short fabs_long - tbl_fea_mode
18615: short fpc_ind - tbl_fea_mode
18616: short fpc_ind_ext - tbl_fea_mode
18617: short tbl_fea_mode - tbl_fea_mode
18618: short tbl_fea_mode - tbl_fea_mode
18619: short tbl_fea_mode - tbl_fea_mode
18620: short tbl_fea_mode - tbl_fea_mode
18621:
18622: ###################################
18623: # Address register indirect: (An) #
18624: ###################################
# (An) handlers: return An in a0.
# a0/a1 are read from their saved frame copies (EXC_DREGS+0x8/+0xc),
# a2-a5 are still live, a6 is read through the frame pointer's saved
# value at (%a6), and a7 from EXC_A7.
18625: faddr_ind_a0:
18626: mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
18627: rts
18628:
18629: faddr_ind_a1:
18630: mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
18631: rts
18632:
18633: faddr_ind_a2:
18634: mov.l %a2,%a0 # Get current a2
18635: rts
18636:
18637: faddr_ind_a3:
18638: mov.l %a3,%a0 # Get current a3
18639: rts
18640:
18641: faddr_ind_a4:
18642: mov.l %a4,%a0 # Get current a4
18643: rts
18644:
18645: faddr_ind_a5:
18646: mov.l %a5,%a0 # Get current a5
18647: rts
18648:
18649: faddr_ind_a6:
18650: mov.l (%a6),%a0 # Get current a6
18651: rts
18652:
18653: faddr_ind_a7:
18654: mov.l EXC_A7(%a6),%a0 # Get current a7
18655: rts
18656:
18657: #####################################################
18658: # Address register indirect w/ postincrement: (An)+ #
18659: #####################################################
# (An)+ handlers: <ea> is the ORIGINAL An value (returned in a0);
# An itself is bumped by the transfer size, which arrives in a0
# (loaded by fmovm_calc_ea). The a7 case also records the mia7_flg
# special condition for the caller.
18660: faddr_ind_p_a0:
18661: mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
18662: mov.l %d0,%d1
18663: add.l %a0,%d1 # Increment
18664: mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
18665: mov.l %d0,%a0
18666: rts
18667:
18668: faddr_ind_p_a1:
18669: mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
18670: mov.l %d0,%d1
18671: add.l %a0,%d1 # Increment
18672: mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
18673: mov.l %d0,%a0
18674: rts
18675:
18676: faddr_ind_p_a2:
18677: mov.l %a2,%d0 # Get current a2
18678: mov.l %d0,%d1
18679: add.l %a0,%d1 # Increment
18680: mov.l %d1,%a2 # Save incr value
18681: mov.l %d0,%a0
18682: rts
18683:
18684: faddr_ind_p_a3:
18685: mov.l %a3,%d0 # Get current a3
18686: mov.l %d0,%d1
18687: add.l %a0,%d1 # Increment
18688: mov.l %d1,%a3 # Save incr value
18689: mov.l %d0,%a0
18690: rts
18691:
18692: faddr_ind_p_a4:
18693: mov.l %a4,%d0 # Get current a4
18694: mov.l %d0,%d1
18695: add.l %a0,%d1 # Increment
18696: mov.l %d1,%a4 # Save incr value
18697: mov.l %d0,%a0
18698: rts
18699:
18700: faddr_ind_p_a5:
18701: mov.l %a5,%d0 # Get current a5
18702: mov.l %d0,%d1
18703: add.l %a0,%d1 # Increment
18704: mov.l %d1,%a5 # Save incr value
18705: mov.l %d0,%a0
18706: rts
18707:
18708: faddr_ind_p_a6:
18709: mov.l (%a6),%d0 # Get current a6
18710: mov.l %d0,%d1
18711: add.l %a0,%d1 # Increment
18712: mov.l %d1,(%a6) # Save incr value
18713: mov.l %d0,%a0
18714: rts
18715:
18716: faddr_ind_p_a7:
18717: mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
18718:
18719: mov.l EXC_A7(%a6),%d0 # Get current a7
18720: mov.l %d0,%d1
18721: add.l %a0,%d1 # Increment
18722: mov.l %d1,EXC_A7(%a6) # Save incr value
18723: mov.l %d0,%a0
18724: rts
18725:
18726: ####################################################
18727: # Address register indirect w/ predecrement: -(An) #
18728: ####################################################
# -(An) handlers: An is decremented by the transfer size (passed in a0)
# FIRST, and the DECREMENTED value is both saved back and returned in
# a0 as the <ea>. The a7 case also records the mda7_flg special
# condition for the caller.
18729: faddr_ind_m_a0:
18730: mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
18731: sub.l %a0,%d0 # Decrement
18732: mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
18733: mov.l %d0,%a0
18734: rts
18735:
18736: faddr_ind_m_a1:
18737: mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
18738: sub.l %a0,%d0 # Decrement
18739: mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
18740: mov.l %d0,%a0
18741: rts
18742:
18743: faddr_ind_m_a2:
18744: mov.l %a2,%d0 # Get current a2
18745: sub.l %a0,%d0 # Decrement
18746: mov.l %d0,%a2 # Save decr value
18747: mov.l %d0,%a0
18748: rts
18749:
18750: faddr_ind_m_a3:
18751: mov.l %a3,%d0 # Get current a3
18752: sub.l %a0,%d0 # Decrement
18753: mov.l %d0,%a3 # Save decr value
18754: mov.l %d0,%a0
18755: rts
18756:
18757: faddr_ind_m_a4:
18758: mov.l %a4,%d0 # Get current a4
18759: sub.l %a0,%d0 # Decrement
18760: mov.l %d0,%a4 # Save decr value
18761: mov.l %d0,%a0
18762: rts
18763:
18764: faddr_ind_m_a5:
18765: mov.l %a5,%d0 # Get current a5
18766: sub.l %a0,%d0 # Decrement
18767: mov.l %d0,%a5 # Save decr value
18768: mov.l %d0,%a0
18769: rts
18770:
18771: faddr_ind_m_a6:
18772: mov.l (%a6),%d0 # Get current a6
18773: sub.l %a0,%d0 # Decrement
18774: mov.l %d0,(%a6) # Save decr value
18775: mov.l %d0,%a0
18776: rts
18777:
18778: faddr_ind_m_a7:
18779: mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
18780:
18781: mov.l EXC_A7(%a6),%d0 # Get current a7
18782: sub.l %a0,%d0 # Decrement
18783: mov.l %d0,EXC_A7(%a6) # Save decr value
18784: mov.l %d0,%a0
18785: rts
18786:
18787: ########################################################
18788: # Address register indirect w/ displacement: (d16, An) #
18789: ########################################################
# (d16,An) handlers: fetch the 16-bit displacement extension word from
# the instruction stream (advancing EXC_EXTWPTR), sign-extend it via
# the word move into a0, and add An. An ifetch fault exits through
# iea_iacc.
18790: faddr_ind_disp_a0:
18791: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18792: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18793: bsr.l _imem_read_word
18794:
18795: tst.l %d1 # did ifetch fail?
18796: bne.l iea_iacc # yes
18797:
18798: mov.w %d0,%a0 # sign extend displacement
18799:
18800: add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
18801: rts
18802:
18803: faddr_ind_disp_a1:
18804: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18805: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18806: bsr.l _imem_read_word
18807:
18808: tst.l %d1 # did ifetch fail?
18809: bne.l iea_iacc # yes
18810:
18811: mov.w %d0,%a0 # sign extend displacement
18812:
18813: add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
18814: rts
18815:
18816: faddr_ind_disp_a2:
18817: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18818: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18819: bsr.l _imem_read_word
18820:
18821: tst.l %d1 # did ifetch fail?
18822: bne.l iea_iacc # yes
18823:
18824: mov.w %d0,%a0 # sign extend displacement
18825:
18826: add.l %a2,%a0 # a2 + d16
18827: rts
18828:
18829: faddr_ind_disp_a3:
18830: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18831: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18832: bsr.l _imem_read_word
18833:
18834: tst.l %d1 # did ifetch fail?
18835: bne.l iea_iacc # yes
18836:
18837: mov.w %d0,%a0 # sign extend displacement
18838:
18839: add.l %a3,%a0 # a3 + d16
18840: rts
18841:
18842: faddr_ind_disp_a4:
18843: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18844: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18845: bsr.l _imem_read_word
18846:
18847: tst.l %d1 # did ifetch fail?
18848: bne.l iea_iacc # yes
18849:
18850: mov.w %d0,%a0 # sign extend displacement
18851:
18852: add.l %a4,%a0 # a4 + d16
18853: rts
18854:
18855: faddr_ind_disp_a5:
18856: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18857: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18858: bsr.l _imem_read_word
18859:
18860: tst.l %d1 # did ifetch fail?
18861: bne.l iea_iacc # yes
18862:
18863: mov.w %d0,%a0 # sign extend displacement
18864:
18865: add.l %a5,%a0 # a5 + d16
18866: rts
18867:
18868: faddr_ind_disp_a6:
18869: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18870: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18871: bsr.l _imem_read_word
18872:
18873: tst.l %d1 # did ifetch fail?
18874: bne.l iea_iacc # yes
18875:
18876: mov.w %d0,%a0 # sign extend displacement
18877:
18878: add.l (%a6),%a0 # a6 + d16
18879: rts
18880:
18881: faddr_ind_disp_a7:
18882: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18883: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18884: bsr.l _imem_read_word
18885:
18886: tst.l %d1 # did ifetch fail?
18887: bne.l iea_iacc # yes
18888:
18889: mov.w %d0,%a0 # sign extend displacement
18890:
18891: add.l EXC_A7(%a6),%a0 # a7 + d16
18892: rts
18893:
18894: ########################################################################
18895: # Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
18896: # " " " w/ " (base displacement): (bd, An, Xn) #
18897: # Memory indirect postindexed: ([bd, An], Xn, od) #
18898: # Memory indirect preindexed: ([bd, An, Xn], od) #
18899: ########################################################################
# faddr_ind_ext: mode-6 (brief/full extension) <ea> calculation.
# In: d1 = areg number (fetch_dreg indices 8-15 select address regs,
# hence the addq of 8). A set bit 8 in the extension word means a full
# format word -> hand off to fcalc_mem_ind; otherwise decode the brief
# format here: index reg, W/L size, scale, 8-bit displacement.
18900: faddr_ind_ext:
18901: addq.l &0x8,%d1
18902: bsr.l fetch_dreg # fetch base areg
18903: mov.l %d0,-(%sp)
18904:
18905: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18906: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18907: bsr.l _imem_read_word # fetch extword in d0
18908:
18909: tst.l %d1 # did ifetch fail?
18910: bne.l iea_iacc # yes
18911:
18912: mov.l (%sp)+,%a0
18913:
18914: btst &0x8,%d0
18915: bne.w fcalc_mem_ind
18916:
18917: mov.l %d0,L_SCR1(%a6) # hold opword
18918:
18919: mov.l %d0,%d1
18920: rol.w &0x4,%d1
18921: andi.w &0xf,%d1 # extract index regno
18922:
18923: # count on fetch_dreg() not to alter a0...
18924: bsr.l fetch_dreg # fetch index
18925:
18926: mov.l %d2,-(%sp) # save d2
18927: mov.l L_SCR1(%a6),%d2 # fetch opword
18928:
18929: btst &0xb,%d2 # is it word or long?
18930: bne.b faii8_long
18931: ext.l %d0 # sign extend word index
18932: faii8_long:
18933: mov.l %d2,%d1
18934: rol.w &0x7,%d1
18935: andi.l &0x3,%d1 # extract scale value
18936:
18937: lsl.l %d1,%d0 # shift index by scale
18938:
18939: extb.l %d2 # sign extend displacement
18940: add.l %d2,%d0 # index + disp
18941: add.l %d0,%a0 # An + (index + disp)
18942:
18943: mov.l (%sp)+,%d2 # restore old d2
18944: rts
18945:
18946: ###########################
18947: # Absolute short: (XXX).W #
18948: ###########################
# fabs_short: absolute short (xxx).W <ea>. The word move into a0
# sign-extends the fetched 16-bit address to 32 bits.
18949: fabs_short:
18950: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18951: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18952: bsr.l _imem_read_word # fetch short address
18953:
18954: tst.l %d1 # did ifetch fail?
18955: bne.l iea_iacc # yes
18956:
18957: mov.w %d0,%a0 # return <ea> in a0
18958: rts
18959:
18960: ##########################
18961: # Absolute long: (XXX).L #
18962: ##########################
# fabs_long: absolute long (xxx).L <ea> -- fetch the 32-bit address
# extension and return it in a0.
18963: fabs_long:
18964: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18965: addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
18966: bsr.l _imem_read_long # fetch long address
18967:
18968: tst.l %d1 # did ifetch fail?
18969: bne.l iea_iacc # yes
18970:
18971: mov.l %d0,%a0 # return <ea> in a0
18972: rts
18973:
18974: #######################################################
18975: # Program counter indirect w/ displacement: (d16, PC) #
18976: #######################################################
# fpc_ind: (d16,PC) <ea>. The base PC is the address of the extension
# word itself, so the subq undoes the +2 that _imem_read_word added to
# EXC_EXTWPTR.
18977: fpc_ind:
18978: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
18979: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
18980: bsr.l _imem_read_word # fetch word displacement
18981:
18982: tst.l %d1 # did ifetch fail?
18983: bne.l iea_iacc # yes
18984:
18985: mov.w %d0,%a0 # sign extend displacement
18986:
18987: add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
18988:
18989: # _imem_read_word() increased the extwptr by 2. need to adjust here.
18990: subq.l &0x2,%a0 # adjust <ea>
18991: rts
18992:
18993: ##########################################################
18994: # PC indirect w/ index(8-bit displacement): (d8, PC, An) #
18995: # " " w/ " (base displacement): (bd, PC, An) #
18996: # PC memory indirect postindexed: ([bd, PC], Xn, od) #
18997: # PC memory indirect preindexed: ([bd, PC, Xn], od) #
18998: ##########################################################
# fpc_ind_ext: PC-relative indexed / memory-indirect <ea>.
# Same decode as faddr_ind_ext but the base is the (adjusted) PC of
# the extension word; full-format words (bit 8 set) are handed to
# fcalc_mem_ind.
18999: fpc_ind_ext:
19000: mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
19001: addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
19002: bsr.l _imem_read_word # fetch ext word
19003:
19004: tst.l %d1 # did ifetch fail?
19005: bne.l iea_iacc # yes
19006:
19007: mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
19008: subq.l &0x2,%a0 # adjust base
19009:
19010: btst &0x8,%d0 # is disp only 8 bits?
19011: bne.w fcalc_mem_ind # calc memory indirect
19012:
19013: mov.l %d0,L_SCR1(%a6) # store opword
19014:
19015: mov.l %d0,%d1 # make extword copy
19016: rol.w &0x4,%d1 # rotate reg num into place
19017: andi.w &0xf,%d1 # extract register number
19018:
19019: # count on fetch_dreg() not to alter a0...
19020: bsr.l fetch_dreg # fetch index
19021:
19022: mov.l %d2,-(%sp) # save d2
19023: mov.l L_SCR1(%a6),%d2 # fetch opword
19024:
19025: btst &0xb,%d2 # is index word or long?
19026: bne.b fpii8_long # long
19027: ext.l %d0 # sign extend word index
19028: fpii8_long:
19029: mov.l %d2,%d1
19030: rol.w &0x7,%d1 # rotate scale value into place
19031: andi.l &0x3,%d1 # extract scale value
19032:
19033: lsl.l %d1,%d0 # shift index by scale
19034:
19035: extb.l %d2 # sign extend displacement
19036: add.l %d2,%d0 # disp + index
19037: add.l %d0,%a0 # An + (index + disp)
19038:
19039: mov.l (%sp)+,%d2 # restore temp register
19040: rts
19041:
19042: # d2 = index
19043: # d3 = base
19044: # d4 = od
19045: # d5 = extword
19046: fcalc_mem_ind:
19047: 	btst		&0x6,%d0		# is the index suppressed?
19048: 	beq.b		fcalc_index		# no; go fetch index register
19049: 
19050: 	movm.l		&0x3c00,-(%sp)		# save d2-d5
19051: 
19052: 	mov.l		%d0,%d5			# put extword in d5
19053: 	mov.l		%a0,%d3			# put base in d3
19054: 
19055: 	clr.l		%d2			# yes, so index = 0
19056: 	bra.b		fbase_supp_ck
19057: 
19058: # index:
19059: fcalc_index:
19060: 	mov.l		%d0,L_SCR1(%a6)		# save d0 (opword)
19061: 	bfextu		%d0{&16:&4},%d1		# fetch dreg index
19062: 	bsr.l		fetch_dreg		# index value returned in d0
19063: 
19064: 	movm.l		&0x3c00,-(%sp)		# save d2-d5
19065: 	mov.l		%d0,%d2			# put index in d2
19066: 	mov.l		L_SCR1(%a6),%d5		# restore extension word to d5
19067: 	mov.l		%a0,%d3			# put base in d3
19068: 
19069: 	btst		&0xb,%d5		# is index word or long?
19070: 	bne.b		fno_ext			# long; no extension needed
19071: 	ext.l		%d2			# sign extend word index
19072: 
19073: fno_ext:
19074: 	bfextu		%d5{&21:&2},%d0		# extract scale field
19075: 	lsl.l		%d0,%d2			# shift index by scale
19076: 
19077: # base address (passed as parameter in d3):
19078: # we clear the value here if it should actually be suppressed.
19079: fbase_supp_ck:
19080: 	btst		&0x7,%d5		# is the base register suppressed?
19081: 	beq.b		fno_base_sup		# no
19082: 	clr.l		%d3			# yes; base = 0
19083: 
19084: # base displacement:
19085: fno_base_sup:
19086: 	bfextu		%d5{&26:&2},%d0		# get bd size
19087: #	beq.l		fmovm_error		# if (size == 0) it's reserved
19088: 
19089: 	cmpi.b		%d0,&0x2		# bd size: 1 = null, 2 = word, 3 = long
19090: 	blt.b		fno_bd			# null bd
19091: 	beq.b		fget_word_bd		# word bd
19092: 
19093: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19094: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19095: 	bsr.l		_imem_read_long		# fetch long bd
19096: 
19097: 	tst.l		%d1			# did ifetch fail?
19098: 	bne.l		fcea_iacc		# yes
19099: 
19100: 	bra.b		fchk_ind
19101: 
19102: fget_word_bd:
19103: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19104: 	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
19105: 	bsr.l		_imem_read_word		# fetch word bd
19106: 
19107: 	tst.l		%d1			# did ifetch fail?
19108: 	bne.l		fcea_iacc		# yes
19109: 
19110: 	ext.l		%d0			# sign extend bd
19111: 
19112: fchk_ind:
19113: 	add.l		%d0,%d3			# base += bd
19114: 
19115: # outer displacement:
19116: fno_bd:
19117: 	bfextu		%d5{&30:&2},%d0		# is od suppressed?
19118: 	beq.w		faii_bd			# yes; no memory indirection
19119: 
19120: 	cmpi.b		%d0,&0x2		# od size: 1 = null, 2 = word, 3 = long
19121: 	blt.b		fnull_od		# null od
19122: 	beq.b		fword_od		# word od
19123: 
19124: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19125: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19126: 	bsr.l		_imem_read_long		# fetch long od
19127: 
19128: 	tst.l		%d1			# did ifetch fail?
19129: 	bne.l		fcea_iacc		# yes
19130: 
19131: 	bra.b		fadd_them
19132: 
19133: fword_od:
19134: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19135: 	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
19136: 	bsr.l		_imem_read_word		# fetch word od
19137: 
19138: 	tst.l		%d1			# did ifetch fail?
19139: 	bne.l		fcea_iacc		# yes
19140: 
19141: 	ext.l		%d0			# sign extend od
19142: 	bra.b		fadd_them
19143: 
19144: fnull_od:
19145: 	clr.l		%d0			# null od = 0
19146: 
19147: fadd_them:
19148: 	mov.l		%d0,%d4			# put od in d4
19149: 
19150: 	btst		&0x2,%d5		# pre or post indexing?
19151: 	beq.b		fpre_indexed		# pre-indexed
19152: 
19153: 	mov.l		%d3,%a0			# post-indexed: fetch from (base + bd)
19154: 	bsr.l		_dmem_read_long		# read indirect pointer
19155: 
19156: 	tst.l		%d1			# did dfetch fail?
19157: 	bne.w		fcea_err		# yes
19158: 
19159: 	add.l		%d2,%d0			# <ea> += index
19160: 	add.l		%d4,%d0			# <ea> += od
19161: 	bra.b		fdone_ea
19162: 
19163: fpre_indexed:
19164: 	add.l		%d2,%d3			# preindexing: add index before fetch
19165: 	mov.l		%d3,%a0			# fetch from (base + bd + index)
19166: 	bsr.l		_dmem_read_long		# read indirect pointer
19167: 
19168: 	tst.l		%d1			# did dfetch fail?
19169: 	bne.w		fcea_err		# yes
19170: 
19171: 	add.l		%d4,%d0			# ea += od
19172: 	bra.b		fdone_ea
19173: 
19174: faii_bd:
19175: 	add.l		%d2,%d3			# ea = (base + bd) + index
19176: 	mov.l		%d3,%d0			# no indirection; use it directly
19177: fdone_ea:
19178: 	mov.l		%d0,%a0			# return <ea> in a0
19179: 
19180: 	movm.l		(%sp)+,&0x003c		# restore d2-d5
19181: 	rts
19182:
19183: #########################################################
19184: fcea_err:
19185: 	mov.l		%d3,%a0			# pass failing address in a0
19186: 
19187: 	movm.l		(%sp)+,&0x003c		# restore d2-d5
19188: 	mov.w		&0x0101,%d0		# pass error code to iea_dacc
19189: 	bra.l		iea_dacc		# exit: data access error
19190: 
19191: fcea_iacc:
19192: 	movm.l		(%sp)+,&0x003c		# restore d2-d5
19193: 	bra.l		iea_iacc		# exit: instruction access error
19194:
19195: fmovm_out_err:
19196: 	bsr.l		restore			# undo addr reg update, if any
19197: 	mov.w		&0x00e1,%d0		# pass error code (write) to iea_dacc
19198: 	bra.b		fmovm_err
19199: 
19200: fmovm_in_err:
19201: 	bsr.l		restore			# undo addr reg update, if any
19202: 	mov.w		&0x0161,%d0		# pass error code (read) to iea_dacc
19203: 
19204: fmovm_err:
19205: 	mov.l		L_SCR1(%a6),%a0		# pass failing <ea> in a0
19206: 	bra.l		iea_dacc		# exit: data access error
19207:
19208: #########################################################################
19209: # XDEF **************************************************************** #
19210: # fmovm_ctrl(): emulate fmovm.l of control registers instr #
19211: # #
19212: # XREF **************************************************************** #
19213: # _imem_read_long() - read longword from memory #
19214: # iea_iacc() - _imem_read_long() failed; error recovery #
19215: # #
19216: # INPUT *************************************************************** #
19217: # None #
19218: # #
19219: # OUTPUT ************************************************************** #
19220: # If _imem_read_long() doesn't fail: #
19221: # USER_FPCR(a6) = new FPCR value #
19222: # USER_FPSR(a6) = new FPSR value #
19223: # USER_FPIAR(a6) = new FPIAR value #
19224: # #
19225: # ALGORITHM *********************************************************** #
19226: # Decode the instruction type by looking at the extension word #
19227: # in order to see how many control registers to fetch from memory. #
19228: # Fetch them using _imem_read_long(). If this fetch fails, exit through #
19229: # the special access error exit handler iea_iacc(). #
19230: # #
19231: # Instruction word decoding: #
19232: # #
19233: # fmovem.l #<data>, {FPIAR&|FPCR&|FPSR} #
19234: # #
19235: # WORD1 WORD2 #
19236: # 1111 0010 00 111100 100$ $$00 0000 0000 #
19237: # #
19238: # $$$ (100): FPCR #
19239: # (010): FPSR #
19240: # (001): FPIAR #
19241: # (000): FPIAR #
19242: # #
19243: #########################################################################
19244:
19245: 	global		fmovm_ctrl
19246: fmovm_ctrl:
19247: 	mov.b		EXC_EXTWORD(%a6),%d0	# fetch reg select bits
19248: 	cmpi.b		%d0,&0x9c		# fpcr & fpsr & fpiar ?
19249: 	beq.w		fctrl_in_7		# yes
19250: 	cmpi.b		%d0,&0x98		# fpcr & fpsr ?
19251: 	beq.w		fctrl_in_6		# yes
19252: 	cmpi.b		%d0,&0x94		# fpcr & fpiar ?
19253: 	beq.b		fctrl_in_5		# yes; else fall through: fpsr & fpiar
19254: 
19255: # fmovem.l		#<data>, fpsr/fpiar
19256: fctrl_in_3:
19257: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19258: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19259: 	bsr.l		_imem_read_long		# fetch FPSR from mem
19260: 
19261: 	tst.l		%d1			# did ifetch fail?
19262: 	bne.l		iea_iacc		# yes
19263: 
19264: 	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
19265: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19266: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19267: 	bsr.l		_imem_read_long		# fetch FPIAR from mem
19268: 
19269: 	tst.l		%d1			# did ifetch fail?
19270: 	bne.l		iea_iacc		# yes
19271: 
19272: 	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
19273: 	rts
19274: 
19275: # fmovem.l		#<data>, fpcr/fpiar
19276: fctrl_in_5:
19277: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19278: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19279: 	bsr.l		_imem_read_long		# fetch FPCR from mem
19280: 
19281: 	tst.l		%d1			# did ifetch fail?
19282: 	bne.l		iea_iacc		# yes
19283: 
19284: 	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
19285: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19286: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19287: 	bsr.l		_imem_read_long		# fetch FPIAR from mem
19288: 
19289: 	tst.l		%d1			# did ifetch fail?
19290: 	bne.l		iea_iacc		# yes
19291: 
19292: 	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
19293: 	rts
19294: 
19295: # fmovem.l		#<data>, fpcr/fpsr
19296: fctrl_in_6:
19297: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19298: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19299: 	bsr.l		_imem_read_long		# fetch FPCR from mem
19300: 
19301: 	tst.l		%d1			# did ifetch fail?
19302: 	bne.l		iea_iacc		# yes
19303: 
19304: 	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
19305: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19306: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19307: 	bsr.l		_imem_read_long		# fetch FPSR from mem
19308: 
19309: 	tst.l		%d1			# did ifetch fail?
19310: 	bne.l		iea_iacc		# yes
19311: 
19312: 	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
19313: 	rts
19314: 
19315: # fmovem.l		#<data>, fpcr/fpsr/fpiar
19316: fctrl_in_7:
19317: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19318: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19319: 	bsr.l		_imem_read_long		# fetch FPCR from mem
19320: 
19321: 	tst.l		%d1			# did ifetch fail?
19322: 	bne.l		iea_iacc		# yes
19323: 
19324: 	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
19325: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19326: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19327: 	bsr.l		_imem_read_long		# fetch FPSR from mem
19328: 
19329: 	tst.l		%d1			# did ifetch fail?
19330: 	bne.l		iea_iacc		# yes
19331: 
19332: 	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
19333: 	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
19334: 	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
19335: 	bsr.l		_imem_read_long		# fetch FPIAR from mem
19336: 
19337: 	tst.l		%d1			# did ifetch fail?
19338: 	bne.l		iea_iacc		# yes
19339: 
19340: 	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to mem
19341: 	rts
19342:
19343: #########################################################################
19344: # XDEF **************************************************************** #
19345: # _dcalc_ea(): calc correct <ea> from <ea> stacked on exception #
19346: # #
19347: # XREF **************************************************************** #
19348: # inc_areg() - increment an address register #
19349: # dec_areg() - decrement an address register #
19350: # #
19351: # INPUT *************************************************************** #
19352: # d0 = number of bytes to adjust <ea> by #
19353: # #
19354: # OUTPUT ************************************************************** #
19355: #	a0 = the corrected <ea> for the operand			#
19356: # #
19357: # ALGORITHM *********************************************************** #
19358: # "Dummy" CALCulate Effective Address: #
19359: # The stacked <ea> for FP unimplemented instructions and opclass #
19360: # two packed instructions is correct with the exception of... #
19361: # #
19362: # 1) -(An) : The register is not updated regardless of size. #
19363: # Also, for extended precision and packed, the #
19364: # stacked <ea> value is 8 bytes too big #
19365: # 2) (An)+ : The register is not updated. #
19366: #	3) #<data> : The upper longword of the immediate operand is	#
19367: #		     stacked. The b, w, l, and s sizes are stacked	#
19368: #		     completely; d, x, and p are not.			#
19369: # #
19370: #########################################################################
19371:
19372: 	global		_dcalc_ea
19373: _dcalc_ea:
19374: 	mov.l		%d0, %a0		# move # bytes to %a0
19375: 
19376: 	mov.b		1+EXC_OPWORD(%a6), %d0	# fetch opcode word lo byte
19377: 	mov.l		%d0, %d1		# make a copy
19378: 
19379: 	andi.w		&0x38, %d0		# extract mode field
19380: 	andi.l		&0x7, %d1		# extract reg  field
19381: 
19382: 	cmpi.b		%d0,&0x18		# is mode (An)+ ?
19383: 	beq.b		dcea_pi			# yes
19384: 
19385: 	cmpi.b		%d0,&0x20		# is mode -(An) ?
19386: 	beq.b		dcea_pd			# yes
19387: 
19388: 	or.w		%d1,%d0			# concat mode,reg
19389: 	cmpi.b		%d0,&0x3c		# is mode #<data>?
19390: 
19391: 	beq.b		dcea_imm		# yes
19392: 
19393: 	mov.l		EXC_EA(%a6),%a0		# return stacked <ea>; already correct
19394: 	rts
19395: 
19396: # need to set immediate data flag here since we'll need to do
19397: # an imem_read to fetch this later.
19398: dcea_imm:
19399: 	mov.b		&immed_flg,SPCOND_FLG(%a6)
19400: 	lea		([USER_FPIAR,%a6],0x4),%a0 # return <ea> = FPIAR + 0x4
19401: 	rts
19402: 
19403: # here, the <ea> is stacked correctly. however, we must update the
19404: # address register...
19405: dcea_pi:
19406: 	mov.l		%a0,%d0			# pass amt to inc by
19407: 	bsr.l		inc_areg		# inc addr register
19408: 
19409: 	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
19410: 	rts
19411: 
19412: # the <ea> is stacked correctly for all but extended and packed which
19413: # the <ea>s are 8 bytes too large.
19414: # it would make no sense to have a pre-decrement to a7 in supervisor
19415: # mode so we don't even worry about this tricky case here : )
19416: dcea_pd:
19417: 	mov.l		%a0,%d0			# pass amt to dec by
19418: 	bsr.l		dec_areg		# dec addr register
19419: 
19420: 	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
19421: 
19422: 	cmpi.b		%d0,&0xc		# is opsize ext or packed (12 bytes)?
19423: 	beq.b		dcea_pd2		# yes
19424: 	rts
19425: dcea_pd2:
19426: 	sub.l		&0x8,%a0		# correct <ea>
19427: 	mov.l		%a0,EXC_EA(%a6)		# put correct <ea> on stack
19428: 	rts
19429:
19430: #########################################################################
19431: # XDEF **************************************************************** #
19432: # _calc_ea_fout(): calculate correct stacked <ea> for extended #
19433: # and packed data opclass 3 operations. #
19434: # #
19435: # XREF **************************************************************** #
19436: # None #
19437: # #
19438: # INPUT *************************************************************** #
19439: # None #
19440: # #
19441: # OUTPUT ************************************************************** #
19442: # a0 = return correct effective address #
19443: # #
19444: # ALGORITHM *********************************************************** #
19445: # For opclass 3 extended and packed data operations, the <ea> #
19446: # stacked for the exception is incorrect for -(an) and (an)+ addressing #
19447: # modes. Also, while we're at it, the address register itself must get	#
19448: # updated. #
19449: # So, for -(an), we must subtract 8 off of the stacked <ea> value #
19450: # and return that value as the correct <ea> and store that value in An. #
19451: # For (an)+, the stacked <ea> is correct but we must adjust An by +12. #
19452: # #
19453: #########################################################################
19454:
19455: # This calc_ea is currently used to retrieve the correct <ea>
19456: # for fmove outs of type extended and packed.
19457: 	global		_calc_ea_fout
19458: _calc_ea_fout:
19459: 	mov.b		1+EXC_OPWORD(%a6),%d0	# fetch opcode word lo byte
19460: 	mov.l		%d0,%d1			# make a copy
19461: 
19462: 	andi.w		&0x38,%d0		# extract mode field
19463: 	andi.l		&0x7,%d1		# extract reg  field
19464: 
19465: 	cmpi.b		%d0,&0x18		# is mode (An)+ ?
19466: 	beq.b		ceaf_pi			# yes
19467: 
19468: 	cmpi.b		%d0,&0x20		# is mode -(An) ?
19469: 	beq.w		ceaf_pd			# yes
19470: 
19471: 	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
19472: 	rts
19473: 
19474: # (An)+ : extended and packed fmove out
19475: #	: stacked <ea> is correct
19476: #	: "An" not updated
19477: ceaf_pi:
19478: 	mov.w		(tbl_ceaf_pi.b,%pc,%d1.w*2),%d1	# fetch jmp offset by reg #
19479: 	mov.l		EXC_EA(%a6),%a0		# return stacked <ea>
19480: 	jmp		(tbl_ceaf_pi.b,%pc,%d1.w*1)	# dispatch: bump An by 12
19481: 
19482: 	swbeg		&0x8
19483: tbl_ceaf_pi:
19484: 	short		ceaf_pi0 - tbl_ceaf_pi
19485: 	short		ceaf_pi1 - tbl_ceaf_pi
19486: 	short		ceaf_pi2 - tbl_ceaf_pi
19487: 	short		ceaf_pi3 - tbl_ceaf_pi
19488: 	short		ceaf_pi4 - tbl_ceaf_pi
19489: 	short		ceaf_pi5 - tbl_ceaf_pi
19490: 	short		ceaf_pi6 - tbl_ceaf_pi
19491: 	short		ceaf_pi7 - tbl_ceaf_pi
19492: 
19493: ceaf_pi0:
19494: 	addi.l		&0xc,EXC_DREGS+0x8(%a6)	# a0 saved on stack frame
19495: 	rts
19496: ceaf_pi1:
19497: 	addi.l		&0xc,EXC_DREGS+0xc(%a6)	# a1 saved on stack frame
19498: 	rts
19499: ceaf_pi2:
19500: 	add.l		&0xc,%a2
19501: 	rts
19502: ceaf_pi3:
19503: 	add.l		&0xc,%a3
19504: 	rts
19505: ceaf_pi4:
19506: 	add.l		&0xc,%a4
19507: 	rts
19508: ceaf_pi5:
19509: 	add.l		&0xc,%a5
19510: 	rts
19511: ceaf_pi6:
19512: 	addi.l		&0xc,EXC_A6(%a6)	# a6 saved on stack frame
19513: 	rts
19514: ceaf_pi7:
19515: 	mov.b		&mia7_flg,SPCOND_FLG(%a6)	# flag (a7)+ special case
19516: 	addi.l		&0xc,EXC_A7(%a6)	# a7 saved on stack frame
19517: 	rts
19518: 
19519: # -(An) : extended and packed fmove out
19520: #	: stacked <ea> = actual <ea> + 8
19521: #	: "An" not updated
19522: ceaf_pd:
19523: 	mov.w		(tbl_ceaf_pd.b,%pc,%d1.w*2),%d1	# fetch jmp offset by reg #
19524: 	mov.l		EXC_EA(%a6),%a0
19525: 	sub.l		&0x8,%a0		# correct <ea>
19526: 	sub.l		&0x8,EXC_EA(%a6)	# put correct <ea> on stack
19527: 	jmp		(tbl_ceaf_pd.b,%pc,%d1.w*1)	# dispatch: store <ea> in An
19528: 
19529: 	swbeg		&0x8
19530: tbl_ceaf_pd:
19531: 	short		ceaf_pd0 - tbl_ceaf_pd
19532: 	short		ceaf_pd1 - tbl_ceaf_pd
19533: 	short		ceaf_pd2 - tbl_ceaf_pd
19534: 	short		ceaf_pd3 - tbl_ceaf_pd
19535: 	short		ceaf_pd4 - tbl_ceaf_pd
19536: 	short		ceaf_pd5 - tbl_ceaf_pd
19537: 	short		ceaf_pd6 - tbl_ceaf_pd
19538: 	short		ceaf_pd7 - tbl_ceaf_pd
19539: 
19540: ceaf_pd0:
19541: 	mov.l		%a0,EXC_DREGS+0x8(%a6)	# a0 saved on stack frame
19542: 	rts
19543: ceaf_pd1:
19544: 	mov.l		%a0,EXC_DREGS+0xc(%a6)	# a1 saved on stack frame
19545: 	rts
19546: ceaf_pd2:
19547: 	mov.l		%a0,%a2
19548: 	rts
19549: ceaf_pd3:
19550: 	mov.l		%a0,%a3
19551: 	rts
19552: ceaf_pd4:
19553: 	mov.l		%a0,%a4
19554: 	rts
19555: ceaf_pd5:
19556: 	mov.l		%a0,%a5
19557: 	rts
19558: ceaf_pd6:
19559: 	mov.l		%a0,EXC_A6(%a6)		# a6 saved on stack frame
19560: 	rts
19561: ceaf_pd7:
19562: 	mov.l		%a0,EXC_A7(%a6)		# a7 saved on stack frame
19563: 	mov.b		&mda7_flg,SPCOND_FLG(%a6)	# flag -(a7) special case
19564: 	rts
19565:
19566: #########################################################################
19567: # XDEF **************************************************************** #
19568: # _load_fop(): load operand for unimplemented FP exception #
19569: # #
19570: # XREF **************************************************************** #
19571: # set_tag_x() - determine ext prec optype tag #
19572: # set_tag_s() - determine sgl prec optype tag #
19573: # set_tag_d() - determine dbl prec optype tag #
19574: # unnorm_fix() - convert normalized number to denorm or zero #
19575: # norm() - normalize a denormalized number #
19576: # get_packed() - fetch a packed operand from memory #
19577: # _dcalc_ea() - calculate <ea>, fixing An in process #
19578: # #
19579: # _imem_read_{word,long}() - read from instruction memory #
19580: # _dmem_read() - read from data memory #
19581: # _dmem_read_{byte,word,long}() - read from data memory #
19582: # #
19583: # facc_in_{b,w,l,d,x}() - mem read failed; special exit point #
19584: # #
19585: # INPUT *************************************************************** #
19586: # None #
19587: # #
19588: # OUTPUT ************************************************************** #
19589: # If memory access doesn't fail: #
19590: # FP_SRC(a6) = source operand in extended precision #
19591: # FP_DST(a6) = destination operand in extended precision #
19592: # #
19593: # ALGORITHM *********************************************************** #
19594: # This is called from the Unimplemented FP exception handler in #
19595: # order to load the source and maybe destination operand into #
19596: # FP_SRC(a6) and FP_DST(a6). If the instruction was opclass zero, load #
19597: # the source and destination from the FP register file. Set the optype #
19598: # tags for both if dyadic, one for monadic. If a number is an UNNORM, #
19599: # convert it to a DENORM or a ZERO. #
19600: # If the instruction is opclass two (memory->reg), then fetch #
19601: # the destination from the register file and the source operand from #
19602: # memory. Tag and fix both as above w/ opclass zero instructions. #
19603: # If the source operand is byte,word,long, or single, it may be #
19604: # in the data register file. If it's actually out in memory, use one of #
19605: # the mem_read() routines to fetch it. If the mem_read() access returns #
19606: # a failing value, exit through the special facc_in() routine which #
19607: # will create an acess error exception frame from the current exception #
19608: # frame. #
19609: # Immediate data and regular data accesses are separated because #
19610: # if an immediate data access fails, the resulting fault status #
19611: # longword stacked for the access error exception must have the #
19612: # instruction bit set. #
19613: # #
19614: #########################################################################
19615:
19616: 	global		_load_fop
19617: _load_fop:
19618: 
19619: #	 15		13 12	  10  9		7  6		       0
19620: #	 /		  \ /		\ /		\ /			   \
19621: #	 ---------------------------------
19622: #	 | opclass | RX | RY | EXTENSION |  (2nd word of general FP instruction)
19623: #	 ---------------------------------
19624: #
19625: 
19626: #	bfextu		EXC_CMDREG(%a6){&0:&3}, %d0 # extract opclass
19627: #	cmpi.b		%d0, &0x2		# which class is it? ('000,'010,'011)
19628: #	beq.w		op010			# handle <ea> -> fpn
19629: #	bgt.w		op011			# handle fpn -> <ea>
19630: 
19631: # we're not using op011 for now...
19632: 	btst		&0x6,EXC_CMDREG(%a6)	# opclass bit set => '010
19633: 	bne.b		op010
19634: 
19635: ############################
19636: # OPCLASS '000: reg -> reg #
19637: ############################
19638: op000:
19639: 	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension word lo
19640: 	btst		&0x5,%d0		# testing extension bits
19641: 	beq.b		op000_src		# (bit 5 == 0) => monadic
19642: 	btst		&0x4,%d0		# (bit 5 == 1)
19643: 	beq.b		op000_dst		# (bit 4 == 0) => dyadic
19644: 	and.w		&0x007f,%d0		# extract extension bits {6:0}
19645: 	cmpi.w		%d0,&0x0038		# is it an fcmp (dyadic) ?
19646: 	bne.b		op000_src		# no; it's monadic
19647: 
19648: op000_dst:
19649: 	bfextu		EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
19650: 	bsr.l		load_fpn2		# fetch dst fpreg into FP_DST
19651: 
19652: 	bsr.l		set_tag_x		# get dst optype tag
19653: 
19654: 	cmpi.b		%d0, &UNNORM		# is dst fpreg an UNNORM?
19655: 	beq.b		op000_dst_unnorm	# yes
19656: op000_dst_cont:
19657: 	mov.b		%d0, DTAG(%a6)		# store the dst optype tag
19658: 
19659: op000_src:
19660: 	bfextu		EXC_CMDREG(%a6){&3:&3}, %d0 # extract src field
19661: 	bsr.l		load_fpn1		# fetch src fpreg into FP_SRC
19662: 
19663: 	bsr.l		set_tag_x		# get src optype tag
19664: 
19665: 	cmpi.b		%d0, &UNNORM		# is src fpreg an UNNORM?
19666: 	beq.b		op000_src_unnorm	# yes
19667: op000_src_cont:
19668: 	mov.b		%d0, STAG(%a6)		# store the src optype tag
19669: 	rts
19670: 
19671: op000_dst_unnorm:
19672: 	bsr.l		unnorm_fix		# fix the dst UNNORM
19673: 	bra.b		op000_dst_cont
19674: op000_src_unnorm:
19675: 	bsr.l		unnorm_fix		# fix the src UNNORM
19676: 	bra.b		op000_src_cont
19677:
19678: #############################
19679: # OPCLASS '010: <ea> -> reg #
19680: #############################
19681: op010:
19682: 	mov.w		EXC_CMDREG(%a6),%d0	# fetch extension word
19683: 	btst		&0x5,%d0		# testing extension bits
19684: 	beq.b		op010_src		# (bit 5 == 0) => monadic
19685: 	btst		&0x4,%d0		# (bit 5 == 1)
19686: 	beq.b		op010_dst		# (bit 4 == 0) => dyadic
19687: 	and.w		&0x007f,%d0		# extract extension bits {6:0}
19688: 	cmpi.w		%d0,&0x0038		# is it an fcmp (dyadic) ?
19689: 	bne.b		op010_src		# no; it's monadic
19690: 
19691: op010_dst:
19692: 	bfextu		EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
19693: 	bsr.l		load_fpn2		# fetch dst fpreg ptr
19694: 
19695: 	bsr.l		set_tag_x		# get dst type tag
19696: 
19697: 	cmpi.b		%d0, &UNNORM		# is dst fpreg an UNNORM?
19698: 	beq.b		op010_dst_unnorm	# yes
19699: op010_dst_cont:
19700: 	mov.b		%d0, DTAG(%a6)		# store the dst optype tag
19701: 
19702: op010_src:
19703: 	bfextu		EXC_CMDREG(%a6){&3:&3}, %d0 # extract src type field
19704: 
19705: 	bfextu		EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field
19706: 	bne.w		fetch_from_mem		# nonzero mode => src op is in memory
19707: 
19708: op010_dreg:
19709: 	clr.b		STAG(%a6)		# either NORM or ZERO
19710: 	bfextu		EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field
19711: 
19712: 	mov.w		(tbl_op010_dreg.b,%pc,%d0.w*2), %d0 # jmp based on optype
19713: 	jmp		(tbl_op010_dreg.b,%pc,%d0.w*1) # fetch src from dreg
19714: 
19715: op010_dst_unnorm:
19716: 	bsr.l		unnorm_fix		# fix the dst UNNORM
19717: 	bra.b		op010_dst_cont
19718: 
19719: 	swbeg		&0x8
19720: tbl_op010_dreg:
19721: 	short		opd_long	-	tbl_op010_dreg
19722: 	short		opd_sgl		-	tbl_op010_dreg
19723: 	short		tbl_op010_dreg	-	tbl_op010_dreg
19724: 	short		tbl_op010_dreg	-	tbl_op010_dreg
19725: 	short		opd_word	-	tbl_op010_dreg
19726: 	short		tbl_op010_dreg	-	tbl_op010_dreg
19727: 	short		opd_byte	-	tbl_op010_dreg
19728: 	short		tbl_op010_dreg	-	tbl_op010_dreg
19729: 
19730: #
19731: # LONG: can be either NORM or ZERO...
19732: #
19733: opd_long:
19734: 	bsr.l		fetch_dreg		# fetch long in d0
19735: 	fmov.l		%d0, %fp0		# load a long
19736: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19737: 	fbeq.w		opd_long_zero		# long is a ZERO
19738: 	rts
19739: opd_long_zero:
19740: 	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
19741: 	rts
19742: 
19743: #
19744: # WORD: can be either NORM or ZERO...
19745: #
19746: opd_word:
19747: 	bsr.l		fetch_dreg		# fetch word in d0
19748: 	fmov.w		%d0, %fp0		# load a word
19749: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19750: 	fbeq.w		opd_word_zero		# WORD is a ZERO
19751: 	rts
19752: opd_word_zero:
19753: 	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
19754: 	rts
19755: 
19756: #
19757: # BYTE: can be either NORM or ZERO...
19758: #
19759: opd_byte:
19760: 	bsr.l		fetch_dreg		# fetch byte in d0
19761: 	fmov.b		%d0, %fp0		# load a byte
19762: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19763: 	fbeq.w		opd_byte_zero		# byte is a ZERO
19764: 	rts
19765: opd_byte_zero:
19766: 	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
19767: 	rts
19768: 
19769: #
19770: # SGL: can be either NORM, DENORM, ZERO, INF, QNAN or SNAN but not UNNORM
19771: #
19772: # separate SNANs and DENORMs so they can be loaded w/ special care.
19773: # all others can simply be moved "in" using fmove.
19774: #
19775: opd_sgl:
19776: 	bsr.l		fetch_dreg		# fetch sgl in d0
19777: 	mov.l		%d0,L_SCR1(%a6)		# store sgl on stack for set_tag_s
19778: 
19779: 	lea		L_SCR1(%a6), %a0	# pass: ptr to the sgl
19780: 	bsr.l		set_tag_s		# determine sgl type
19781: 	mov.b		%d0, STAG(%a6)		# save the src tag
19782: 
19783: 	cmpi.b		%d0, &SNAN		# is it an SNAN?
19784: 	beq.w		get_sgl_snan		# yes
19785: 
19786: 	cmpi.b		%d0, &DENORM		# is it a DENORM?
19787: 	beq.w		get_sgl_denorm		# yes
19788: 
19789: 	fmov.s		(%a0), %fp0		# no, so can load it regular
19790: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19791: 	rts
19792:
19793: ##############################################################################
19794:
19795: #########################################################################
19796: # fetch_from_mem(): #
19797: # - src is out in memory. must: #
19798: # (1) calc ea - must read AFTER you know the src type since #
19799: # if the ea is -() or ()+, need to know # of bytes. #
19800: # (2) read it in from either user or supervisor space #
19801: # (3) if (b || w || l) then simply read in #
19802: # if (s || d || x) then check for SNAN,UNNORM,DENORM #
19803: # if (packed) then punt for now #
19804: # INPUT: #
19805: # %d0 : src type field #
19806: #########################################################################
19807: fetch_from_mem:
19808: 	clr.b		STAG(%a6)		# either NORM or ZERO
19809: 
19810: 	mov.w		(tbl_fp_type.b,%pc,%d0.w*2), %d0 # index by src type field
19811: 	jmp		(tbl_fp_type.b,%pc,%d0.w*1)	# dispatch to per-type loader
19812: 
19813: 	swbeg		&0x8
19814: tbl_fp_type:
19815: 	short		load_long	-	tbl_fp_type
19816: 	short		load_sgl	-	tbl_fp_type
19817: 	short		load_ext	-	tbl_fp_type
19818: 	short		load_packed	-	tbl_fp_type
19819: 	short		load_word	-	tbl_fp_type
19820: 	short		load_dbl	-	tbl_fp_type
19821: 	short		load_byte	-	tbl_fp_type
19822: 	short		tbl_fp_type	-	tbl_fp_type
19823:
19824: #########################################
19825: # load a LONG into %fp0: #
19826: # -number can't fault #
19827: # (1) calc ea #
19828: # (2) read 4 bytes into L_SCR1 #
19829: # (3) fmov.l into %fp0 #
19830: #########################################
19831: load_long:
19832: 	movq.l		&0x4, %d0		# pass: 4 (bytes)
19833: 	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
19834: 
19835: 	cmpi.b		SPCOND_FLG(%a6),&immed_flg	# immediate data?
19836: 	beq.b		load_long_immed		# yes; fetch from istream
19837: 
19838: 	bsr.l		_dmem_read_long		# fetch src operand from memory
19839: 
19840: 	tst.l		%d1			# did dfetch fail?
19841: 	bne.l		facc_in_l		# yes
19842: 
19843: load_long_cont:
19844: 	fmov.l		%d0, %fp0		# read into %fp0;convert to xprec
19845: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19846: 
19847: 	fbeq.w		load_long_zero		# src op is a ZERO
19848: 	rts
19849: load_long_zero:
19850: 	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
19851: 	rts
19852: 
19853: load_long_immed:
19854: 	bsr.l		_imem_read_long		# fetch src operand immed data
19855: 
19856: 	tst.l		%d1			# did ifetch fail?
19857: 	bne.l		funimp_iacc		# yes
19858: 	bra.b		load_long_cont
19859:
19860: #########################################
19861: # load a WORD into %fp0: #
19862: # -number can't fault #
19863: # (1) calc ea #
19864: # (2) read 2 bytes into L_SCR1 #
19865: # (3) fmov.w into %fp0 #
19866: #########################################
19867: load_word:
19868: 	movq.l		&0x2, %d0		# pass: 2 (bytes)
19869: 	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
19870: 
19871: 	cmpi.b		SPCOND_FLG(%a6),&immed_flg	# immediate data?
19872: 	beq.b		load_word_immed		# yes; fetch from istream
19873: 
19874: 	bsr.l		_dmem_read_word		# fetch src operand from memory
19875: 
19876: 	tst.l		%d1			# did dfetch fail?
19877: 	bne.l		facc_in_w		# yes
19878: 
19879: load_word_cont:
19880: 	fmov.w		%d0, %fp0		# read into %fp0;convert to xprec
19881: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19882: 
19883: 	fbeq.w		load_word_zero		# src op is a ZERO
19884: 	rts
19885: load_word_zero:
19886: 	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
19887: 	rts
19888: 
19889: load_word_immed:
19890: 	bsr.l		_imem_read_word		# fetch src operand immed data
19891: 
19892: 	tst.l		%d1			# did ifetch fail?
19893: 	bne.l		funimp_iacc		# yes
19894: 	bra.b		load_word_cont
19895:
19896: #########################################
19897: # load a BYTE into %fp0: #
19898: # -number can't fault #
19899: # (1) calc ea #
19900: # (2) read 1 byte into L_SCR1 #
19901: # (3) fmov.b into %fp0 #
19902: #########################################
19903: load_byte:
19904: 	movq.l		&0x1, %d0		# pass: 1 (byte)
19905: 	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
19906: 
19907: 	cmpi.b		SPCOND_FLG(%a6),&immed_flg	# immediate data?
19908: 	beq.b		load_byte_immed		# yes; fetch from istream
19909: 
19910: 	bsr.l		_dmem_read_byte		# fetch src operand from memory
19911: 
19912: 	tst.l		%d1			# did dfetch fail?
19913: 	bne.l		facc_in_b		# yes
19914: 
19915: load_byte_cont:
19916: 	fmov.b		%d0, %fp0		# read into %fp0;convert to xprec
19917: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19918: 
19919: 	fbeq.w		load_byte_zero		# src op is a ZERO
19920: 	rts
19921: load_byte_zero:
19922: 	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
19923: 	rts
19924: 
19925: load_byte_immed:
19926: 	bsr.l		_imem_read_word		# immed byte occupies a word in istream
19927: 
19928: 	tst.l		%d1			# did ifetch fail?
19929: 	bne.l		funimp_iacc		# yes
19930: 	bra.b		load_byte_cont
19931:
19932: #########################################
19933: # load a SGL into %fp0: #
19934: # -number can't fault #
19935: # (1) calc ea #
19936: # (2) read 4 bytes into L_SCR1 #
19937: # (3) fmov.s into %fp0 #
19938: #########################################
19939: load_sgl:
19940: 	movq.l		&0x4, %d0		# pass: 4 (bytes)
19941: 	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
19942: 
19943: 	cmpi.b		SPCOND_FLG(%a6),&immed_flg	# immediate data?
19944: 	beq.b		load_sgl_immed		# yes; fetch from istream
19945: 
19946: 	bsr.l		_dmem_read_long		# fetch src operand from memory
19947: 	mov.l		%d0, L_SCR1(%a6)	# store src op on stack
19948: 
19949: 	tst.l		%d1			# did dfetch fail?
19950: 	bne.l		facc_in_l		# yes
19951: 
19952: load_sgl_cont:
19953: 	lea		L_SCR1(%a6), %a0	# pass: ptr to sgl src op
19954: 	bsr.l		set_tag_s		# determine src type tag
19955: 	mov.b		%d0, STAG(%a6)		# save src optype tag on stack
19956: 
19957: 	cmpi.b		%d0, &DENORM		# is it a sgl DENORM?
19958: 	beq.w		get_sgl_denorm		# yes
19959: 
19960: 	cmpi.b		%d0, &SNAN		# is it a sgl SNAN?
19961: 	beq.w		get_sgl_snan		# yes
19962: 
19963: 	fmov.s		L_SCR1(%a6), %fp0	# read into %fp0;convert to xprec
19964: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
19965: 	rts
19966: 
19967: load_sgl_immed:
19968: 	bsr.l		_imem_read_long		# fetch src operand immed data
19969: 
19970: 	tst.l		%d1			# did ifetch fail?
19971: 	bne.l		funimp_iacc		# yes
19972: 	bra.b		load_sgl_cont
19973:
19974: # must convert sgl denorm format to an Xprec denorm fmt suitable for
19975: # normalization...
19976: # %a0 : points to sgl denorm
19977: get_sgl_denorm:
19978: 	clr.w		FP_SRC_EX(%a6)		# clear sign/exp of xprec result
19979: 	bfextu		(%a0){&9:&23}, %d0	# fetch sgl hi(_mantissa)
19980: 	lsl.l		&0x8, %d0		# left-justify in longword
19981: 	mov.l		%d0, FP_SRC_HI(%a6)	# set ext hi(_mantissa)
19982: 	clr.l		FP_SRC_LO(%a6)		# set ext lo(_mantissa)
19983: 
19984: 	btst		&0x7, (%a0)		# is sgn bit set?
19985: 	beq.b		sgl_dnrm_norm
19986: 	bset		&0x7, FP_SRC_EX(%a6)	# set sgn of xprec value
19987: 
19988: sgl_dnrm_norm:
19989: 	lea		FP_SRC(%a6), %a0	# pass: ptr to operand
19990: 	bsr.l		norm			# normalize number
19991: 	mov.w		&0x3f81, %d1		# xprec exp = 0x3f81
19992: 	sub.w		%d0, %d1		# exp = 0x3f81 - shft amt.
19993: 	or.w		%d1, FP_SRC_EX(%a6)	# {sgn,exp}
19994: 
19995: 	mov.b		&NORM, STAG(%a6)	# fix src type tag
19996: 	rts
19998:
19999: # convert sgl to ext SNAN
20000: # %a0 : points to sgl SNAN
20001: get_sgl_snan:
20002: 	mov.w		&0x7fff, FP_SRC_EX(%a6)	# set exp of SNAN
20003: 	bfextu		(%a0){&9:&23}, %d0	# fetch sgl mantissa
20004: 	lsl.l		&0x8, %d0		# extract and insert hi(man)
20005: 	mov.l		%d0, FP_SRC_HI(%a6)	# set ext hi(_mantissa)
20006: 	clr.l		FP_SRC_LO(%a6)		# set ext lo(_mantissa)
20007: 
20008: 	btst		&0x7, (%a0)		# see if sign of SNAN is set
20009: 	beq.b		no_sgl_snan_sgn
20010: 	bset		&0x7, FP_SRC_EX(%a6)	# propagate sign bit
20011: no_sgl_snan_sgn:
20012: 	rts
20013:
20014: #########################################
20015: # load a DBL into %fp0: #
20016: # -number can't fault #
20017: # (1) calc ea #
20018: # (2) read 8 bytes into L_SCR(1,2)#
20019: # (3) fmov.d into %fp0 #
20020: #########################################
20021: load_dbl:
20022: 	movq.l		&0x8, %d0		# pass: 8 (bytes)
20023: 	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
20024: 
20025: 	cmpi.b		SPCOND_FLG(%a6),&immed_flg	# immediate data?
20026: 	beq.b		load_dbl_immed		# yes; fetch from istream
20027: 
20028: 	lea		L_SCR1(%a6), %a1	# pass: ptr to input dbl tmp space
20029: 	movq.l		&0x8, %d0		# pass: # bytes to read
20030: 	bsr.l		_dmem_read		# fetch src operand from memory
20031: 
20032: 	tst.l		%d1			# did dfetch fail?
20033: 	bne.l		facc_in_d		# yes
20034: 
20035: load_dbl_cont:
20036: 	lea		L_SCR1(%a6), %a0	# pass: ptr to input dbl
20037: 	bsr.l		set_tag_d		# determine src type tag
20038: 	mov.b		%d0, STAG(%a6)		# set src optype tag
20039: 
20040: 	cmpi.b		%d0, &DENORM		# is it a dbl DENORM?
20041: 	beq.w		get_dbl_denorm		# yes
20042: 
20043: 	cmpi.b		%d0, &SNAN		# is it a dbl SNAN?
20044: 	beq.w		get_dbl_snan		# yes
20045: 
20046: 	fmov.d		L_SCR1(%a6), %fp0	# read into %fp0;convert to xprec
20047: 	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
20048: 	rts
20049: 
20050: load_dbl_immed:
20051: 	lea		L_SCR1(%a6), %a1	# pass: ptr to input dbl tmp space
20052: 	movq.l		&0x8, %d0		# pass: # bytes to read
20053: 	bsr.l		_imem_read		# fetch src operand from memory
20054: 
20055: 	tst.l		%d1			# did ifetch fail?
20056: 	bne.l		funimp_iacc		# yes
20057: 	bra.b		load_dbl_cont
20058:
20059: # must convert dbl denorm format to an Xprec denorm fmt suitable for
20060: # normalization...
20061: # %a0 : loc. of dbl denorm
# get_dbl_denorm(): convert a double precision denorm (pointed to by
# %a0) into a normalized extended precision operand in FP_SRC(%a6).
# The 52-bit dbl mantissa is split: bits {12:31} of the first lword go
# to FP_SRC_HI, the remaining 21 bits are shifted left 11 into
# FP_SRC_LO. On exit STAG(%a6) = NORM. Clobbers %d0/%d1/%a0.
20062: get_dbl_denorm:
20063: clr.w FP_SRC_EX(%a6)
20064: bfextu (%a0){&12:&31}, %d0 # fetch hi(_mantissa)
20065: mov.l %d0, FP_SRC_HI(%a6)
20066: bfextu 4(%a0){&11:&21}, %d0 # fetch lo(_mantissa)
20067: mov.l &0xb, %d1
20068: lsl.l %d1, %d0
20069: mov.l %d0, FP_SRC_LO(%a6)
20070:
20071: btst &0x7, (%a0) # is sgn bit set?
20072: beq.b dbl_dnrm_norm
20073: bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
20074:
20075: dbl_dnrm_norm:
20076: lea FP_SRC(%a6), %a0
20077: bsr.l norm # normalize number
20078: mov.w &0x3c01, %d1 # xprec exp = 0x3c01
20079: sub.w %d0, %d1 # exp = 0x3c01 - shft amt.
20080: or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
20081:
20082: mov.b &NORM, STAG(%a6) # fix src type tag
20083: rts
20084:
20085: # convert dbl to ext SNAN
20086: # %a0 : points to dbl SNAN
# get_dbl_snan(): convert the dbl SNAN pointed to by %a0 into an
# extended precision SNAN in FP_SRC(%a6): exp = 0x7fff, mantissa split
# exactly as in get_dbl_denorm above, sign copied. Clobbers %d0/%d1.
20087: get_dbl_snan:
20088: mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
20089:
20090: bfextu (%a0){&12:&31}, %d0 # fetch hi(_mantissa)
20091: mov.l %d0, FP_SRC_HI(%a6)
20092: bfextu 4(%a0){&11:&21}, %d0 # fetch lo(_mantissa)
20093: mov.l &0xb, %d1
20094: lsl.l %d1, %d0
20095: mov.l %d0, FP_SRC_LO(%a6)
20096:
20097: btst &0x7, (%a0) # see if sign of SNAN is set
20098: beq.b no_dbl_snan_sgn
20099: bset &0x7, FP_SRC_EX(%a6)
20100: no_dbl_snan_sgn:
20101: rts
20102:
20103: #################################################
20104: # load a Xprec into %fp0: #
20105: # -number can't fault #
20106: # (1) calc ea #
20107: # (2) read 12 bytes into L_SCR(1,2) #
20108: # (3) fmov.x into %fp0 #
20109: #################################################
# load_ext(): fetch a 12-byte extended precision source operand into
# FP_SRC(%a6) via _dmem_read, tag it with set_tag_x, and run UNNORM
# inputs through unnorm_fix before storing the optype tag in STAG(%a6).
# A failed data fetch exits through facc_in_x.
20110: load_ext:
20111: mov.l &0xc, %d0 # pass: 12 (bytes)
20112: bsr.l _dcalc_ea # calc <ea>
20113:
20114: lea FP_SRC(%a6), %a1 # pass: ptr to input ext tmp space
20115: mov.l &0xc, %d0 # pass: # of bytes to read
20116: bsr.l _dmem_read # fetch src operand from memory
20117:
20118: tst.l %d1 # did dfetch fail?
20119: bne.l facc_in_x # yes
20120:
20121: lea FP_SRC(%a6), %a0 # pass: ptr to src op
20122: bsr.l set_tag_x # determine src type tag
20123:
20124: cmpi.b %d0, &UNNORM # is the src op an UNNORM?
20125: beq.b load_ext_unnorm # yes
20126:
20127: mov.b %d0, STAG(%a6) # store the src optype tag
20128: rts
20129:
20130: load_ext_unnorm:
20131: bsr.l unnorm_fix # fix the src UNNORM
20132: mov.b %d0, STAG(%a6) # store the src optype tag
20133: rts
20134:
20135: #################################################
20136: # load a packed into %fp0: #
20137: # -number can't fault #
20138: # (1) calc ea #
20139: # (2) read 12 bytes into L_SCR(1,2,3) #
20140: # (3) fmov.x into %fp0 #
20141: #################################################
# load_packed(): fetch and convert a packed decimal source operand via
# get_packed (which leaves the result in FP_SRC(%a6)), then tag it with
# set_tag_x, fixing UNNORM results through unnorm_fix, and store the
# optype tag in STAG(%a6).
20142: load_packed:
20143: bsr.l get_packed
20144:
20145: lea FP_SRC(%a6),%a0 # pass ptr to src op
20146: bsr.l set_tag_x # determine src type tag
20147: cmpi.b %d0,&UNNORM # is the src op an UNNORM ZERO?
20148: beq.b load_packed_unnorm # yes
20149:
20150: mov.b %d0,STAG(%a6) # store the src optype tag
20151: rts
20152:
20153: load_packed_unnorm:
20154: bsr.l unnorm_fix # fix the UNNORM ZERO
20155: mov.b %d0,STAG(%a6) # store the src optype tag
20156: rts
20157:
20158: #########################################################################
20159: # XDEF **************************************************************** #
20160: # fout(): move from fp register to memory or data register #
20161: # #
20162: # XREF **************************************************************** #
20163: # _round() - needed to create EXOP for sgl/dbl precision #
20164: # norm() - needed to create EXOP for extended precision #
20165: # ovf_res() - create default overflow result for sgl/dbl precision#
20166: # unf_res() - create default underflow result for sgl/dbl prec. #
20167: # dst_dbl() - create rounded dbl precision result. #
20168: # dst_sgl() - create rounded sgl precision result. #
20169: # fetch_dreg() - fetch dynamic k-factor reg for packed. #
20170: # bindec() - convert FP binary number to packed number. #
20171: # _mem_write() - write data to memory. #
20172: # _mem_write2() - write data to memory unless supv mode -(a7) exc.#
20173: # _dmem_write_{byte,word,long}() - write data to memory. #
20174: # store_dreg_{b,w,l}() - store data to data register file. #
20175: # facc_out_{b,w,l,d,x}() - data access error occurred. #
20176: # #
20177: # INPUT *************************************************************** #
20178: # a0 = pointer to extended precision source operand #
20179: # d0 = round prec,mode #
20180: # #
20181: # OUTPUT ************************************************************** #
20182: # fp0 : intermediate underflow or overflow result if #
20183: # OVFL/UNFL occurred for a sgl or dbl operand #
20184: # #
20185: # ALGORITHM *********************************************************** #
20186: # This routine is accessed by many handlers that need to do an #
20187: # opclass three move of an operand out to memory. #
20188: # Decode an fmove out (opclass 3) instruction to determine if #
20189: # it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data #
20190: # register or memory. The algorithm uses a standard "fmove" to create #
20191: # the rounded result. Also, since exceptions are disabled, this also #
20192: # create the correct OPERR default result if appropriate. #
20193: # For sgl or dbl precision, overflow or underflow can occur. If #
20194: # either occurs and is enabled, the EXOP. #
20195: # For extended precision, the stacked <ea> must be fixed along #
20196: # w/ the address index register as appropriate w/ _calc_ea_fout(). If #
20197: # the source is a denorm and if underflow is enabled, an EXOP must be #
20198: # created. #
20199: # For packed, the k-factor must be fetched from the instruction #
20200: # word or a data register. The <ea> must be fixed as w/ extended #
20201: # precision. Then, bindec() is called to create the appropriate #
20202: # packed result. #
20203: # If at any time an access error is flagged by one of the move- #
20204: # to-memory routines, then a special exit must be made so that the #
20205: # access error can be handled properly. #
20206: # #
20207: #########################################################################
20208:
# fout(): opclass-3 "fmove out" dispatcher. The 3-bit destination
# format field of the command register selects one of the per-format
# handlers below through the self-relative jump table tbl_fout
# (table order: l, s, x, p, w, d, b, p-dynamic).
20209: global fout
20210: fout:
20211: bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
20212: mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
20213: jmp (tbl_fout.b,%pc,%a1) # jump to routine
20214:
20215: swbeg &0x8
20216: tbl_fout:
20217: short fout_long - tbl_fout
20218: short fout_sgl - tbl_fout
20219: short fout_ext - tbl_fout
20220: short fout_pack - tbl_fout
20221: short fout_word - tbl_fout
20222: short fout_dbl - tbl_fout
20223: short fout_byte - tbl_fout
20224: short fout_pack - tbl_fout
20225:
20226: #################################################################
20227: # fmove.b out ###################################################
20228: #################################################################
20229:
20230: # Only "Unimplemented Data Type" exceptions enter here. The operand
20231: # is either a DENORM or a NORM.
# fout_byte(): fmove.b out of a NORM or DENORM operand (SRC(%a0)).
# A DENORM is first replaced by the smallest-magnitude sgl with the
# same sign so the fmov.b produces the correct default result.
# The rounded byte is written either to memory (stacked <ea>) or to
# an integer data register, per the dst mode in the opword.
20232: fout_byte:
20233: tst.b STAG(%a6) # is operand normalized?
20234: bne.b fout_byte_denorm # no
20235:
20236: fmovm.x SRC(%a0),&0x80 # load value
20237:
20238: fout_byte_norm:
20239: fmov.l %d0,%fpcr # insert rnd prec,mode
20240:
20241: fmov.b %fp0,%d0 # exec move out w/ correct rnd mode
20242:
20243: fmov.l &0x0,%fpcr # clear FPCR
20244: fmov.l %fpsr,%d1 # fetch FPSR
20245: or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
20246:
20247: mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20248: andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20249: beq.b fout_byte_dn # must save to integer regfile
20250:
20251: mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
20252: bsr.l _dmem_write_byte # write byte
20253:
20254: tst.l %d1 # did dstore fail?
20255: bne.l facc_out_b # yes
20256:
20257: rts
20258:
20259: fout_byte_dn:
20260: mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20261: andi.w &0x7,%d1
20262: bsr.l store_dreg_b
20263: rts
20264:
20265: fout_byte_denorm:
20266: mov.l SRC_EX(%a0),%d1
20267: andi.l &0x80000000,%d1 # keep DENORM sign
20268: ori.l &0x00800000,%d1 # make smallest sgl
20269: fmov.s %d1,%fp0
20270: bra.b fout_byte_norm
20271:
20272: #################################################################
20273: # fmove.w out ###################################################
20274: #################################################################
20275:
20276: # Only "Unimplemented Data Type" exceptions enter here. The operand
20277: # is either a DENORM or a NORM.
# fout_word(): fmove.w out of a NORM or DENORM operand. Identical
# structure to fout_byte above, but rounds to a word and writes via
# _dmem_write_word / store_dreg_w.
20278: fout_word:
20279: tst.b STAG(%a6) # is operand normalized?
20280: bne.b fout_word_denorm # no
20281:
20282: fmovm.x SRC(%a0),&0x80 # load value
20283:
20284: fout_word_norm:
20285: fmov.l %d0,%fpcr # insert rnd prec:mode
20286:
20287: fmov.w %fp0,%d0 # exec move out w/ correct rnd mode
20288:
20289: fmov.l &0x0,%fpcr # clear FPCR
20290: fmov.l %fpsr,%d1 # fetch FPSR
20291: or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
20292:
20293: mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20294: andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20295: beq.b fout_word_dn # must save to integer regfile
20296:
20297: mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
20298: bsr.l _dmem_write_word # write word
20299:
20300: tst.l %d1 # did dstore fail?
20301: bne.l facc_out_w # yes
20302:
20303: rts
20304:
20305: fout_word_dn:
20306: mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20307: andi.w &0x7,%d1
20308: bsr.l store_dreg_w
20309: rts
20310:
20311: fout_word_denorm:
20312: mov.l SRC_EX(%a0),%d1
20313: andi.l &0x80000000,%d1 # keep DENORM sign
20314: ori.l &0x00800000,%d1 # make smallest sgl
20315: fmov.s %d1,%fp0
20316: bra.b fout_word_norm
20317:
20318: #################################################################
20319: # fmove.l out ###################################################
20320: #################################################################
20321:
20322: # Only "Unimplemented Data Type" exceptions enter here. The operand
20323: # is either a DENORM or a NORM.
# fout_long(): fmove.l out of a NORM or DENORM operand. Identical
# structure to fout_byte/fout_word above, but rounds to a longword and
# writes via _dmem_write_long / store_dreg_l.
20324: fout_long:
20325: tst.b STAG(%a6) # is operand normalized?
20326: bne.b fout_long_denorm # no
20327:
20328: fmovm.x SRC(%a0),&0x80 # load value
20329:
20330: fout_long_norm:
20331: fmov.l %d0,%fpcr # insert rnd prec:mode
20332:
20333: fmov.l %fp0,%d0 # exec move out w/ correct rnd mode
20334:
20335: fmov.l &0x0,%fpcr # clear FPCR
20336: fmov.l %fpsr,%d1 # fetch FPSR
20337: or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
20338:
20339: fout_long_write:
20340: mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20341: andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20342: beq.b fout_long_dn # must save to integer regfile
20343:
20344: mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
20345: bsr.l _dmem_write_long # write long
20346:
20347: tst.l %d1 # did dstore fail?
20348: bne.l facc_out_l # yes
20349:
20350: rts
20351:
20352: fout_long_dn:
20353: mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20354: andi.w &0x7,%d1
20355: bsr.l store_dreg_l
20356: rts
20357:
20358: fout_long_denorm:
20359: mov.l SRC_EX(%a0),%d1
20360: andi.l &0x80000000,%d1 # keep DENORM sign
20361: ori.l &0x00800000,%d1 # make smallest sgl
20362: fmov.s %d1,%fp0
20363: bra.b fout_long_norm
20364:
20365: #################################################################
20366: # fmove.x out ###################################################
20367: #################################################################
20368:
20369: # Only "Unimplemented Data Type" exceptions enter here. The operand
20370: # is either a DENORM or a NORM.
20371: # The DENORM causes an Underflow exception.
# fout_ext(): fmove.x out. Copies the operand to FP_SCR0 (zeroing the
# reserved 16-bit field), fixes the stacked <ea> via _calc_ea_fout, and
# writes 12 bytes to memory — through _mem_write2 for the supervisor
# -(a7) special case so the exception stack frame is not corrupted.
# A DENORM source sets the UNFL bit and, if UNFL or INEX is enabled,
# builds the EXOP (normalized mantissa + adjusted exponent) in fp1.
20372: fout_ext:
20373:
20374: # we copy the extended precision result to FP_SCR0 so that the reserved
20375: # 16-bit field gets zeroed. we do this since we promise not to disturb
20376: # what's at SRC(a0).
20377: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
20378: clr.w 2+FP_SCR0_EX(%a6) # clear reserved field
20379: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
20380: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
20381:
20382: fmovm.x SRC(%a0),&0x80 # return result
20383:
20384: bsr.l _calc_ea_fout # fix stacked <ea>
20385:
20386: mov.l %a0,%a1 # pass: dst addr
20387: lea FP_SCR0(%a6),%a0 # pass: src addr
20388: mov.l &0xc,%d0 # pass: opsize is 12 bytes
20389:
20390: # we must not yet write the extended precision data to the stack
20391: # in the pre-decrement case from supervisor mode or else we'll corrupt
20392: # the stack frame. so, leave it in FP_SRC for now and deal with it later...
20393: cmpi.b SPCOND_FLG(%a6),&mda7_flg
20394: beq.b fout_ext_a7
20395:
20396: bsr.l _dmem_write # write ext prec number to memory
20397:
20398: tst.l %d1 # did dstore fail?
20399: bne.w fout_ext_err # yes
20400:
20401: tst.b STAG(%a6) # is operand normalized?
20402: bne.b fout_ext_denorm # no
20403: rts
20404:
20405: # the number is a DENORM. must set the underflow exception bit
20406: fout_ext_denorm:
20407: bset &unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
20408:
20409: mov.b FPCR_ENABLE(%a6),%d0
20410: andi.b &0x0a,%d0 # is UNFL or INEX enabled?
20411: bne.b fout_ext_exc # yes
20412: rts
20413:
20414: # we don't want to do the write if the exception occurred in supervisor mode
20415: # so _mem_write2() handles this for us.
20416: fout_ext_a7:
20417: bsr.l _mem_write2 # write ext prec number to memory
20418:
20419: tst.l %d1 # did dstore fail?
20420: bne.w fout_ext_err # yes
20421:
20422: tst.b STAG(%a6) # is operand normalized?
20423: bne.b fout_ext_denorm # no
20424: rts
20425:
# build the EXOP: normalize the DENORM mantissa; the new (biased) exp
# is the negated shift count returned by norm(), merged under the old
# sign bit. EXOP is returned in fp1.
20426: fout_ext_exc:
20427: lea FP_SCR0(%a6),%a0
20428: bsr.l norm # normalize the mantissa
20429: neg.w %d0 # new exp = -(shft amt)
20430: andi.w &0x7fff,%d0
20431: andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
20432: or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
20433: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
20434: rts
20435:
20436: fout_ext_err:
20437: mov.l EXC_A6(%a6),(%a6) # fix stacked a6
20438: bra.l facc_out_x
20439:
20440: #########################################################################
20441: # fmove.s out ###########################################################
20442: #########################################################################
# fout_sgl(): fmove.s out. Forces sgl rounding precision into the
# saved prec/mode word (L_SCR3), then classifies the operand exponent
# against SGL_HI/SGL_LO to route to overflow, possible-overflow, or
# underflow handlers; in-range values are stored with a plain fmov.s.
20443: fout_sgl:
20444: andi.b &0x30,%d0 # clear rnd prec
20445: ori.b &s_mode*0x10,%d0 # insert sgl prec
20446: mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
20447:
20448: #
20449: # operand is a normalized number. first, we check to see if the move out
20450: # would cause either an underflow or overflow. these cases are handled
20451: # separately. otherwise, set the FPCR to the proper rounding mode and
20452: # execute the move.
20453: #
20454: mov.w SRC_EX(%a0),%d0 # extract exponent
20455: andi.w &0x7fff,%d0 # strip sign
20456:
20457: cmpi.w %d0,&SGL_HI # will operand overflow?
20458: bgt.w fout_sgl_ovfl # yes; go handle OVFL
20459: beq.w fout_sgl_may_ovfl # maybe; go handle possible OVFL
20460: cmpi.w %d0,&SGL_LO # will operand underflow?
20461: blt.w fout_sgl_unfl # yes; go handle underflow
20462:
20463: #
20464: # NORMs(in range) can be stored out by a simple "fmov.s"
20465: # Unnormalized inputs can come through this point.
20466: #
20467: fout_sgl_exg:
20468: fmovm.x SRC(%a0),&0x80 # fetch fop from stack
20469:
20470: fmov.l L_SCR3(%a6),%fpcr # set FPCR
20471: fmov.l &0x0,%fpsr # clear FPSR
20472:
20473: fmov.s %fp0,%d0 # store does convert and round
20474:
20475: fmov.l &0x0,%fpcr # clear FPCR
20476: fmov.l %fpsr,%d1 # save FPSR
20477:
20478: or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex
20479:
20480: fout_sgl_exg_write:
20481: mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20482: andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20483: beq.b fout_sgl_exg_write_dn # must save to integer regfile
20484:
20485: mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
20486: bsr.l _dmem_write_long # write long
20487:
20488: tst.l %d1 # did dstore fail?
20489: bne.l facc_out_l # yes
20490:
20491: rts
20492:
20493: fout_sgl_exg_write_dn:
20494: mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20495: andi.w &0x7,%d1
20496: bsr.l store_dreg_l
20497: rts
20498:
20499: #
20500: # here, we know that the operand would UNFL if moved out to single prec,
20501: # so, denorm and round and then use generic store single routine to
20502: # write the value to memory.
20503: #
# fout_sgl_unfl(): the operand underflows in sgl precision. Copy it to
# FP_SCR0 (normalizing a DENORM first), let unf_res() build the default
# underflow result, convert with dst_sgl, and store it. The original
# operand pointer is kept on the stack for fout_sd_exc_unfl, which
# builds the EXOP when UNFL or INEX is enabled.
20504: fout_sgl_unfl:
20505: bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
20506:
20507: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
20508: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
20509: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
20510: mov.l %a0,-(%sp)
20511:
20512: clr.l %d0 # pass: S.F. = 0
20513:
20514: cmpi.b STAG(%a6),&DENORM # fetch src optype tag
20515: bne.b fout_sgl_unfl_cont # let DENORMs fall through
20516:
20517: lea FP_SCR0(%a6),%a0
20518: bsr.l norm # normalize the DENORM
20519:
20520: fout_sgl_unfl_cont:
20521: lea FP_SCR0(%a6),%a0 # pass: ptr to operand
20522: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
20523: bsr.l unf_res # calc default underflow result
20524:
20525: lea FP_SCR0(%a6),%a0 # pass: ptr to fop
20526: bsr.l dst_sgl # convert to single prec
20527:
20528: mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20529: andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20530: beq.b fout_sgl_unfl_dn # must save to integer regfile
20531:
20532: mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
20533: bsr.l _dmem_write_long # write long
20534:
20535: tst.l %d1 # did dstore fail?
20536: bne.l facc_out_l # yes
20537:
20538: bra.b fout_sgl_unfl_chkexc
20539:
20540: fout_sgl_unfl_dn:
20541: mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20542: andi.w &0x7,%d1
20543: bsr.l store_dreg_l
20544:
20545: fout_sgl_unfl_chkexc:
20546: mov.b FPCR_ENABLE(%a6),%d1
20547: andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20548: bne.w fout_sd_exc_unfl # yes
20549: addq.l &0x4,%sp
# pop the saved %a0 before returning (the EXOP path pops it instead)
20550: rts
20551:
20552: #
20553: # it's definitely an overflow so call ovf_res to get the correct answer
20554: #
# fout_sgl_ovfl(): the operand overflows in sgl precision. Mark the
# result inexact if any mantissa bits below sgl precision are set, call
# ovf_res() for the default overflow result, store it, and hand off to
# fout_sd_exc_ovfl (EXOP creation) when enabled exceptions require it.
20555: fout_sgl_ovfl:
20556: tst.b 3+SRC_HI(%a0) # is result inexact?
20557: bne.b fout_sgl_ovfl_inex2
20558: tst.l SRC_LO(%a0) # is result inexact?
20559: bne.b fout_sgl_ovfl_inex2
20560: ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
20561: bra.b fout_sgl_ovfl_cont
20562: fout_sgl_ovfl_inex2:
20563: ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
20564:
20565: fout_sgl_ovfl_cont:
20566: mov.l %a0,-(%sp)
20567:
20568: # call ovf_res() w/ sgl prec and the correct rnd mode to create the default
20569: # overflow result. DON'T save the returned ccodes from ovf_res() since
20570: # fmove out doesn't alter them.
20571: tst.b SRC_EX(%a0) # is operand negative?
20572: smi %d1 # set if so
20573: mov.l L_SCR3(%a6),%d0 # pass: sgl prec,rnd mode
20574: bsr.l ovf_res # calc OVFL result
20575: fmovm.x (%a0),&0x80 # load default overflow result
20576: fmov.s %fp0,%d0 # store to single
20577:
20578: mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
20579: andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
20580: beq.b fout_sgl_ovfl_dn # must save to integer regfile
20581:
20582: mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
20583: bsr.l _dmem_write_long # write long
20584:
20585: tst.l %d1 # did dstore fail?
20586: bne.l facc_out_l # yes
20587:
20588: bra.b fout_sgl_ovfl_chkexc
20589:
20590: fout_sgl_ovfl_dn:
20591: mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
20592: andi.w &0x7,%d1
20593: bsr.l store_dreg_l
20594:
20595: fout_sgl_ovfl_chkexc:
20596: mov.b FPCR_ENABLE(%a6),%d1
# NOTE(review): mask 0x0a selects the same enable bits as the underflow
# path even though this is the overflow path — verify against the FPCR
# exception-enable byte layout (OVFL vs UNFL bits) in the 060 manual.
20597: andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20598: bne.w fout_sd_exc_ovfl # yes
20599: addq.l &0x4,%sp
20600: rts
20601:
20602: #
20603: # move out MAY overflow:
20604: # (1) force the exp to 0x3fff
20605: # (2) do a move w/ appropriate rnd mode
20606: # (3) if exp still equals zero, then insert original exponent
20607: # for the correct result.
20608: # if exp now equals one, then it overflowed so call ovf_res.
20609: #
# fout_sgl_may_ovfl(): exponent sits exactly at SGL_HI, so rounding may
# push it over. Re-round a copy with the exponent forced to 0x3fff; if
# the rounded absolute value reaches 2.0 the exponent incremented and
# the move overflows, otherwise finish as an in-range NORM.
20610: fout_sgl_may_ovfl:
20611: mov.w SRC_EX(%a0),%d1 # fetch current sign
20612: andi.w &0x8000,%d1 # keep it,clear exp
20613: ori.w &0x3fff,%d1 # insert exp = 0
20614: mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
20615: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
20616: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
20617:
20618: fmov.l L_SCR3(%a6),%fpcr # set FPCR
20619:
20620: fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
20621: fmov.l &0x0,%fpcr # clear FPCR
20622:
20623: fabs.x %fp0 # need absolute value
20624: fcmp.b %fp0,&0x2 # did exponent increase?
20625: fblt.w fout_sgl_exg # no; go finish NORM
20626: bra.w fout_sgl_ovfl # yes; go handle overflow
20627:
20628: ################
20629:
# fout_sd_exc_unfl()/fout_sd_exc_ovfl(): shared sgl/dbl EXOP builder,
# entered with the original operand pointer on the stack. The operand
# is copied to FP_SCR0 (a DENORM source is normalized first on the
# underflow entry), rounded via _round() with the saved prec/mode from
# L_SCR3, and returned as the EXOP in fp1. The sign is parked in the
# reserved byte 2+FP_SCR0_EX across the rounding and restored after.
20630: fout_sd_exc_unfl:
20631: mov.l (%sp)+,%a0
20632:
20633: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
20634: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
20635: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
20636:
20637: cmpi.b STAG(%a6),&DENORM # was src a DENORM?
20638: bne.b fout_sd_exc_cont # no
20639:
20640: lea FP_SCR0(%a6),%a0
20641: bsr.l norm
20642: neg.l %d0
20643: andi.w &0x7fff,%d0
20644: bfins %d0,FP_SCR0_EX(%a6){&1:&15}
20645: bra.b fout_sd_exc_cont
20646:
20647: fout_sd_exc:
20648: fout_sd_exc_ovfl:
20649: mov.l (%sp)+,%a0 # restore a0
20650:
20651: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
20652: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
20653: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
20654:
20655: fout_sd_exc_cont:
20656: bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
20657: sne.b 2+FP_SCR0_EX(%a6) # set internal sign bit
20658: lea FP_SCR0(%a6),%a0 # pass: ptr to DENORM
20659:
# unpack the saved prec/mode byte from L_SCR3 into the d1 format that
# _round() expects: precision in the upper word, rnd mode in the lower
20660: mov.b 3+L_SCR3(%a6),%d1
20661: lsr.b &0x4,%d1
20662: andi.w &0x0c,%d1
20663: swap %d1
20664: mov.b 3+L_SCR3(%a6),%d1
20665: lsr.b &0x4,%d1
20666: andi.w &0x03,%d1
20667: clr.l %d0 # pass: zero g,r,s
20668: bsr.l _round # round the DENORM
20669:
20670: tst.b 2+FP_SCR0_EX(%a6) # is EXOP negative?
20671: beq.b fout_sd_exc_done # no
20672: bset &0x7,FP_SCR0_EX(%a6) # yes
20673:
20674: fout_sd_exc_done:
20675: fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
20676: rts
20677:
20678: #################################################################
20679: # fmove.d out ###################################################
20680: #################################################################
# fout_dbl(): fmove.d out. Forces dbl rounding precision into L_SCR3,
# classifies the exponent against DBL_HI/DBL_LO to route to overflow,
# possible-overflow, or underflow handlers; in-range values are rounded
# with fmov.d into L_SCR1/2 and written with one 8-byte _dmem_write.
20681: fout_dbl:
20682: andi.b &0x30,%d0 # clear rnd prec
20683: ori.b &d_mode*0x10,%d0 # insert dbl prec
20684: mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
20685:
20686: #
20687: # operand is a normalized number. first, we check to see if the move out
20688: # would cause either an underflow or overflow. these cases are handled
20689: # separately. otherwise, set the FPCR to the proper rounding mode and
20690: # execute the move.
20691: #
20692: mov.w SRC_EX(%a0),%d0 # extract exponent
20693: andi.w &0x7fff,%d0 # strip sign
20694:
20695: cmpi.w %d0,&DBL_HI # will operand overflow?
20696: bgt.w fout_dbl_ovfl # yes; go handle OVFL
20697: beq.w fout_dbl_may_ovfl # maybe; go handle possible OVFL
20698: cmpi.w %d0,&DBL_LO # will operand underflow?
20699: blt.w fout_dbl_unfl # yes; go handle underflow
20700:
20701: #
20702: # NORMs(in range) can be stored out by a simple "fmov.d"
20703: # Unnormalized inputs can come through this point.
20704: #
20705: fout_dbl_exg:
20706: fmovm.x SRC(%a0),&0x80 # fetch fop from stack
20707:
20708: fmov.l L_SCR3(%a6),%fpcr # set FPCR
20709: fmov.l &0x0,%fpsr # clear FPSR
20710:
20711: fmov.d %fp0,L_SCR1(%a6) # store does convert and round
20712:
20713: fmov.l &0x0,%fpcr # clear FPCR
20714: fmov.l %fpsr,%d0 # save FPSR
20715:
20716: or.w %d0,2+USER_FPSR(%a6) # set possible inex2/ainex
20717:
20718: mov.l EXC_EA(%a6),%a1 # pass: dst addr
20719: lea L_SCR1(%a6),%a0 # pass: src addr
20720: movq.l &0x8,%d0 # pass: opsize is 8 bytes
20721: bsr.l _dmem_write # store dbl fop to memory
20722:
20723: tst.l %d1 # did dstore fail?
20724: bne.l facc_out_d # yes
20725:
20726: rts # no; so we're finished
20727:
20728: #
20729: # here, we know that the operand would UNFL if moved out to double prec,
20730: # so, denorm and round and then use generic store double routine to
20731: # write the value to memory.
20732: #
# fout_dbl_unfl(): the operand underflows in dbl precision. Same shape
# as fout_sgl_unfl: copy to FP_SCR0 (normalizing a DENORM), build the
# default underflow result with unf_res(), convert with dst_dbl, and
# write the 8-byte result; hand off to fout_sd_exc_unfl for the EXOP
# when UNFL or INEX is enabled (saved %a0 still on the stack).
20733: fout_dbl_unfl:
20734: bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
20735:
20736: mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
20737: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
20738: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
20739: mov.l %a0,-(%sp)
20740:
20741: clr.l %d0 # pass: S.F. = 0
20742:
20743: cmpi.b STAG(%a6),&DENORM # fetch src optype tag
20744: bne.b fout_dbl_unfl_cont # let DENORMs fall through
20745:
20746: lea FP_SCR0(%a6),%a0
20747: bsr.l norm # normalize the DENORM
20748:
20749: fout_dbl_unfl_cont:
20750: lea FP_SCR0(%a6),%a0 # pass: ptr to operand
20751: mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
20752: bsr.l unf_res # calc default underflow result
20753:
20754: lea FP_SCR0(%a6),%a0 # pass: ptr to fop
20755: bsr.l dst_dbl # convert to double prec
20756: mov.l %d0,L_SCR1(%a6)
20757: mov.l %d1,L_SCR2(%a6)
20758:
20759: mov.l EXC_EA(%a6),%a1 # pass: dst addr
20760: lea L_SCR1(%a6),%a0 # pass: src addr
20761: movq.l &0x8,%d0 # pass: opsize is 8 bytes
20762: bsr.l _dmem_write # store dbl fop to memory
20763:
20764: tst.l %d1 # did dstore fail?
20765: bne.l facc_out_d # yes
20766:
20767: mov.b FPCR_ENABLE(%a6),%d1
20768: andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20769: bne.w fout_sd_exc_unfl # yes
20770: addq.l &0x4,%sp
20771: rts
20772:
20773: #
20774: # it's definitely an overflow so call ovf_res to get the correct answer
20775: #
# fout_dbl_ovfl(): the operand overflows in dbl precision. Mark inexact
# if any mantissa bits below dbl precision are set, call ovf_res() for
# the default overflow result, store the 8 bytes, then hand off to
# fout_sd_exc_ovfl for the EXOP when enabled exceptions require it.
20776: fout_dbl_ovfl:
20777: mov.w 2+SRC_LO(%a0),%d0
20778: andi.w &0x7ff,%d0
20779: bne.b fout_dbl_ovfl_inex2
20780:
20781: ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
20782: bra.b fout_dbl_ovfl_cont
20783: fout_dbl_ovfl_inex2:
20784: ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
20785:
20786: fout_dbl_ovfl_cont:
20787: mov.l %a0,-(%sp)
20788:
20789: # call ovf_res() w/ dbl prec and the correct rnd mode to create the default
20790: # overflow result. DON'T save the returned ccodes from ovf_res() since
20791: # fmove out doesn't alter them.
20792: tst.b SRC_EX(%a0) # is operand negative?
20793: smi %d1 # set if so
20794: mov.l L_SCR3(%a6),%d0 # pass: dbl prec,rnd mode
20795: bsr.l ovf_res # calc OVFL result
20796: fmovm.x (%a0),&0x80 # load default overflow result
20797: fmov.d %fp0,L_SCR1(%a6) # store to double
20798:
20799: mov.l EXC_EA(%a6),%a1 # pass: dst addr
20800: lea L_SCR1(%a6),%a0 # pass: src addr
20801: movq.l &0x8,%d0 # pass: opsize is 8 bytes
20802: bsr.l _dmem_write # store dbl fop to memory
20803:
20804: tst.l %d1 # did dstore fail?
20805: bne.l facc_out_d # yes
20806:
20807: mov.b FPCR_ENABLE(%a6),%d1
# NOTE(review): mask 0x0a matches the underflow path even though this
# is the overflow path — verify against the FPCR exception-enable byte
# layout (OVFL vs UNFL bits) in the 060 manual.
20808: andi.b &0x0a,%d1 # is UNFL or INEX enabled?
20809: bne.w fout_sd_exc_ovfl # yes
20810: addq.l &0x4,%sp
20811: rts
20812:
20813: #
20814: # move out MAY overflow:
20815: # (1) force the exp to 0x3fff
20816: # (2) do a move w/ appropriate rnd mode
20817: # (3) if exp still equals zero, then insert original exponent
20818: # for the correct result.
20819: # if exp now equals one, then it overflowed so call ovf_res.
20820: #
# fout_dbl_may_ovfl(): exponent sits exactly at DBL_HI, so rounding may
# push it over. Same technique as fout_sgl_may_ovfl: re-round a copy
# with the exponent forced to 0x3fff; if |result| reaches 2.0 the move
# overflows, otherwise finish as an in-range NORM.
20821: fout_dbl_may_ovfl:
20822: mov.w SRC_EX(%a0),%d1 # fetch current sign
20823: andi.w &0x8000,%d1 # keep it,clear exp
20824: ori.w &0x3fff,%d1 # insert exp = 0
20825: mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
20826: mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
20827: mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
20828:
20829: fmov.l L_SCR3(%a6),%fpcr # set FPCR
20830:
20831: fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
20832: fmov.l &0x0,%fpcr # clear FPCR
20833:
20834: fabs.x %fp0 # need absolute value
20835: fcmp.b %fp0,&0x2 # did exponent increase?
20836: fblt.w fout_dbl_exg # no; go finish NORM
20837: bra.w fout_dbl_ovfl # yes; go handle overflow
20838:
20839: #########################################################################
20840: # XDEF **************************************************************** #
20841: # dst_dbl(): create double precision value from extended prec. #
20842: # #
20843: # XREF **************************************************************** #
20844: # None #
20845: # #
20846: # INPUT *************************************************************** #
20847: # a0 = pointer to source operand in extended precision #
20848: # #
20849: # OUTPUT ************************************************************** #
20850: # d0 = hi(double precision result) #
20851: # d1 = lo(double precision result) #
20852: # #
20853: # ALGORITHM *********************************************************** #
20854: # #
20855: # Changes extended precision to double precision. #
20856: # Note: no attempt is made to round the extended value to double. #
20857: # dbl_sign = ext_sign #
20858: # dbl_exp = ext_exp - $3fff(ext bias) + $7ff(dbl bias) #
20859: # get rid of ext integer bit #
20860: # dbl_mant = ext_mant{62:12} #
20861: # #
20862: # --------------- --------------- --------------- #
20863: # extended -> |s| exp | |1| ms mant | | ls mant | #
20864: # --------------- --------------- --------------- #
20865: # 95 64 63 62 32 31 11 0 #
20866: # | | #
20867: # | | #
20868: # | | #
20869: # v v #
20870: # --------------- --------------- #
20871: # double -> |s|exp| mant | | mant | #
20872: # --------------- --------------- #
20873: # 63 51 32 31 0 #
20874: # #
20875: #########################################################################
20876:
# dst_dbl(): repack the extended precision value at (%a0) as a double
# (truncating, no rounding — see banner above): rebias the exponent,
# drop the explicit integer bit, and return hi/lo dbl lwords in d0/d1.
# A denorm input (j-bit clear in FTEMP_HI) gets a bias of DBL_BIAS - 1.
20877: dst_dbl:
20878: clr.l %d0 # clear d0
20879: mov.w FTEMP_EX(%a0),%d0 # get exponent
20880: subi.w &EXT_BIAS,%d0 # subtract extended precision bias
20881: addi.w &DBL_BIAS,%d0 # add double precision bias
20882: tst.b FTEMP_HI(%a0) # is number a denorm?
20883: bmi.b dst_get_dupper # no
20884: subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
20885: dst_get_dupper:
20886: swap %d0 # d0 now in upper word
20887: lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
20888: tst.b FTEMP_EX(%a0) # test sign
20889: bpl.b dst_get_dman # if positive, go process mantissa
20890: bset &0x1f,%d0 # if negative, set sign
20891: dst_get_dman:
20892: mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
20893: bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms
20894: or.l %d1,%d0 # put these bits in ms word of double
20895: mov.l %d0,L_SCR1(%a6) # put the new exp back on the stack
20896: mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
20897: mov.l &21,%d0 # load shift count
20898: lsl.l %d0,%d1 # put lower 11 bits in upper bits
20899: mov.l %d1,L_SCR2(%a6) # build lower lword in memory
20900: mov.l FTEMP_LO(%a0),%d1 # get ls mantissa
20901: bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
20902: mov.l L_SCR2(%a6),%d1
20903: or.l %d0,%d1 # put them in double result
20904: mov.l L_SCR1(%a6),%d0
20905: rts
20906:
20907: #########################################################################
20908: # XDEF **************************************************************** #
20909: # dst_sgl(): create single precision value from extended prec #
20910: # #
20911: # XREF **************************************************************** #
20912: # #
20913: # INPUT *************************************************************** #
20914: # a0 = pointer to source operand in extended precision #
20915: # #
20916: # OUTPUT ************************************************************** #
20917: # d0 = single precision result #
20918: # #
20919: # ALGORITHM *********************************************************** #
20920: # #
20921: # Changes extended precision to single precision. #
20922: # sgl_sign = ext_sign #
20923: # sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias) #
20924: # get rid of ext integer bit #
20925: # sgl_mant = ext_mant{62:40} #
20926: # #
20927: # --------------- --------------- --------------- #
20928: # extended -> |s| exp | |1| ms mant | | ls mant | #
20929: # --------------- --------------- --------------- #
20930: # 95 64 63 62 40 32 31 12 0 #
20931: # | | #
20932: # | | #
20933: # | | #
20934: # v v #
20935: # --------------- #
20936: # single -> |s|exp| mant | #
20937: # --------------- #
20938: # 31 22 0 #
20939: # #
20940: #########################################################################
20941:
20942: dst_sgl:
20943: clr.l %d0 # clear d0
20944: mov.w FTEMP_EX(%a0),%d0 # get exponent
20945: subi.w &EXT_BIAS,%d0 # subtract extended precision bias
20946: addi.w &SGL_BIAS,%d0 # add single precision bias
20947: tst.b FTEMP_HI(%a0) # is number a denorm? (test j-bit)
20948: bmi.b dst_get_supper # no; j-bit set -> normalized
20949: subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
20950: dst_get_supper:
20951: swap %d0 # put exp in upper word of d0
20952: lsl.l &0x7,%d0 # shift it into single exp bits
20953: tst.b FTEMP_EX(%a0) # test sign
20954: bpl.b dst_get_sman # if positive, continue
20955: bset &0x1f,%d0 # if negative, put in sign first
20956: dst_get_sman:
20957: mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
20958: andi.l &0x7fffff00,%d1 # get upper 23 bits of ms (drop j-bit)
20959: lsr.l &0x8,%d1 # and put them flush right
20960: or.l %d1,%d0 # put these bits in ms word of single
20961: rts
20962:
20963: ##############################################################################
20964: fout_pack:
20965: bsr.l _calc_ea_fout # fetch the <ea>
20966: mov.l %a0,-(%sp) # save dst addr for the later write
20967:
20968: mov.b STAG(%a6),%d0 # fetch input type
20969: bne.w fout_pack_not_norm # input is not NORM
20970:
20971: fout_pack_norm:
20972: btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
20973: beq.b fout_pack_s # static
20974:
20975: fout_pack_d:
20976: mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg
20977: lsr.b &0x4,%d1 # shift reg field to lo bits
20978: andi.w &0x7,%d1 # isolate Dn index
20979:
20980: bsr.l fetch_dreg # fetch Dn w/ k-factor
20981:
20982: bra.b fout_pack_type
20983: fout_pack_s:
20984: mov.b 1+EXC_CMDREG(%a6),%d0 # fetch static field
20985:
20986: fout_pack_type:
20987: bfexts %d0{&25:&7},%d0 # extract k-factor
20988: mov.l %d0,-(%sp) # save k-factor across bindec
20989:
20990: lea FP_SRC(%a6),%a0 # pass: ptr to input
20991:
20992: # bindec is currently scrambling FP_SRC for denorm inputs.
20993: # we'll have to change this, but for now, tough luck!!!
20994: bsr.l bindec # convert xprec to packed
20995:
20996: # andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
20997: andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
20998:
20999: mov.l (%sp)+,%d0 # restore k-factor
21000:
21001: tst.b 3+FP_SCR0_EX(%a6) # 1st digit of packed mantissa zero?
21002: bne.b fout_pack_set # no; mantissa nonzero, go write
21003: tst.l FP_SCR0_HI(%a6) # hi mantissa lword zero?
21004: bne.b fout_pack_set # no; mantissa nonzero, go write
21005: tst.l FP_SCR0_LO(%a6) # lo mantissa lword zero?
21006: bne.b fout_pack_set # no; mantissa nonzero, go write
21007:
21008: # add the extra condition that only if the k-factor was zero, too, should
21009: # we zero the exponent
21010: tst.l %d0 # was k-factor zero?
21011: bne.b fout_pack_set # no; leave exponent alone
21012: # "mantissa" is all zero which means that the answer is zero. but, the '040
21013: # algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
21014: # if the mantissa is zero, I will zero the exponent, too.
21015: # the question now is whether the exponent's sign bit is allowed to be non-zero
21016: # for a zero, also...
21017: andi.w &0xf000,FP_SCR0(%a6) # zero exponent digits, keep sign bits
21018:
21019: fout_pack_set:
21020:
21021: lea FP_SCR0(%a6),%a0 # pass: src addr
21022:
21023: fout_pack_write:
21024: mov.l (%sp)+,%a1 # pass: dst addr
21025: mov.l &0xc,%d0 # pass: opsize is 12 bytes
21026:
21027: cmpi.b SPCOND_FLG(%a6),&mda7_flg # was dst mode -(a7)?
21028: beq.b fout_pack_a7 # yes; special-case the write
21029:
21030: bsr.l _dmem_write # write ext prec number to memory
21031:
21032: tst.l %d1 # did dstore fail?
21033: bne.w fout_ext_err # yes
21034:
21035: rts
21036:
21037: # we don't want to do the write if the exception occurred in supervisor mode
21038: # so _mem_write2() handles this for us.
21039: fout_pack_a7:
21040: bsr.l _mem_write2 # write ext prec number to memory
21041:
21042: tst.l %d1 # did dstore fail?
21043: bne.w fout_ext_err # yes
21044:
21045: rts
21046:
21047: fout_pack_not_norm:
21048: cmpi.b %d0,&DENORM # is it a DENORM?
21049: beq.w fout_pack_norm # yes; treat like a NORM
21050: lea FP_SRC(%a6),%a0 # pass: ptr to input
21051: clr.w 2+FP_SRC_EX(%a6) # clear unused word of src
21052: cmpi.b %d0,&SNAN # is it an SNAN?
21053: beq.b fout_pack_snan # yes
21054: bra.b fout_pack_write # no; write it out as-is
21055:
21056: fout_pack_snan:
21057: ori.w &snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
21058: bset &0x6,FP_SRC_HI(%a6) # set snan bit
21059: bra.b fout_pack_write
21060:
21061: #########################################################################
21062: # XDEF **************************************************************** #
21063: # fetch_dreg(): fetch register according to index in d1 #
21064: # #
21065: # XREF **************************************************************** #
21066: # None #
21067: # #
21068: # INPUT *************************************************************** #
21069: # d1 = index of register to fetch from #
21070: # #
21071: # OUTPUT ************************************************************** #
21072: # d0 = value of register fetched #
21073: # #
21074: # ALGORITHM *********************************************************** #
21075: # According to the index value in d1 which can range from zero #
21076: # to fifteen, load the corresponding register file value (where #
21077: # address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the #
21078: # stack. The rest should still be in their original places. #
21079: # #
21080: #########################################################################
21081:
21082: # this routine leaves d1 intact for subsequent store_dreg calls.
21083: global fetch_dreg
21084: fetch_dreg:
21085: mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0 # fetch jmp table offset
21086: jmp (tbl_fdreg.b,%pc,%d0.w*1) # jmp to reg fetch routine
21087:
21088: tbl_fdreg:
21089: short fdreg0 - tbl_fdreg
21090: short fdreg1 - tbl_fdreg
21091: short fdreg2 - tbl_fdreg
21092: short fdreg3 - tbl_fdreg
21093: short fdreg4 - tbl_fdreg
21094: short fdreg5 - tbl_fdreg
21095: short fdreg6 - tbl_fdreg
21096: short fdreg7 - tbl_fdreg
21097: short fdreg8 - tbl_fdreg
21098: short fdreg9 - tbl_fdreg
21099: short fdrega - tbl_fdreg
21100: short fdregb - tbl_fdreg
21101: short fdregc - tbl_fdreg
21102: short fdregd - tbl_fdreg
21103: short fdrege - tbl_fdreg
21104: short fdregf - tbl_fdreg
21105:
21106: fdreg0:
21107: mov.l EXC_DREGS+0x0(%a6),%d0 # d0 was saved on the frame
21108: rts
21109: fdreg1:
21110: mov.l EXC_DREGS+0x4(%a6),%d0 # d1 was saved on the frame
21111: rts
21112: fdreg2:
21113: mov.l %d2,%d0 # d2 still live
21114: rts
21115: fdreg3:
21116: mov.l %d3,%d0 # d3 still live
21117: rts
21118: fdreg4:
21119: mov.l %d4,%d0 # d4 still live
21120: rts
21121: fdreg5:
21122: mov.l %d5,%d0 # d5 still live
21123: rts
21124: fdreg6:
21125: mov.l %d6,%d0 # d6 still live
21126: rts
21127: fdreg7:
21128: mov.l %d7,%d0 # d7 still live
21129: rts
21130: fdreg8:
21131: mov.l EXC_DREGS+0x8(%a6),%d0 # a0 was saved on the frame
21132: rts
21133: fdreg9:
21134: mov.l EXC_DREGS+0xc(%a6),%d0 # a1 was saved on the frame
21135: rts
21136: fdrega:
21137: mov.l %a2,%d0 # a2 still live
21138: rts
21139: fdregb:
21140: mov.l %a3,%d0 # a3 still live
21141: rts
21142: fdregc:
21143: mov.l %a4,%d0 # a4 still live
21144: rts
21145: fdregd:
21146: mov.l %a5,%d0 # a5 still live
21147: rts
21148: fdrege:
21149: mov.l (%a6),%d0 # a6 was saved at frame base
21150: rts
21151: fdregf:
21152: mov.l EXC_A7(%a6),%d0 # a7 was saved in EXC_A7
21153: rts
21154:
21155: #########################################################################
21156: # XDEF **************************************************************** #
21157: # store_dreg_l(): store longword to data register specified by d1 #
21158: # #
21159: # XREF **************************************************************** #
21160: # None #
21161: # #
21162: # INPUT *************************************************************** #
21163: # d0 = longword value to store #
21164: # d1 = index of register to fetch from #
21165: # #
21166: # OUTPUT ************************************************************** #
21167: # (data register is updated) #
21168: # #
21169: # ALGORITHM *********************************************************** #
21170: # According to the index value in d1, store the longword value #
21171: # in d0 to the corresponding data register. D0/D1 are on the stack #
21172: # while the rest are in their initial places. #
21173: # #
21174: #########################################################################
21175:
21176: global store_dreg_l
21177: store_dreg_l:
21178: mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1 # fetch jmp table offset
21179: jmp (tbl_sdregl.b,%pc,%d1.w*1) # jmp to reg store routine
21180:
21181: tbl_sdregl:
21182: short sdregl0 - tbl_sdregl
21183: short sdregl1 - tbl_sdregl
21184: short sdregl2 - tbl_sdregl
21185: short sdregl3 - tbl_sdregl
21186: short sdregl4 - tbl_sdregl
21187: short sdregl5 - tbl_sdregl
21188: short sdregl6 - tbl_sdregl
21189: short sdregl7 - tbl_sdregl
21190:
21191: sdregl0:
21192: mov.l %d0,EXC_DREGS+0x0(%a6) # d0 is kept on the frame
21193: rts
21194: sdregl1:
21195: mov.l %d0,EXC_DREGS+0x4(%a6) # d1 is kept on the frame
21196: rts
21197: sdregl2:
21198: mov.l %d0,%d2 # d2 still live
21199: rts
21200: sdregl3:
21201: mov.l %d0,%d3 # d3 still live
21202: rts
21203: sdregl4:
21204: mov.l %d0,%d4 # d4 still live
21205: rts
21206: sdregl5:
21207: mov.l %d0,%d5 # d5 still live
21208: rts
21209: sdregl6:
21210: mov.l %d0,%d6 # d6 still live
21211: rts
21212: sdregl7:
21213: mov.l %d0,%d7 # d7 still live
21214: rts
21215:
21216: #########################################################################
21217: # XDEF **************************************************************** #
21218: # store_dreg_w(): store word to data register specified by d1 #
21219: # #
21220: # XREF **************************************************************** #
21221: # None #
21222: # #
21223: # INPUT *************************************************************** #
21224: # d0 = word value to store #
21225: # d1 = index of register to fetch from #
21226: # #
21227: # OUTPUT ************************************************************** #
21228: # (data register is updated) #
21229: # #
21230: # ALGORITHM *********************************************************** #
21231: # According to the index value in d1, store the word value #
21232: # in d0 to the corresponding data register. D0/D1 are on the stack #
21233: # while the rest are in their initial places. #
21234: # #
21235: #########################################################################
21236:
21237: global store_dreg_w
21238: store_dreg_w:
21239: mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1 # fetch jmp table offset
21240: jmp (tbl_sdregw.b,%pc,%d1.w*1) # jmp to reg store routine
21241:
21242: tbl_sdregw:
21243: short sdregw0 - tbl_sdregw
21244: short sdregw1 - tbl_sdregw
21245: short sdregw2 - tbl_sdregw
21246: short sdregw3 - tbl_sdregw
21247: short sdregw4 - tbl_sdregw
21248: short sdregw5 - tbl_sdregw
21249: short sdregw6 - tbl_sdregw
21250: short sdregw7 - tbl_sdregw
21251:
21252: sdregw0:
21253: mov.w %d0,2+EXC_DREGS+0x0(%a6) # write lo word of saved d0
21254: rts
21255: sdregw1:
21256: mov.w %d0,2+EXC_DREGS+0x4(%a6) # write lo word of saved d1
21257: rts
21258: sdregw2:
21259: mov.w %d0,%d2 # d2 still live
21260: rts
21261: sdregw3:
21262: mov.w %d0,%d3 # d3 still live
21263: rts
21264: sdregw4:
21265: mov.w %d0,%d4 # d4 still live
21266: rts
21267: sdregw5:
21268: mov.w %d0,%d5 # d5 still live
21269: rts
21270: sdregw6:
21271: mov.w %d0,%d6 # d6 still live
21272: rts
21273: sdregw7:
21274: mov.w %d0,%d7 # d7 still live
21275: rts
21276:
21277: #########################################################################
21278: # XDEF **************************************************************** #
21279: # store_dreg_b(): store byte to data register specified by d1 #
21280: # #
21281: # XREF **************************************************************** #
21282: # None #
21283: # #
21284: # INPUT *************************************************************** #
21285: # d0 = byte value to store #
21286: # d1 = index of register to fetch from #
21287: # #
21288: # OUTPUT ************************************************************** #
21289: # (data register is updated) #
21290: # #
21291: # ALGORITHM *********************************************************** #
21292: # According to the index value in d1, store the byte value #
21293: # in d0 to the corresponding data register. D0/D1 are on the stack #
21294: # while the rest are in their initial places. #
21295: # #
21296: #########################################################################
21297:
21298: global store_dreg_b
21299: store_dreg_b:
21300: mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1 # fetch jmp table offset
21301: jmp (tbl_sdregb.b,%pc,%d1.w*1) # jmp to reg store routine
21302:
21303: tbl_sdregb:
21304: short sdregb0 - tbl_sdregb
21305: short sdregb1 - tbl_sdregb
21306: short sdregb2 - tbl_sdregb
21307: short sdregb3 - tbl_sdregb
21308: short sdregb4 - tbl_sdregb
21309: short sdregb5 - tbl_sdregb
21310: short sdregb6 - tbl_sdregb
21311: short sdregb7 - tbl_sdregb
21312:
21313: sdregb0:
21314: mov.b %d0,3+EXC_DREGS+0x0(%a6) # write lo byte of saved d0
21315: rts
21316: sdregb1:
21317: mov.b %d0,3+EXC_DREGS+0x4(%a6) # write lo byte of saved d1
21318: rts
21319: sdregb2:
21320: mov.b %d0,%d2 # d2 still live
21321: rts
21322: sdregb3:
21323: mov.b %d0,%d3 # d3 still live
21324: rts
21325: sdregb4:
21326: mov.b %d0,%d4 # d4 still live
21327: rts
21328: sdregb5:
21329: mov.b %d0,%d5 # d5 still live
21330: rts
21331: sdregb6:
21332: mov.b %d0,%d6 # d6 still live
21333: rts
21334: sdregb7:
21335: mov.b %d0,%d7 # d7 still live
21336: rts
21337:
21338: #########################################################################
21339: # XDEF **************************************************************** #
21340: # inc_areg(): increment an address register by the value in d0 #
21341: # #
21342: # XREF **************************************************************** #
21343: # None #
21344: # #
21345: # INPUT *************************************************************** #
21346: # d0 = amount to increment by #
21347: # d1 = index of address register to increment #
21348: # #
21349: # OUTPUT ************************************************************** #
21350: # (address register is updated) #
21351: # #
21352: # ALGORITHM *********************************************************** #
21353: # Typically used for an instruction w/ a post-increment <ea>, #
21354: # this routine adds the increment value in d0 to the address register #
21355: # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
21356: # in their original places. #
21357: # For a7, if the increment amount is one, then we have to #
21358: # increment by two. For any a7 update, set the mia7_flag so that if #
21359: # an access error exception occurs later in emulation, this address #
21360: # register update can be undone. #
21361: # #
21362: #########################################################################
21363:
21364: global inc_areg
21365: inc_areg:
21366: mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1 # fetch jmp table offset
21367: jmp (tbl_iareg.b,%pc,%d1.w*1) # jmp to areg inc routine
21368:
21369: tbl_iareg:
21370: short iareg0 - tbl_iareg
21371: short iareg1 - tbl_iareg
21372: short iareg2 - tbl_iareg
21373: short iareg3 - tbl_iareg
21374: short iareg4 - tbl_iareg
21375: short iareg5 - tbl_iareg
21376: short iareg6 - tbl_iareg
21377: short iareg7 - tbl_iareg
21378:
21379: iareg0: add.l %d0,EXC_DREGS+0x8(%a6) # a0 is kept on the frame
21380: rts
21381: iareg1: add.l %d0,EXC_DREGS+0xc(%a6) # a1 is kept on the frame
21382: rts
21383: iareg2: add.l %d0,%a2 # a2 still live
21384: rts
21385: iareg3: add.l %d0,%a3 # a3 still live
21386: rts
21387: iareg4: add.l %d0,%a4 # a4 still live
21388: rts
21389: iareg5: add.l %d0,%a5 # a5 still live
21390: rts
21391: iareg6: add.l %d0,(%a6) # a6 is kept at frame base
21392: rts
21393: iareg7: mov.b &mia7_flg,SPCOND_FLG(%a6) # flag a7 update for possible undo
21394: cmpi.b %d0,&0x1 # increment amount == 1?
21395: beq.b iareg7b # yes; must bump a7 by 2 instead
21396: add.l %d0,EXC_A7(%a6) # no; increment saved a7 by d0
21397: rts
21398: iareg7b:
21399: addq.l &0x2,EXC_A7(%a6) # increment saved a7 by 2
21400: rts
21401:
21402: #########################################################################
21403: # XDEF **************************************************************** #
21404: # dec_areg(): decrement an address register by the value in d0 #
21405: # #
21406: # XREF **************************************************************** #
21407: # None #
21408: # #
21409: # INPUT *************************************************************** #
21410: # d0 = amount to decrement by #
21411: # d1 = index of address register to decrement #
21412: # #
21413: # OUTPUT ************************************************************** #
21414: # (address register is updated) #
21415: # #
21416: # ALGORITHM *********************************************************** #
21417: # Typically used for an instruction w/ a pre-decrement <ea>, #
21418: # this routine subtracts the decrement value in d0 from the address register #
21419: # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
21420: # in their original places. #
21421: # For a7, if the decrement amount is one, then we have to #
21422: # decrement by two. For any a7 update, set the mda7_flag so that if #
21423: # an access error exception occurs later in emulation, this address #
21424: # register update can be undone. #
21425: # #
21426: #########################################################################
21427:
21428: global dec_areg
21429: dec_areg:
21430: mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1 # fetch jmp table offset
21431: jmp (tbl_dareg.b,%pc,%d1.w*1) # jmp to areg dec routine
21432:
21433: tbl_dareg:
21434: short dareg0 - tbl_dareg
21435: short dareg1 - tbl_dareg
21436: short dareg2 - tbl_dareg
21437: short dareg3 - tbl_dareg
21438: short dareg4 - tbl_dareg
21439: short dareg5 - tbl_dareg
21440: short dareg6 - tbl_dareg
21441: short dareg7 - tbl_dareg
21442:
21443: dareg0: sub.l %d0,EXC_DREGS+0x8(%a6) # a0 is kept on the frame
21444: rts
21445: dareg1: sub.l %d0,EXC_DREGS+0xc(%a6) # a1 is kept on the frame
21446: rts
21447: dareg2: sub.l %d0,%a2 # a2 still live
21448: rts
21449: dareg3: sub.l %d0,%a3 # a3 still live
21450: rts
21451: dareg4: sub.l %d0,%a4 # a4 still live
21452: rts
21453: dareg5: sub.l %d0,%a5 # a5 still live
21454: rts
21455: dareg6: sub.l %d0,(%a6) # a6 is kept at frame base
21456: rts
21457: dareg7: mov.b &mda7_flg,SPCOND_FLG(%a6) # flag a7 update for possible undo
21458: cmpi.b %d0,&0x1 # decrement amount == 1?
21459: beq.b dareg7b # yes; must drop a7 by 2 instead
21460: sub.l %d0,EXC_A7(%a6) # no; decrement saved a7 by d0
21461: rts
21462: dareg7b:
21463: subq.l &0x2,EXC_A7(%a6) # decrement saved a7 by 2
21464: rts
21465:
21466: ##############################################################################
21467:
21468: #########################################################################
21469: # XDEF **************************************************************** #
21470: # load_fpn1(): load FP register value into FP_SRC(a6). #
21471: # #
21472: # XREF **************************************************************** #
21473: # None #
21474: # #
21475: # INPUT *************************************************************** #
21476: # d0 = index of FP register to load #
21477: # #
21478: # OUTPUT ************************************************************** #
21479: # FP_SRC(a6) = value loaded from FP register file #
21480: # #
21481: # ALGORITHM *********************************************************** #
21482: # Using the index in d0, load FP_SRC(a6) with a number from the #
21483: # FP register file. #
21484: # #
21485: #########################################################################
21486:
21487: global load_fpn1
21488: load_fpn1:
21489: mov.w (tbl_load_fpn1.b,%pc,%d0.w*2), %d0 # fetch jmp table offset
21490: jmp (tbl_load_fpn1.b,%pc,%d0.w*1) # jmp to fpreg load routine
21491:
21492: tbl_load_fpn1:
21493: short load_fpn1_0 - tbl_load_fpn1
21494: short load_fpn1_1 - tbl_load_fpn1
21495: short load_fpn1_2 - tbl_load_fpn1
21496: short load_fpn1_3 - tbl_load_fpn1
21497: short load_fpn1_4 - tbl_load_fpn1
21498: short load_fpn1_5 - tbl_load_fpn1
21499: short load_fpn1_6 - tbl_load_fpn1
21500: short load_fpn1_7 - tbl_load_fpn1
21501:
21502: load_fpn1_0:
21503: mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6) # fp0 image is kept on the frame
21504: mov.l 4+EXC_FP0(%a6), 4+FP_SRC(%a6)
21505: mov.l 8+EXC_FP0(%a6), 8+FP_SRC(%a6)
21506: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21507: rts
21508: load_fpn1_1:
21509: mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6) # fp1 image is kept on the frame
21510: mov.l 4+EXC_FP1(%a6), 4+FP_SRC(%a6)
21511: mov.l 8+EXC_FP1(%a6), 8+FP_SRC(%a6)
21512: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21513: rts
21514: load_fpn1_2:
21515: fmovm.x &0x20, FP_SRC(%a6) # store fp2 to FP_SRC
21516: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21517: rts
21518: load_fpn1_3:
21519: fmovm.x &0x10, FP_SRC(%a6) # store fp3 to FP_SRC
21520: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21521: rts
21522: load_fpn1_4:
21523: fmovm.x &0x08, FP_SRC(%a6) # store fp4 to FP_SRC
21524: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21525: rts
21526: load_fpn1_5:
21527: fmovm.x &0x04, FP_SRC(%a6) # store fp5 to FP_SRC
21528: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21529: rts
21530: load_fpn1_6:
21531: fmovm.x &0x02, FP_SRC(%a6) # store fp6 to FP_SRC
21532: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21533: rts
21534: load_fpn1_7:
21535: fmovm.x &0x01, FP_SRC(%a6) # store fp7 to FP_SRC
21536: lea FP_SRC(%a6), %a0 # return ptr to FP_SRC
21537: rts
21538:
21539: #############################################################################
21540:
21541: #########################################################################
21542: # XDEF **************************************************************** #
21543: # load_fpn2(): load FP register value into FP_DST(a6). #
21544: # #
21545: # XREF **************************************************************** #
21546: # None #
21547: # #
21548: # INPUT *************************************************************** #
21549: # d0 = index of FP register to load #
21550: # #
21551: # OUTPUT ************************************************************** #
21552: # FP_DST(a6) = value loaded from FP register file #
21553: # #
21554: # ALGORITHM *********************************************************** #
21555: # Using the index in d0, load FP_DST(a6) with a number from the #
21556: # FP register file. #
21557: # #
21558: #########################################################################
21559:
21560: global load_fpn2
21561: load_fpn2:
21562: mov.w (tbl_load_fpn2.b,%pc,%d0.w*2), %d0 # fetch jmp table offset
21563: jmp (tbl_load_fpn2.b,%pc,%d0.w*1) # jmp to fpreg load routine
21564:
21565: tbl_load_fpn2:
21566: short load_fpn2_0 - tbl_load_fpn2
21567: short load_fpn2_1 - tbl_load_fpn2
21568: short load_fpn2_2 - tbl_load_fpn2
21569: short load_fpn2_3 - tbl_load_fpn2
21570: short load_fpn2_4 - tbl_load_fpn2
21571: short load_fpn2_5 - tbl_load_fpn2
21572: short load_fpn2_6 - tbl_load_fpn2
21573: short load_fpn2_7 - tbl_load_fpn2
21574:
21575: load_fpn2_0:
21576: mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6) # fp0 image is kept on the frame
21577: mov.l 4+EXC_FP0(%a6), 4+FP_DST(%a6)
21578: mov.l 8+EXC_FP0(%a6), 8+FP_DST(%a6)
21579: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21580: rts
21581: load_fpn2_1:
21582: mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6) # fp1 image is kept on the frame
21583: mov.l 4+EXC_FP1(%a6), 4+FP_DST(%a6)
21584: mov.l 8+EXC_FP1(%a6), 8+FP_DST(%a6)
21585: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21586: rts
21587: load_fpn2_2:
21588: fmovm.x &0x20, FP_DST(%a6) # store fp2 to FP_DST
21589: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21590: rts
21591: load_fpn2_3:
21592: fmovm.x &0x10, FP_DST(%a6) # store fp3 to FP_DST
21593: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21594: rts
21595: load_fpn2_4:
21596: fmovm.x &0x08, FP_DST(%a6) # store fp4 to FP_DST
21597: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21598: rts
21599: load_fpn2_5:
21600: fmovm.x &0x04, FP_DST(%a6) # store fp5 to FP_DST
21601: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21602: rts
21603: load_fpn2_6:
21604: fmovm.x &0x02, FP_DST(%a6) # store fp6 to FP_DST
21605: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21606: rts
21607: load_fpn2_7:
21608: fmovm.x &0x01, FP_DST(%a6) # store fp7 to FP_DST
21609: lea FP_DST(%a6), %a0 # return ptr to FP_DST
21610: rts
21611:
21612: #############################################################################
21613:
21614: #########################################################################
21615: # XDEF **************************************************************** #
21616: # store_fpreg(): store an fp value to the fpreg designated d0. #
21617: # #
21618: # XREF **************************************************************** #
21619: # None #
21620: # #
21621: # INPUT *************************************************************** #
21622: # fp0 = extended precision value to store #
21623: # d0 = index of floating-point register #
21624: # #
21625: # OUTPUT ************************************************************** #
21626: # None #
21627: # #
21628: # ALGORITHM *********************************************************** #
21629: # Store the value in fp0 to the FP register designated by the #
21630: # value in d0. The FP number can be DENORM or SNAN so we have to be #
21631: # careful that we don't take an exception here. #
21632: # #
21633: #########################################################################
21634:
21635: global store_fpreg
21636: store_fpreg:
21637: mov.w (tbl_store_fpreg.b,%pc,%d0.w*2), %d0 # fetch jmp table offset
21638: jmp (tbl_store_fpreg.b,%pc,%d0.w*1) # jmp to fpreg store routine
21639:
21640: tbl_store_fpreg:
21641: short store_fpreg_0 - tbl_store_fpreg
21642: short store_fpreg_1 - tbl_store_fpreg
21643: short store_fpreg_2 - tbl_store_fpreg
21644: short store_fpreg_3 - tbl_store_fpreg
21645: short store_fpreg_4 - tbl_store_fpreg
21646: short store_fpreg_5 - tbl_store_fpreg
21647: short store_fpreg_6 - tbl_store_fpreg
21648: short store_fpreg_7 - tbl_store_fpreg
21649:
21650: store_fpreg_0:
21651: fmovm.x &0x80, EXC_FP0(%a6) # fp0 lives in the frame save area
21652: rts
21653: store_fpreg_1:
21654: fmovm.x &0x80, EXC_FP1(%a6) # fp1 lives in the frame save area
21655: rts
21656: store_fpreg_2:
21657: fmovm.x &0x01, -(%sp) # push fp0 (no exception taken)
21658: fmovm.x (%sp)+, &0x20 # pop it into fp2
21659: rts
21660: store_fpreg_3:
21661: fmovm.x &0x01, -(%sp) # push fp0 (no exception taken)
21662: fmovm.x (%sp)+, &0x10 # pop it into fp3
21663: rts
21664: store_fpreg_4:
21665: fmovm.x &0x01, -(%sp) # push fp0 (no exception taken)
21666: fmovm.x (%sp)+, &0x08 # pop it into fp4
21667: rts
21668: store_fpreg_5:
21669: fmovm.x &0x01, -(%sp) # push fp0 (no exception taken)
21670: fmovm.x (%sp)+, &0x04 # pop it into fp5
21671: rts
21672: store_fpreg_6:
21673: fmovm.x &0x01, -(%sp) # push fp0 (no exception taken)
21674: fmovm.x (%sp)+, &0x02 # pop it into fp6
21675: rts
21676: store_fpreg_7:
21677: fmovm.x &0x01, -(%sp) # push fp0 (no exception taken)
21678: fmovm.x (%sp)+, &0x01 # pop it into fp7
21679: rts
21680:
21681: #########################################################################
21682: # XDEF **************************************************************** #
21683: # _denorm(): denormalize an intermediate result #
21684: # #
21685: # XREF **************************************************************** #
21686: # None #
21687: # #
21688: # INPUT *************************************************************** #
21689: # a0 = points to the operand to be denormalized #
21690: # (in the internal extended format) #
21691: # #
21692: # d0 = rounding precision #
21693: # #
21694: # OUTPUT ************************************************************** #
21695: # a0 = pointer to the denormalized result #
21696: # (in the internal extended format) #
21697: # #
21698: # d0 = guard,round,sticky #
21699: # #
21700: # ALGORITHM *********************************************************** #
21701: # According to the exponent underflow threshold for the given #
21702: # precision, shift the mantissa bits to the right in order to raise the #
21703: # exponent of the operand to the threshold value. While shifting the #
21704: # mantissa bits right, maintain the value of the guard, round, and #
21705: # sticky bits. #
21706: # other notes: #
21707: # (1) _denorm() is called by the underflow routines #
21708: # (2) _denorm() does NOT affect the status register #
21709: # #
21710: #########################################################################
21711:
21712: #
21713: # table of exponent threshold values for each precision
21714: #
21715: tbl_thresh:
21716: short 0x0
21717: short sgl_thresh
21718: short dbl_thresh
21719:
21720: global _denorm
21721: _denorm:
21722: #
21723: # Load the exponent threshold for the precision selected and check
21724: # to see if (threshold - exponent) is > 65 in which case we can
21725: # simply calculate the sticky bit and zero the mantissa. otherwise
21726: # we have to call the denormalization routine.
21727: #
21728: lsr.b &0x2, %d0 # shift prec to lo bits
21729: mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
21730: mov.w %d1, %d0 # copy d1 into d0
21731: sub.w FTEMP_EX(%a0), %d0 # diff = threshold - exp
21732: cmpi.w %d0, &66 # is diff > 65? (mant + g,r bits)
21733: bpl.b denorm_set_stky # yes; just calc sticky
21734:
21735: clr.l %d0 # clear g,r,s
21736: btst &inex2_bit, FPSR_EXCEPT(%a6) # yes; was INEX2 set?
21737: beq.b denorm_call # no; don't change anything
21738: bset &29, %d0 # yes; set sticky bit
21739:
21740: denorm_call:
21741: bsr.l dnrm_lp # denormalize the number
21742: rts
21743:
21744: #
21745: # all bit would have been shifted off during the denorm so simply
21746: # calculate if the sticky should be set and clear the entire mantissa.
21747: #
21748: denorm_set_stky:
21749: mov.l &0x20000000, %d0 # set sticky bit in return value
21750: mov.w %d1, FTEMP_EX(%a0) # load exp with threshold
21751: clr.l FTEMP_HI(%a0) # set d1 = 0 (ms mantissa)
21752: clr.l FTEMP_LO(%a0) # set d2 = 0 (ms mantissa)
21753: rts
21754:
21755: # #
21756: # dnrm_lp(): normalize exponent/mantissa to specified threshold #
21757: # #
21758: # INPUT: #
21759: # %a0 : points to the operand to be denormalized #
21760: # %d0{31:29} : initial guard,round,sticky #
21761: # %d1{15:0} : denormalization threshold #
21762: # OUTPUT: #
21763: # %a0 : points to the denormalized operand #
21764: # %d0{31:29} : final guard,round,sticky #
21765: # #
21766:
21767: # *** Local Equates *** #
21768: set GRS, L_SCR2 # g,r,s temp storage
21769: set FTEMP_LO2, L_SCR1 # FTEMP_LO copy
21770:
21771: global dnrm_lp
21772: dnrm_lp:
21773:
21774: #
21775: # make a copy of FTEMP_LO and place the g,r,s bits directly after it
21776: # in memory so as to make the bitfield extraction for denormalization easier.
21777: #
21778: mov.l FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
21779: mov.l %d0, GRS(%a6) # place g,r,s after it
21780:
21781: #
21782: # check to see how much less than the underflow threshold the operand
21783: # exponent is.
21784: #
21785: mov.l %d1, %d0 # copy the denorm threshold
21786: sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent
21787: ble.b dnrm_no_lp # d1 <= 0
21788: cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
21789: blt.b case_1 # yes
21790: cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
21791: blt.b case_2 # yes
21792: bra.w case_3 # (d1 >= 64)
21793:
21794: #
21795: # No normalization necessary
21796: #
21797: dnrm_no_lp:
21798: mov.l GRS(%a6), %d0 # restore original g,r,s
21799: rts
21800:
21801: #
21802: # case (0<d1<32)
21803: #
21804: # %d0 = denorm threshold
21805: # %d1 = "n" = amt to shift
21806: #
21807: # ---------------------------------------------------------
21808: # | FTEMP_HI | FTEMP_LO |grs000.........000|
21809: # ---------------------------------------------------------
21810: # <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
21811: # \ \ \ \
21812: # \ \ \ \
21813: # \ \ \ \
21814: # \ \ \ \
21815: # \ \ \ \
21816: # \ \ \ \
21817: # \ \ \ \
21818: # \ \ \ \
21819: # <-(n)-><-(32 - n)-><------(32)-------><------(32)------->
21820: # ---------------------------------------------------------
21821: # |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
21822: # ---------------------------------------------------------
21823: #
21824: case_1:
21825: mov.l %d2, -(%sp) # create temp storage
21826:
21827: mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
21828: mov.l &32, %d0
21829: sub.w %d1, %d0 # %d0 = 32 - %d1
21830:
21831: cmpi.w %d1, &29 # is shft amt >= 29
21832: blt.b case1_extract # no; no fix needed
21833: mov.b GRS(%a6), %d2
21834: or.b %d2, 3+FTEMP_LO2(%a6)
21835:
21836: case1_extract:
21837: bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
21838: bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
21839: bfextu FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
21840:
21841: mov.l %d2, FTEMP_HI(%a0) # store new FTEMP_HI
21842: mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO
21843:
21844: bftst %d0{&2:&30} # were bits shifted off?
21845: beq.b case1_sticky_clear # no; go finish
21846: bset &rnd_stky_bit, %d0 # yes; set sticky bit
21847:
21848: case1_sticky_clear:
21849: and.l &0xe0000000, %d0 # clear all but G,R,S
21850: mov.l (%sp)+, %d2 # restore temp register
21851: rts
21852:
21853: #
21854: # case (32<=d1<64)
21855: #
21856: # %d0 = denorm threshold
21857: # %d1 = "n" = amt to shift
21858: #
21859: # ---------------------------------------------------------
21860: # | FTEMP_HI | FTEMP_LO |grs000.........000|
21861: # ---------------------------------------------------------
21862: # <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
21863: # \ \ \
21864: # \ \ \
21865: # \ \ -------------------
21866: # \ -------------------- \
21867: # ------------------- \ \
21868: # \ \ \
21869: # \ \ \
21870: # \ \ \
21871: # <-------(32)------><-(n)-><-(32 - n)-><------(32)------->
21872: # ---------------------------------------------------------
21873: # |0...............0|0....0| NEW_LO |grs |
21874: # ---------------------------------------------------------
21875: #
#
# case (32<=d1<64): FTEMP_HI supplies the new FTEMP_LO; the old
# FTEMP_LO and old g,r,s collapse into the new g,r,s.
#
# %d0 = denorm threshold (becomes the new exponent)
# %d1 = "n" = amt to shift
#
case_2:
	mov.l %d2, -(%sp) # create temp storage

	mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
	subi.w &0x20, %d1 # %d1 now between 0 and 32
	mov.l &0x20, %d0
	sub.w %d1, %d0 # %d0 = 32 - %d1

# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
# the number of bits to check for the sticky detect.
# it only plays a role in shift amounts of 61-63.
	mov.b GRS(%a6), %d2
	or.b %d2, 3+FTEMP_LO2(%a6)

	bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
	bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S

# sticky must be set if any bit fell below the new g,r: check the
# bits extracted into %d1 and the remainder of the old low mantissa
	bftst %d1{&2:&30} # were any bits shifted off?
	bne.b case2_set_sticky # yes; set sticky bit
	bftst FTEMP_LO2(%a6){%d0:&31} # were any bits shifted off?
	bne.b case2_set_sticky # yes; set sticky bit

	mov.l %d1, %d0 # move new G,R,S to %d0
	bra.b case2_end

case2_set_sticky:
	mov.l %d1, %d0 # move new G,R,S to %d0
	bset &rnd_stky_bit, %d0 # set sticky bit

case2_end:
	clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
	mov.l %d2, FTEMP_LO(%a0) # store FTEMP_LO
	and.l &0xe0000000, %d0 # clear all but G,R,S

	mov.l (%sp)+,%d2 # restore temp register
	rts
21912:
21913: #
21914: # case (d1>=64)
21915: #
21916: # %d0 = denorm threshold
21917: # %d1 = amt to shift
21918: #
#
# case (d1>=64): the entire mantissa is shifted out of the result.
# at most the top one or two bits of FTEMP_HI survive as the new
# guard/round bits; everything else collapses into the sticky bit.
#
# %d0 = denorm threshold (becomes the new exponent)
# %d1 = amt to shift (64, 65, or more)
#
case_3:
	mov.w %d0, FTEMP_EX(%a0) # insert denorm threshold

	cmpi.w %d1, &65 # is shift amt > 65?
	blt.b case3_64 # no; it's == 64
	beq.b case3_65 # no; it's == 65

#
# case (d1>65)
#
# Shift value is > 65 and out of range. All bits are shifted off.
# Return a zero mantissa with the sticky bit set.
#
	clr.l FTEMP_HI(%a0) # clear hi(mantissa)
	clr.l FTEMP_LO(%a0) # clear lo(mantissa)
	mov.l &0x20000000, %d0 # set sticky bit
	rts

#
# case (d1 == 64)
#
# bits 31-30 of hi(mantissa) become the new guard and round bits; the
# remaining 30 bits, lo(mantissa), and the old g,r,s feed the sticky.
#
case3_64:
	mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
	mov.l %d0, %d1 # make a copy
	and.l &0xc0000000, %d0 # extract G,R
	and.l &0x3fffffff, %d1 # extract other bits

	bra.b case3_complete

#
# case (d1 == 65)
#
# bit 31 of hi(mantissa) becomes the new round bit (guard is zero); the
# remaining 31 bits, lo(mantissa), and the old g,r,s feed the sticky.
#
case3_65:
	mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
	and.l &0x80000000, %d0 # extract R bit
	lsr.l &0x1, %d0 # shift high bit into R bit

# BUGFIX: %d1 still held the shift count (65) here, so the "and" below
# was always non-zero and sticky was set even for exact results.
# reload %d1 with hi(mantissa) before masking off the shifted-off bits.
	mov.l FTEMP_HI(%a0), %d1 # refetch hi(mantissa)
	and.l &0x7fffffff, %d1 # extract other bits

case3_complete:
# last operation done was an "and" of the bits shifted off so the condition
# codes are already set so branch accordingly.
	bne.b case3_set_sticky # yes; go set new sticky
	tst.l FTEMP_LO(%a0) # were any bits shifted off?
	bne.b case3_set_sticky # yes; go set new sticky
	tst.b GRS(%a6) # were any bits shifted off?
	bne.b case3_set_sticky # yes; go set new sticky

#
# no bits were shifted off so don't set the sticky bit.
# the guard/round bits computed above are returned in %d0 as-is.
# the entire mantissa is zero.
#
	clr.l FTEMP_HI(%a0) # clear hi(mantissa)
	clr.l FTEMP_LO(%a0) # clear lo(mantissa)
	rts

#
# some bits were shifted off so set the sticky bit.
# the entire mantissa is zero.
#
case3_set_sticky:
	bset &rnd_stky_bit,%d0 # set new sticky bit
	clr.l FTEMP_HI(%a0) # clear hi(mantissa)
	clr.l FTEMP_LO(%a0) # clear lo(mantissa)
	rts
22018:
22019: #########################################################################
22020: # XDEF **************************************************************** #
22021: # _round(): round result according to precision/mode #
22022: # #
22023: # XREF **************************************************************** #
22024: # None #
22025: # #
22026: # INPUT *************************************************************** #
22027: # a0 = ptr to input operand in internal extended format #
22028: # d1(hi) = contains rounding precision: #
22029: # ext = $0000xxxx #
22030: # sgl = $0004xxxx #
22031: # dbl = $0008xxxx #
22032: # d1(lo) = contains rounding mode: #
22033: # RN = $xxxx0000 #
22034: # RZ = $xxxx0001 #
22035: # RM = $xxxx0002 #
22036: # RP = $xxxx0003 #
22037: # d0{31:29} = contains the g,r,s bits (extended) #
22038: # #
22039: # OUTPUT ************************************************************** #
22040: # a0 = pointer to rounded result #
22041: # #
22042: # ALGORITHM *********************************************************** #
22043: # On return the value pointed to by a0 is correctly rounded, #
22044: # a0 is preserved and the g-r-s bits in d0 are cleared. #
22045: # The result is not typed - the tag field is invalid. The #
22046: # result is still in the internal extended format. #
22047: # #
22048: # The INEX bit of USER_FPSR will be set if the rounded result was #
22049: # inexact (i.e. if any of the g-r-s bits were set). #
22050: # #
22051: #########################################################################
22052:
	global _round
_round:
#
# ext_grs() looks at the rounding precision and sets the appropriate
# G,R,S bits (returned in %d0{31:29}).
# If (G,R,S == 0) then result is exact and round is done, else set
# the inex flag in status reg and continue.
#
	bsr.l ext_grs # extract G,R,S

	tst.l %d0 # are G,R,S zero?
	beq.w truncate # yes; round is complete

	or.w &inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex

#
# Use rounding mode as an index into a jump table for these modes.
# All of the following assumes grs != 0.
#
	mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
	jmp (tbl_mode.b,%pc,%a1) # jmp to rnd mode handler

# pc-relative offset table indexed by rounding mode (RN=0,RZ=1,RM=2,RP=3)
tbl_mode:
	short rnd_near - tbl_mode
	short truncate - tbl_mode # RZ always truncates
	short rnd_mnus - tbl_mode
	short rnd_plus - tbl_mode
22080:
22081: #################################################################
22082: # ROUND PLUS INFINITY #
22083: # #
22084: # If sign of fp number = 0 (positive), then add 1 to l. #
22085: #################################################################
rnd_plus:
	tst.b FTEMP_SGN(%a0) # check for sign
	bmi.w truncate # if negative then truncate

# positive: round up. force g,r,s to all ones so the add routines
# never clear the l-bit (that is only done for round-to-nearest ties).
	mov.l &0xffffffff, %d0 # force g,r,s to be all f's
	swap %d1 # set up d1 for round prec.

	cmpi.b %d1, &s_mode # is prec = sgl?
	beq.w add_sgl # yes
	bgt.w add_dbl # no; it's dbl
	bra.w add_ext # no; it's ext
22097:
22098: #################################################################
22099: # ROUND MINUS INFINITY #
22100: # #
22101: # If sign of fp number = 1 (negative), then add 1 to l. #
22102: #################################################################
rnd_mnus:
	tst.b FTEMP_SGN(%a0) # check for sign
	bpl.w truncate # if positive then truncate

# negative: round away from zero. force g,r,s to all ones so the add
# routines never clear the l-bit (round-to-nearest tie handling only).
	mov.l &0xffffffff, %d0 # force g,r,s to be all f's
	swap %d1 # set up d1 for round prec.

	cmpi.b %d1, &s_mode # is prec = sgl?
	beq.w add_sgl # yes
	bgt.w add_dbl # no; it's dbl
	bra.w add_ext # no; it's ext
22114:
22115: #################################################################
22116: # ROUND NEAREST #
22117: # #
22118: # If (g=1), then add 1 to l and if (r=s=0), then clear l #
22119: # Note that this will round to even in case of a tie. #
22120: #################################################################
rnd_near:
	asl.l &0x1, %d0 # shift g-bit to c-bit
	bcc.w truncate # if (g=0) then truncate

# g=1: add 1 to the l-bit. %d0 now holds only r,s; the add routines
# clear the l-bit (round to even) when r=s=0, i.e. on an exact tie.
	swap %d1 # set up d1 for round prec.

	cmpi.b %d1, &s_mode # is prec = sgl?
	beq.w add_sgl # yes
	bgt.w add_dbl # no; it's dbl
	bra.w add_ext # no; it's ext
22131:
# *** LOCAL EQUATES ***
	set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
	set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec

#########################
# ADD SINGLE            #
#########################
# add 1 to the single-precision l-bit; on mantissa overflow, rotate the
# carry back into the mantissa and bump the exponent.  if r,s (%d0) are
# zero, the l-bit is cleared afterwards (round-to-even tie case).
add_sgl:
	add.l &ad_1_sgl, FTEMP_HI(%a0)
	bcc.b scc_clr # no mantissa overflow
	roxr.w FTEMP_HI(%a0) # shift v-bit back in
	roxr.w FTEMP_HI+2(%a0) # shift v-bit back in
	add.w &0x1, FTEMP_EX(%a0) # and incr exponent
scc_clr:
	tst.l %d0 # test for rs = 0
	bne.b sgl_done
	and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
sgl_done:
	and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
	clr.l FTEMP_LO(%a0) # clear lo(mantissa)
	rts
22153:
22154: #########################
22155: # ADD EXTENDED #
22156: #########################
# add 1 to the extended-precision l-bit, propagating the carry through
# the full 64-bit mantissa; on overflow out the top, rotate the carry
# back in and bump the exponent.  clears the l-bit on a round-to-even
# tie (rs = 0).
add_ext:
	addq.l &1,FTEMP_LO(%a0) # add 1 to l-bit
	bcc.b xcc_clr # test for carry out
	addq.l &1,FTEMP_HI(%a0) # propagate carry
	bcc.b xcc_clr
	roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
	roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
	roxr.w FTEMP_LO(%a0)
	roxr.w FTEMP_LO+2(%a0)
	add.w &0x1,FTEMP_EX(%a0) # and inc exp
xcc_clr:
	tst.l %d0 # test rs = 0
	bne.b add_ext_done
	and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
add_ext_done:
	rts
22173:
22174: #########################
22175: # ADD DOUBLE #
22176: #########################
# add 1 to the double-precision l-bit; on carry out of the mantissa,
# rotate the carry back in and bump the exponent.  clears the l-bit on
# a round-to-even tie (rs = 0), then masks off bits beyond dbl prec.
add_dbl:
	add.l &ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
	bcc.b dcc_clr # no carry
	addq.l &0x1, FTEMP_HI(%a0) # propagate carry
	bcc.b dcc_clr # no carry

	roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
	roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
	roxr.w FTEMP_LO(%a0)
	roxr.w FTEMP_LO+2(%a0)
	addq.w &0x1, FTEMP_EX(%a0) # incr exponent
dcc_clr:
	tst.l %d0 # test for rs = 0
	bne.b dbl_done
	and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit

dbl_done:
	and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
	rts
22196:
22197: ###########################
22198: # Truncate all other bits #
22199: ###########################
# entry point for exact results and for RZ (and the truncating cases of
# RM/RP/RN): mask the mantissa down to the selected rounding precision
# without incrementing.
truncate:
	swap %d1 # select rnd prec

	cmpi.b %d1, &s_mode # is prec sgl?
	beq.w sgl_done # yes
	bgt.b dbl_done # no; it's dbl
	rts # no; it's ext
22207:
22208:
22209: #
22210: # ext_grs(): extract guard, round and sticky bits according to
22211: # rounding precision.
22212: #
22213: # INPUT
22214: # d0 = extended precision g,r,s (in d0{31:29})
22215: # d1 = {PREC,ROUND}
22216: # OUTPUT
22217: # d0{31:29} = guard, round, sticky
22218: #
22219: # The ext_grs extract the guard/round/sticky bits according to the
22220: # selected rounding precision. It is called by the round subroutine
22221: # only. All registers except d0 are kept intact. d0 becomes an
22222: # updated guard,round,sticky in d0{31:29}
22223: #
22224: # Notes: the ext_grs uses the round PREC, and therefore has to swap d1
22225: # prior to usage, and needs to restore d1 to original. this
22226: # routine is tightly tied to the round routine and not meant to
22227: # uphold standard subroutine calling practices.
22228: #
22229:
ext_grs:
	swap %d1 # have d1.w point to round precision
	tst.b %d1 # is rnd prec = extended?
	bne.b ext_grs_not_ext # no; go handle sgl or dbl

#
# %d0 actually already hold g,r,s since _round() had it before calling
# this function. so, as long as we don't disturb it, we are "returning" it.
#
ext_grs_ext:
	swap %d1 # yes; return to correct positions
	rts

ext_grs_not_ext:
	movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}

	cmpi.b %d1, &s_mode # is rnd prec = sgl?
	bne.b ext_grs_dbl # no; go handle dbl

#
# sgl: guard and round live at FTEMP_HI bit offsets 24-25 (just past
# the 24-bit sgl mantissa).  everything to their right -- bits 26-31 of
# FTEMP_HI, all of FTEMP_LO, and the original g,r,s -- folds into the
# new sticky bit.
#
ext_grs_sgl:
	bfextu FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
	mov.l &30, %d2 # of the sgl prec. limits
	lsl.l %d2, %d3 # shift g-r bits to MSB of d3
	mov.l FTEMP_HI(%a0), %d2 # get word 2 for s-bit test
	and.l &0x0000003f, %d2 # s bit is the or of all other
	bne.b ext_grs_st_stky # bits to the right of g-r
	tst.l FTEMP_LO(%a0) # test lower mantissa
	bne.b ext_grs_st_stky # if any are set, set sticky
	tst.l %d0 # test original g,r,s
	bne.b ext_grs_st_stky # if any are set, set sticky
	bra.b ext_grs_end_sd # if words 3 and 4 are clr, exit

#
# dbl: guard and round live at FTEMP_LO bit offsets 21-22 (just past
# the 53-bit dbl mantissa).  everything to their right -- bits 23-31 of
# FTEMP_LO and the original g,r,s -- folds into the new sticky bit.
#
ext_grs_dbl:
	bfextu FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
	mov.l &30, %d2 # of the dbl prec. limits
	lsl.l %d2, %d3 # shift g-r bits to the MSB of d3
	mov.l FTEMP_LO(%a0), %d2 # get lower mantissa for s-bit test
	and.l &0x000001ff, %d2 # s bit is the or-ing of all
	bne.b ext_grs_st_stky # other bits to the right of g-r
	tst.l %d0 # test word original g,r,s
	bne.b ext_grs_st_stky # if any are set, set sticky
	bra.b ext_grs_end_sd # if clear, exit

ext_grs_st_stky:
	bset &rnd_stky_bit, %d3 # set sticky bit
ext_grs_end_sd:
	mov.l %d3, %d0 # return grs to d0

	movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}

	swap %d1 # restore d1 to original
	rts
22306:
22307: #########################################################################
22308: # norm(): normalize the mantissa of an extended precision input. the #
22309: # input operand should not be normalized already. #
22310: # #
22311: # XDEF **************************************************************** #
22312: # norm() #
22313: # #
22314: # XREF **************************************************************** #
22315: # none #
22316: # #
22317: # INPUT *************************************************************** #
22318: # a0 = pointer fp extended precision operand to normalize #
22319: # #
22320: # OUTPUT ************************************************************** #
22321: # d0 = number of bit positions the mantissa was shifted #
22322: # a0 = the input operand's mantissa is normalized; the exponent #
22323: # is unchanged. #
22324: # #
22325: #########################################################################
	global norm
norm:
	mov.l %d2, -(%sp) # create some temp regs
	mov.l %d3, -(%sp)

	mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
	mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)

	bfffo %d0{&0:&32}, %d2 # how many places to shift?
	beq.b norm_lo # hi(man) is all zeroes!

norm_hi:
# shift both longwords left by %d2, carrying the top %d2 bits of
# lo(man) into the bottom of hi(man)
	lsl.l %d2, %d0 # left shift hi(man)
	bfextu %d1{&0:%d2}, %d3 # extract lo bits

	or.l %d3, %d0 # create hi(man)
	lsl.l %d2, %d1 # create lo(man)

	mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
	mov.l %d1, FTEMP_LO(%a0) # store new lo(man)

	mov.l %d2, %d0 # return shift amount

	mov.l (%sp)+, %d3 # restore temp regs
	mov.l (%sp)+, %d2

	rts

norm_lo:
# hi(man) was all zero: shifted lo(man) becomes the new hi(man) and
# the total shift count is 32 plus the shifts needed within lo(man)
	bfffo %d1{&0:&32}, %d2 # how many places to shift?
	lsl.l %d2, %d1 # shift lo(man)
	add.l &32, %d2 # add 32 to shft amount

	mov.l %d1, FTEMP_HI(%a0) # store hi(man)
	clr.l FTEMP_LO(%a0) # lo(man) is now zero

	mov.l %d2, %d0 # return shift amount

	mov.l (%sp)+, %d3 # restore temp regs
	mov.l (%sp)+, %d2

	rts
22368:
22369: #########################################################################
22370: # unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
22371: # - returns corresponding optype tag #
22372: # #
22373: # XDEF **************************************************************** #
22374: # unnorm_fix() #
22375: # #
22376: # XREF **************************************************************** #
22377: # norm() - normalize the mantissa #
22378: # #
22379: # INPUT *************************************************************** #
22380: # a0 = pointer to unnormalized extended precision number #
22381: # #
22382: # OUTPUT ************************************************************** #
22383: # d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
22384: # a0 = input operand has been converted to a norm, denorm, or #
22385: # zero; both the exponent and mantissa are changed. #
22386: # #
22387: #########################################################################
22388:
	global unnorm_fix
unnorm_fix:
	bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
	bne.b unnorm_shift # hi(man) is not all zeroes

#
# hi(man) is all zeroes so see if any bits in lo(man) are set
#
unnorm_chk_lo:
	bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
	beq.w unnorm_zero # yes

	add.w &32, %d0 # no; fix shift distance

#
# d0 = # shifts needed for complete normalization
#
unnorm_shift:
	clr.l %d1 # clear top word
	mov.w FTEMP_EX(%a0), %d1 # extract exponent
	and.w &0x7fff, %d1 # strip off sgn

	cmp.w %d0, %d1 # will denorm push exp < 0?
	bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0

#
# exponent would not go < 0. therefore, number stays normalized
#
	sub.w %d0, %d1 # shift exponent value
	mov.w FTEMP_EX(%a0), %d0 # load old exponent
	and.w &0x8000, %d0 # save old sign
	or.w %d0, %d1 # {sgn,new exp}
	mov.w %d1, FTEMP_EX(%a0) # insert new exponent

	bsr.l norm # normalize UNNORM

	mov.b &NORM, %d0 # return new optype tag
	rts

#
# exponent would go < 0, so only denormalize until exp = 0
#
unnorm_nrm_zero:
	cmp.b %d1, &32 # is exp <= 32?
	bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent

# exp <= 32: shift the mantissa left by exp places; hi(man) picks up
# the top bits of lo(man) via the overlapping bit-field extract
	bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
	mov.l %d0, FTEMP_HI(%a0) # save new hi(man)

	mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
	lsl.l %d1, %d0 # extract new lo(man)
	mov.l %d0, FTEMP_LO(%a0) # save new lo(man)

	and.w &0x8000, FTEMP_EX(%a0) # set exp = 0

	mov.b &DENORM, %d0 # return new optype tag
	rts

#
# exp > 32: only mantissa bits set are in lo(man)
#
unnorm_nrm_zero_lrg:
	sub.w &32, %d1 # adjust shft amt by 32

	mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
	lsl.l %d1, %d0 # left shift lo(man)

	mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
	clr.l FTEMP_LO(%a0) # lo(man) = 0

	and.w &0x8000, FTEMP_EX(%a0) # set exp = 0

	mov.b &DENORM, %d0 # return new optype tag
	rts

#
# whole mantissa is zero so this UNNORM is actually a zero
#
unnorm_zero:
	and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero

	mov.b &ZERO, %d0 # fix optype tag
	rts
22472:
22473: #########################################################################
22474: # XDEF **************************************************************** #
22475: # set_tag_x(): return the optype of the input ext fp number #
22476: # #
22477: # XREF **************************************************************** #
22478: # None #
22479: # #
22480: # INPUT *************************************************************** #
22481: # a0 = pointer to extended precision operand #
22482: # #
22483: # OUTPUT ************************************************************** #
22484: # d0 = value of type tag #
22485: # one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO #
22486: # #
22487: # ALGORITHM *********************************************************** #
22488: # Simply test the exponent, j-bit, and mantissa values to #
22489: # determine the type of operand. #
22490: # If it's an unnormalized zero, alter the operand and force it #
22491: # to be a normal zero. #
22492: # #
22493: #########################################################################
22494:
	global set_tag_x
set_tag_x:
	mov.w FTEMP_EX(%a0), %d0 # extract exponent
	andi.w &0x7fff, %d0 # strip off sign
	cmpi.w %d0, &0x7fff # is (EXP == MAX)?
	beq.b inf_or_nan_x
not_inf_or_nan_x:
	btst &0x7,FTEMP_HI(%a0) # is the j-bit (explicit integer bit) set?
	beq.b not_norm_x
is_norm_x:
	mov.b &NORM, %d0
	rts
not_norm_x:
	tst.w %d0 # is exponent = 0?
	bne.b is_unnorm_x
not_unnorm_x:
# exp = 0 and j-bit clear: zero or denorm depending on the mantissa
	tst.l FTEMP_HI(%a0)
	bne.b is_denorm_x
	tst.l FTEMP_LO(%a0)
	bne.b is_denorm_x
is_zero_x:
	mov.b &ZERO, %d0
	rts
is_denorm_x:
	mov.b &DENORM, %d0
	rts
# must distinguish now "Unnormalized zeroes" which we
# must convert to zero.
is_unnorm_x:
	tst.l FTEMP_HI(%a0)
	bne.b is_unnorm_reg_x
	tst.l FTEMP_LO(%a0)
	bne.b is_unnorm_reg_x
# it's an "unnormalized zero". let's convert it to an actual zero...
	andi.w &0x8000,FTEMP_EX(%a0) # clear exponent (keep sign)
	mov.b &ZERO, %d0
	rts
is_unnorm_reg_x:
	mov.b &UNNORM, %d0
	rts
inf_or_nan_x:
	tst.l FTEMP_LO(%a0)
	bne.b is_nan_x
	mov.l FTEMP_HI(%a0), %d0
	and.l &0x7fffffff, %d0 # msb is a don't care!
	bne.b is_nan_x
is_inf_x:
	mov.b &INF, %d0
	rts
is_nan_x:
	btst &0x6, FTEMP_HI(%a0) # NaN quiet bit: set => QNAN
	beq.b is_snan_x
	mov.b &QNAN, %d0
	rts
is_snan_x:
	mov.b &SNAN, %d0
	rts
22552:
22553: #########################################################################
22554: # XDEF **************************************************************** #
22555: # set_tag_d(): return the optype of the input dbl fp number #
22556: # #
22557: # XREF **************************************************************** #
22558: # None #
22559: # #
22560: # INPUT *************************************************************** #
22561: # a0 = points to double precision operand #
22562: # #
22563: # OUTPUT ************************************************************** #
22564: # d0 = value of type tag #
22565: # one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
22566: # #
22567: # ALGORITHM *********************************************************** #
22568: # Simply test the exponent, j-bit, and mantissa values to #
22569: # determine the type of operand. #
22570: # #
22571: #########################################################################
22572:
	global set_tag_d
set_tag_d:
	mov.l FTEMP(%a0), %d0 # fetch hi longword {sgn, exp, hi(man)}
	mov.l %d0, %d1 # keep a copy for the mantissa tests

	andi.l &0x7ff00000, %d0 # isolate the 11-bit exponent
	beq.b zero_or_denorm_d # exp = 0

	cmpi.l %d0, &0x7ff00000 # exp = max?
	beq.b inf_or_nan_d

is_norm_d:
	mov.b &NORM, %d0
	rts
zero_or_denorm_d:
	and.l &0x000fffff, %d1 # any hi(mantissa) bits set?
	bne is_denorm_d
	tst.l 4+FTEMP(%a0) # any lo(mantissa) bits set?
	bne is_denorm_d
is_zero_d:
	mov.b &ZERO, %d0
	rts
is_denorm_d:
	mov.b &DENORM, %d0
	rts
inf_or_nan_d:
	and.l &0x000fffff, %d1 # any hi(mantissa) bits set?
	bne is_nan_d
	tst.l 4+FTEMP(%a0) # any lo(mantissa) bits set?
	bne is_nan_d
is_inf_d:
	mov.b &INF, %d0
	rts
is_nan_d:
	btst &19, %d1 # NaN quiet bit: set => QNAN
	bne is_qnan_d
is_snan_d:
	mov.b &SNAN, %d0
	rts
is_qnan_d:
	mov.b &QNAN, %d0
	rts
22615:
22616: #########################################################################
22617: # XDEF **************************************************************** #
22618: # set_tag_s(): return the optype of the input sgl fp number #
22619: # #
22620: # XREF **************************************************************** #
22621: # None #
22622: # #
22623: # INPUT *************************************************************** #
22624: # a0 = pointer to single precision operand #
22625: # #
22626: # OUTPUT ************************************************************** #
22627: # d0 = value of type tag #
22628: # one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
22629: # #
22630: # ALGORITHM *********************************************************** #
22631: # Simply test the exponent, j-bit, and mantissa values to #
22632: # determine the type of operand. #
22633: # #
22634: #########################################################################
22635:
	global set_tag_s
set_tag_s:
	mov.l FTEMP(%a0), %d0 # fetch the single {sgn, exp, man}
	mov.l %d0, %d1 # keep a copy for the mantissa tests

	andi.l &0x7f800000, %d0 # isolate the 8-bit exponent
	beq.b zero_or_denorm_s # exp = 0

	cmpi.l %d0, &0x7f800000 # exp = max?
	beq.b inf_or_nan_s

is_norm_s:
	mov.b &NORM, %d0
	rts
zero_or_denorm_s:
	and.l &0x007fffff, %d1 # any mantissa bits set?
	bne is_denorm_s
is_zero_s:
	mov.b &ZERO, %d0
	rts
is_denorm_s:
	mov.b &DENORM, %d0
	rts
inf_or_nan_s:
	and.l &0x007fffff, %d1 # any mantissa bits set?
	bne is_nan_s
is_inf_s:
	mov.b &INF, %d0
	rts
is_nan_s:
	btst &22, %d1 # NaN quiet bit: set => QNAN
	bne is_qnan_s
is_snan_s:
	mov.b &SNAN, %d0
	rts
is_qnan_s:
	mov.b &QNAN, %d0
	rts
22674:
22675: #########################################################################
22676: # XDEF **************************************************************** #
22677: # unf_res(): routine to produce default underflow result of a #
22678: # scaled extended precision number; this is used by #
22679: # fadd/fdiv/fmul/etc. emulation routines. #
22680: # unf_res4(): same as above but for fsglmul/fsgldiv which use #
22681: # single round prec and extended prec mode. #
22682: # #
22683: # XREF **************************************************************** #
22684: # _denorm() - denormalize according to scale factor #
22685: # _round() - round denormalized number according to rnd prec #
22686: # #
22687: # INPUT *************************************************************** #
22688: # a0 = pointer to extended precision operand #
22689: # d0 = scale factor #
22690: # d1 = rounding precision/mode #
22691: # #
22692: # OUTPUT ************************************************************** #
22693: # a0 = pointer to default underflow result in extended precision #
22694: # d0.b = result FPSR_cc which caller may or may not want to save #
22695: # #
22696: # ALGORITHM *********************************************************** #
22697: # Convert the input operand to "internal format" which means the #
22698: # exponent is extended to 16 bits and the sign is stored in the unused #
22699: # portion of the extended precision operand. Denormalize the number #
22700: # according to the scale factor passed in d0. Then, round the #
22701: # denormalized result. #
22702: # Set the FPSR_exc bits as appropriate but return the cc bits in #
22703: # d0 in case the caller doesn't want to save them (as is the case for #
22704: # fmove out). #
22705: # unf_res4() for fsglmul/fsgldiv forces the denorm to extended #
22706: # precision and the rounding mode to single. #
22707: # #
22708: #########################################################################
	global unf_res
unf_res:
	mov.l %d1, -(%sp) # save rnd prec,mode on stack

# convert to "internal" format: move the sign into FTEMP_SGN so the
# exponent field can hold the full (possibly negative) scaled value
	btst &0x7, FTEMP_EX(%a0) # make "internal" format
	sne FTEMP_SGN(%a0)

	mov.w FTEMP_EX(%a0), %d1 # extract exponent
	and.w &0x7fff, %d1
	sub.w %d0, %d1 # bias exponent by the scale factor
	mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent

	mov.l %a0, -(%sp) # save operand ptr during calls

# 0x4(%sp) is the saved rnd prec,mode longword
	mov.l 0x4(%sp),%d0 # pass rnd prec.
	andi.w &0x00c0,%d0
	lsr.w &0x4,%d0
	bsr.l _denorm # denorm result

# rebuild %d1 as {prec (hi word), mode (lo word)} for _round();
# 0x6(%sp) is the low word of the saved rnd prec,mode
	mov.l (%sp),%a0
	mov.w 0x6(%sp),%d1 # load prec:mode into %d1
	andi.w &0xc0,%d1 # extract rnd prec
	lsr.w &0x4,%d1
	swap %d1
	mov.w 0x6(%sp),%d1
	andi.w &0x30,%d1 # extract rnd mode
	lsr.w &0x4,%d1
	bsr.l _round # round the denorm

	mov.l (%sp)+, %a0

# result is now rounded properly. convert back to normal format
	bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
	tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
	beq.b unf_res_chkifzero # no; result is positive
	bset &0x7, FTEMP_EX(%a0) # set result sgn
	clr.b FTEMP_SGN(%a0) # clear temp sign

# the number may have become zero after rounding. set ccodes accordingly.
unf_res_chkifzero:
	clr.l %d0
	tst.l FTEMP_HI(%a0) # is value now a zero?
	bne.b unf_res_cont # no
	tst.l FTEMP_LO(%a0)
	bne.b unf_res_cont # no
# bset &z_bit, FPSR_CC(%a6) # yes; set zero ccode bit
	bset &z_bit, %d0 # yes; set zero ccode bit

unf_res_cont:

#
# can inex1 also be set along with unfl and inex2???
#
# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
#
	btst &inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
	beq.b unf_res_end # no
	bset &aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl

unf_res_end:
	add.l &0x4, %sp # clear stack
	rts
22771:
# unf_res() for fsglmul() and fsgldiv(): same flow as unf_res() except
# the denorm is forced to extended precision and the round to single
# precision (only the rounding mode is taken from the caller's %d1).
	global unf_res4
unf_res4:
	mov.l %d1,-(%sp) # save rnd prec,mode on stack

# convert to "internal" format: move the sign into FTEMP_SGN so the
# exponent field can hold the full (possibly negative) scaled value
	btst &0x7,FTEMP_EX(%a0) # make "internal" format
	sne FTEMP_SGN(%a0)

	mov.w FTEMP_EX(%a0),%d1 # extract exponent
	and.w &0x7fff,%d1
	sub.w %d0,%d1 # bias exponent by the scale factor
	mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent

	mov.l %a0,-(%sp) # save operand ptr during calls

	clr.l %d0 # force rnd prec = ext
	bsr.l _denorm # denorm result

	mov.l (%sp),%a0
	mov.w &s_mode,%d1 # force rnd prec = sgl
	swap %d1
	mov.w 0x6(%sp),%d1 # load rnd mode
	andi.w &0x30,%d1 # extract rnd mode
	lsr.w &0x4,%d1
	bsr.l _round # round the denorm

	mov.l (%sp)+,%a0

# result is now rounded properly. convert back to normal format
	bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
	tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
	beq.b unf_res4_chkifzero # no; result is positive
	bset &0x7,FTEMP_EX(%a0) # set result sgn
	clr.b FTEMP_SGN(%a0) # clear temp sign

# the number may have become zero after rounding. set ccodes accordingly.
unf_res4_chkifzero:
	clr.l %d0
	tst.l FTEMP_HI(%a0) # is value now a zero?
	bne.b unf_res4_cont # no
	tst.l FTEMP_LO(%a0)
	bne.b unf_res4_cont # no
# bset &z_bit,FPSR_CC(%a6) # yes; set zero ccode bit
	bset &z_bit,%d0 # yes; set zero ccode bit

unf_res4_cont:

#
# can inex1 also be set along with unfl and inex2???
#
# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
#
	btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
	beq.b unf_res4_end # no
	bset &aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl

unf_res4_end:
	add.l &0x4,%sp # clear stack
	rts
22831:
22832: #########################################################################
22833: # XDEF **************************************************************** #
22834: # ovf_res(): routine to produce the default overflow result of #
22835: # an overflowing number. #
22836: # ovf_res2(): same as above but the rnd mode/prec are passed #
22837: # differently. #
22838: # #
22839: # XREF **************************************************************** #
22840: # none #
22841: # #
22842: # INPUT *************************************************************** #
22843: # d1.b = '-1' => (-); '0' => (+) #
22844: # ovf_res(): #
22845: # d0 = rnd mode/prec #
22846: # ovf_res2(): #
22847: # hi(d0) = rnd prec #
22848: # lo(d0) = rnd mode #
22849: # #
22850: # OUTPUT ************************************************************** #
22851: # a0 = points to extended precision result #
22852: # d0.b = condition code bits #
22853: # #
22854: # ALGORITHM *********************************************************** #
22855: # The default overflow result can be determined by the sign of #
22856: # the result and the rounding mode/prec in effect. These bits are #
22857: # concatenated together to create an index into the default result #
22858: # table. A pointer to the correct result is returned in a0. The #
22859: # resulting condition codes are returned in d0 in case the caller #
22860: # doesn't want FPSR_cc altered (as is the case for fmove out). #
22861: # #
22862: #########################################################################
22863:
22864: global ovf_res
22865: ovf_res:
22866: andi.w &0x10,%d1 # keep result sign (bit 4)
22867: lsr.b &0x4,%d0 # shift prec/mode down to bits 3-0
22868: or.b %d0,%d1 # concat the two
22869: mov.w %d1,%d0 # make a copy
22870: lsl.b &0x1,%d1 # multiply d1 by 2
22871: bra.b ovf_res_load
22872:
22873: global ovf_res2
22874: ovf_res2:
22875: and.w &0x10, %d1 # keep result sign
22876: or.b %d0, %d1 # insert rnd mode
22877: swap %d0
22878: or.b %d0, %d1 # insert rnd prec
22879: mov.w %d1, %d0 # make a copy
22880: lsl.b &0x1, %d1 # shift left by 1
22881:
22882: #
22883: # use the rounding mode, precision, and result sign as an index into the
22884: # two tables below to fetch the default result and the result ccodes.
# index: d0.w = (sign << 4) | (prec << 2) | mode. d1.w = 2*d0 so that
# the *8 scale below steps through the 16-byte result table entries.
22885: #
22886: ovf_res_load:
22887: mov.b (tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
22888: lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
22889:
22890: rts
22891:
22892: tbl_ovfl_cc:
# condition-code bytes, one per {sign,prec,mode} index; entries align
# 1:1 with tbl_ovfl_result below. 0x2 marks entries whose default
# result is an INF, 0x8 is added for negative results; the all-zero
# row pads the unused precision encoding.
22893: byte 0x2, 0x0, 0x0, 0x2
22894: byte 0x2, 0x0, 0x0, 0x2
22895: byte 0x2, 0x0, 0x0, 0x2
22896: byte 0x0, 0x0, 0x0, 0x0
22897: byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
22898: byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
22899: byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
22900:
22901: tbl_ovfl_result:
# default overflow results, one 16-byte extended-precision value per
# {sign,prec,mode} index. For each precision the four rows are the
# RN/RZ/RM/RP rounding modes; the all-zero group pads the unused
# precision encoding on the positive side.
22902: long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
22903: long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
22904: long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
22905: long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
22906:
22907: long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
22908: long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
22909: long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
22910: long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
22911:
22912: long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
22913: long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
22914: long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
22915: long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
22916:
22917: long 0x00000000,0x00000000,0x00000000,0x00000000
22918: long 0x00000000,0x00000000,0x00000000,0x00000000
22919: long 0x00000000,0x00000000,0x00000000,0x00000000
22920: long 0x00000000,0x00000000,0x00000000,0x00000000
22921:
22922: long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
22923: long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
22924: long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
22925: long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
22926:
22927: long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
22928: long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
22929: long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
22930: long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
22931:
22932: long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
22933: long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
22934: long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
22935: long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
22936:
22937: #########################################################################
22938: # XDEF **************************************************************** #
22939: # get_packed(): fetch a packed operand from memory and then #
22940: # convert it to a floating-point binary number. #
22941: # #
22942: # XREF **************************************************************** #
22943: # _dcalc_ea() - calculate the correct <ea> #
22944: # _mem_read() - fetch the packed operand from memory #
22945: # facc_in_x() - the fetch failed so jump to special exit code #
22946: # decbin() - convert packed to binary extended precision #
22947: # #
22948: # INPUT *************************************************************** #
22949: # None #
22950: # #
22951: # OUTPUT ************************************************************** #
22952: # If no failure on _mem_read(): #
22953: # FP_SRC(a6) = packed operand now as a binary FP number #
22954: # #
22955: # ALGORITHM *********************************************************** #
22956: # Get the correct <ea> which is the value on the exception stack #
22957: # frame w/ maybe a correction factor if the <ea> is -(an) or (an)+. #
22958: # Then, fetch the operand from memory. If the fetch fails, exit #
22959: # through facc_in_x(). #
22960: # If the packed operand is a ZERO,NAN, or INF, convert it to #
22961: # its binary representation here. Else, call decbin() which will #
22962: # convert the packed value to an extended precision binary value. #
22963: # #
22964: #########################################################################
22965:
22966: # the stacked <ea> for packed is correct except for -(An).
22967: # the base reg must be updated for both -(An) and (An)+.
22968: global get_packed
22969: get_packed:
22970: mov.l &0xc,%d0 # packed is 12 bytes
22971: bsr.l _dcalc_ea # fetch <ea>; correct An
22972:
22973: lea FP_SRC(%a6),%a1 # pass: ptr to super dst
22974: mov.l &0xc,%d0 # pass: 12 bytes
22975: bsr.l _dmem_read # read packed operand
22976:
22977: tst.l %d1 # did dfetch fail?
22978: bne.l facc_in_x # yes
22979:
22980: # The packed operand is an INF or a NAN if the exponent field is all ones.
22981: bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
22982: cmpi.w %d0,&0x7fff # INF or NAN?
22983: bne.b gp_try_zero # no
22984: rts # operand is an INF or NAN
22985:
22986: # The packed operand is a zero if the mantissa is all zero, else it's
22987: # a normal packed op.
22988: gp_try_zero:
22989: mov.b 3+FP_SRC(%a6),%d0 # get byte 4
22990: andi.b &0x0f,%d0 # keep low nybble (the integer digit)
22991: bne.b gp_not_spec # not a zero
22992: tst.l FP_SRC_HI(%a6) # is lw 2 zero?
22993: bne.b gp_not_spec # not a zero
22994: tst.l FP_SRC_LO(%a6) # is lw 3 zero?
22995: bne.b gp_not_spec # not a zero
22996: rts # operand is a ZERO
22997: gp_not_spec:
22998: lea FP_SRC(%a6),%a0 # pass: ptr to packed op
22999: bsr.l decbin # convert to extended
23000: fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
23001: rts
23002:
23003: #########################################################################
23004: # decbin(): Converts normalized packed bcd value pointed to by register #
23005: # a0 to extended-precision value in fp0. #
23006: # #
23007: # INPUT *************************************************************** #
23008: # a0 = pointer to normalized packed bcd value #
23009: # #
23010: # OUTPUT ************************************************************** #
23011: # fp0 = exact fp representation of the packed bcd value. #
23012: # #
23013: # ALGORITHM *********************************************************** #
23014: # Expected is a normal bcd (i.e. non-exceptional; all inf, zero, #
23015: # and NaN operands are dispatched without entering this routine) #
23016: # value in 68881/882 format at location (a0). #
23017: # #
23018: # A1. Convert the bcd exponent to binary by successive adds and #
23019: # muls. Set the sign according to SE. Subtract 16 to compensate #
23020: # for the mantissa which is to be interpreted as 17 integer #
23021: # digits, rather than 1 integer and 16 fraction digits. #
23022: # Note: this operation can never overflow. #
23023: # #
23024: # A2. Convert the bcd mantissa to binary by successive #
23025: # adds and muls in FP0. Set the sign according to SM. #
23026: # The mantissa digits will be converted with the decimal point #
23027: # assumed following the least-significant digit. #
23028: # Note: this operation can never overflow. #
23029: # #
23030: # A3. Count the number of leading/trailing zeros in the #
23031: # bcd string. If SE is positive, count the leading zeros; #
23032: # if negative, count the trailing zeros. Set the adjusted #
23033: # exponent equal to the exponent from A1 and the zero count #
23034: # added if SM = 1 and subtracted if SM = 0. Scale the #
23035: # mantissa the equivalent of forcing in the bcd value: #
23036: # #
23037: # SM = 0 a non-zero digit in the integer position #
23038: # SM = 1 a non-zero digit in Mant0, lsd of the fraction #
23039: # #
23040: # this will insure that any value, regardless of its #
23041: # representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted #
23042: # consistently. #
23043: # #
23044: # A4. Calculate the factor 10^exp in FP1 using a table of #
23045: # 10^(2^n) values. To reduce the error in forming factors #
23046: # greater than 10^27, a directed rounding scheme is used with #
23047: # tables rounded to RN, RM, and RP, according to the table #
23048: # in the comments of the pwrten section. #
23049: # #
23050: # A5. Form the final binary number by scaling the mantissa by #
23051: # the exponent factor. This is done by multiplying the #
23052: # mantissa in FP0 by the factor in FP1 if the adjusted #
23053: # exponent sign is positive, and dividing FP0 by FP1 if #
23054: # it is negative. #
23055: # #
23056: # Clean up and return. Check if the final mul or div was inexact. #
23057: # If so, set INEX1 in USER_FPSR. #
23058: # #
23059: #########################################################################
23060:
23061: #
23062: # PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
23063: # to nearest, minus, and plus, respectively. The tables include
23064: # 10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}. No rounding
23065: # is required until the power is greater than 27, however, all
23066: # tables include the first 5 for ease of indexing.
23067: #
# RTABLE: rounding mode used while computing the 10^exp factor,
# indexed by {FPCR mode bits, SM, SE} (see the table in the pwrten
# comments below). Entries are FPCR rounding-mode encodings:
# 0 = RN, 2 = RM, 3 = RP.
23068: RTABLE:
23069: byte 0,0,0,0
23070: byte 2,3,2,3
23071: byte 2,3,3,2
23072: byte 3,2,2,3
23073:
23074: set FNIBS,7 # dbf count for the 8 digits of a mantissa lword
23075: set FSTRT,0 # bitfield offset of first mantissa digit
23076:
23077: set ESTRT,4 # bitfield offset of first exponent digit
23078: set EDIGITS,2 # dbf count for the 3 exponent digits
23079:
23080: global decbin
23081: decbin:
23082: mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
23083: mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
23084: mov.l 0x8(%a0),FP_SCR0_LO(%a6)
23085:
23086: lea FP_SCR0(%a6),%a0 # a0 now points at the working copy
23087:
23088: movm.l &0x3c00,-(%sp) # save d2-d5
23089: fmovm.x &0x1,-(%sp) # save fp1
23090: #
23091: # Calculate exponent:
23092: # 1. Copy bcd value in memory for use as a working copy.
23093: # 2. Calculate absolute value of exponent in d1 by mul and add.
23094: # 3. Correct for exponent sign.
23095: # 4. Subtract 16 to compensate for interpreting the mant as all integer digits.
23096: # (i.e., all digits assumed left of the decimal point.)
23097: #
23098: # Register usage:
23099: #
23100: # calc_e:
23101: # (*) d0: temp digit storage
23102: # (*) d1: accumulator for binary exponent
23103: # (*) d2: digit count
23104: # (*) d3: offset pointer
23105: # ( ) d4: first word of bcd
23106: # ( ) a0: pointer to working bcd value
23107: # ( ) a6: pointer to original bcd value
23108: # (*) FP_SCR1: working copy of original bcd value
23109: # (*) L_SCR1: copy of original exponent word
23110: #
23111: calc_e:
23112: mov.l &EDIGITS,%d2 # # of nibbles (digits) in fraction part
23113: mov.l &ESTRT,%d3 # counter to pick up digits
23114: mov.l (%a0),%d4 # get first word of bcd
23115: clr.l %d1 # zero d1 for accumulator
23116: e_gd:
23117: mulu.l &0xa,%d1 # mul partial product by one digit place
23118: bfextu %d4{%d3:&4},%d0 # get the digit and zero extend into d0
23119: add.l %d0,%d1 # d1 = d1 + d0
23120: addq.b &4,%d3 # advance d3 to the next digit
23121: dbf.w %d2,e_gd # if we have used all 3 digits, exit loop
23122: btst &30,%d4 # get SE
23123: beq.b e_pos # don't negate if pos
23124: neg.l %d1 # negate before subtracting
23125: e_pos:
23126: sub.l &16,%d1 # sub to compensate for shift of mant
23127: bge.b e_save # if still pos, do not neg
23128: neg.l %d1 # now negative, make pos and set SE
23129: or.l &0x40000000,%d4 # set SE in d4,
23130: or.l &0x40000000,(%a0) # and in working bcd
23131: e_save:
23132: mov.l %d1,-(%sp) # save exp on stack
# the saved exponent is an absolute value; its sign lives in SE
23133: #
23134: #
23135: # Calculate mantissa:
23136: # 1. Calculate absolute value of mantissa in fp0 by mul and add.
23137: # 2. Correct for mantissa sign.
23138: # (i.e., all digits assumed left of the decimal point.)
23139: #
23140: # Register usage:
23141: #
23142: # calc_m:
23143: # (*) d0: temp digit storage
23144: # (*) d1: lword counter
23145: # (*) d2: digit count
23146: # (*) d3: offset pointer
23147: # ( ) d4: words 2 and 3 of bcd
23148: # ( ) a0: pointer to working bcd value
23149: # ( ) a6: pointer to original bcd value
23150: # (*) fp0: mantissa accumulator
23151: # ( ) FP_SCR1: working copy of original bcd value
23152: # ( ) L_SCR1: copy of original exponent word
23153: #
23154: calc_m:
23155: mov.l &1,%d1 # word counter, init to 1
23156: fmov.s &0x00000000,%fp0 # accumulator
23157: #
23158: #
23159: # Since the packed number has a long word between the first & second parts,
23160: # get the integer digit then skip down & get the rest of the
23161: # mantissa. We will unroll the loop once.
23162: #
23163: bfextu (%a0){&28:&4},%d0 # integer part is ls digit in long word
23164: fadd.b %d0,%fp0 # add digit to sum in fp0
23165: #
23166: #
23167: # Get the rest of the mantissa.
23168: #
23169: loadlw:
23170: mov.l (%a0,%d1.L*4),%d4 # load mantissa longword into d4
23171: mov.l &FSTRT,%d3 # counter to pick up digits
23172: mov.l &FNIBS,%d2 # reset number of digits per a0 ptr
23173: md2b:
23174: fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
23175: bfextu %d4{%d3:&4},%d0 # get the digit and zero extend
23176: fadd.b %d0,%fp0 # fp0 = fp0 + digit
23177: #
23178: #
23179: # If all the digits (8) in that long word have been converted (d2=0),
23180: # then inc d1 (=2) to point to the next long word and reset d3 to 0
23181: # to initialize the digit offset, and set d2 to 7 for the digit count;
23182: # else continue with this long word.
23183: #
23184: addq.b &4,%d3 # advance d3 to the next digit
23185: dbf.w %d2,md2b # check for last digit in this lw
23186: nextlw:
23187: addq.l &1,%d1 # inc lw pointer in mantissa
23188: cmp.l %d1,&2 # test for last lw
23189: ble.b loadlw # if not, get last one
23190: #
23191: # Check the sign of the mant and make the value in fp0 the same sign.
23192: #
23193: m_sign:
23194: btst &31,(%a0) # test sign of the mantissa
23195: beq.b ap_st_z # if clear, go to append/strip zeros
23196: fneg.x %fp0 # if set, negate fp0
23197: #
23198: # Append/strip zeros:
23199: #
23200: # For adjusted exponents which have an absolute value greater than 27*,
23201: # this routine calculates the amount needed to normalize the mantissa
23202: # for the adjusted exponent. That number is subtracted from the exp
23203: # if the exp was positive, and added if it was negative. The purpose
23204: # of this is to reduce the value of the exponent and the possibility
23205: # of error in calculation of pwrten.
23206: #
23207: # 1. Branch on the sign of the adjusted exponent.
23208: # 2p.(positive exp)
23209: # 2. Check M16 and the digits in lwords 2 and 3 in descending order.
23210: # 3. Add one for each zero encountered until a non-zero digit.
23211: # 4. Subtract the count from the exp.
23212: # 5. Check if the exp has crossed zero in #3 above; make the exp abs
23213: # and set SE.
23214: # 6. Multiply the mantissa by 10**count.
23215: # 2n.(negative exp)
23216: # 2. Check the digits in lwords 3 and 2 in descending order.
23217: # 3. Add one for each zero encountered until a non-zero digit.
23218: # 4. Add the count to the exp.
23219: # 5. Check if the exp has crossed zero in #3 above; clear SE.
23220: # 6. Divide the mantissa by 10**count.
23221: #
23222: # *Why 27? If the adjusted exponent is within -28 < expA < 28, then
23223: # any adjustment due to append/strip zeros will drive the resultant
23224: # exponent towards zero. Since all pwrten constants with a power
23225: # of 27 or less are exact, there is no need to use this routine to
23226: # attempt to lessen the resultant exponent.
23227: #
23228: # Register usage:
23229: #
23230: # ap_st_z:
23231: # (*) d0: temp digit storage
23232: # (*) d1: zero count
23233: # (*) d2: digit count
23234: # (*) d3: offset pointer
23235: # ( ) d4: first word of bcd
23236: # (*) d5: lword counter
23237: # ( ) a0: pointer to working bcd value
23238: # ( ) FP_SCR1: working copy of original bcd value
23239: # ( ) L_SCR1: copy of original exponent word
23240: #
23241: #
23242: # First check the absolute value of the exponent to see if this
23243: # routine is necessary. If so, then check the sign of the exponent
23244: # and do append (+) or strip (-) zeros accordingly.
23245: # This section handles a positive adjusted exponent.
23246: #
23247: ap_st_z:
23248: mov.l (%sp),%d1 # load expA for range test
23249: cmp.l %d1,&27 # compare with 27
23250: ble.w pwrten # if abs(expA) <28, skip ap/st zeros
23251: btst &30,(%a0) # check sign of exp
23252: bne.b ap_st_n # if neg, go to neg side
23253: clr.l %d1 # zero count reg
23254: mov.l (%a0),%d4 # load lword 1 to d4
23255: bfextu %d4{&28:&4},%d0 # get M16 in d0
23256: bne.b ap_p_fx # if M16 is non-zero, go fix exp
23257: addq.l &1,%d1 # inc zero count
23258: mov.l &1,%d5 # init lword counter
23259: mov.l (%a0,%d5.L*4),%d4 # get lword 2 to d4
23260: bne.b ap_p_cl # if lw 2 is non-zero, go check its digits
23261: addq.l &8,%d1 # and inc count by 8
23262: addq.l &1,%d5 # inc lword counter
23263: mov.l (%a0,%d5.L*4),%d4 # get lword 3 to d4
23264: ap_p_cl:
23265: clr.l %d3 # init offset reg
23266: mov.l &7,%d2 # init digit counter
23267: ap_p_gd:
23268: bfextu %d4{%d3:&4},%d0 # get digit
23269: bne.b ap_p_fx # if non-zero, go to fix exp
23270: addq.l &4,%d3 # point to next digit
23271: addq.l &1,%d1 # inc digit counter
23272: dbf.w %d2,ap_p_gd # get next digit
23273: ap_p_fx:
23274: mov.l %d1,%d0 # copy counter to d0
23275: mov.l (%sp),%d1 # get adjusted exp from memory
23276: sub.l %d0,%d1 # subtract count from exp
23277: bge.b ap_p_fm # if still pos, go to pwrten
23278: neg.l %d1 # now its neg; get abs
23279: mov.l (%a0),%d4 # load lword 1 to d4
23280: or.l &0x40000000,%d4 # and set SE in d4
23281: or.l &0x40000000,(%a0) # and in memory
23282: #
23283: # Calculate the mantissa multiplier to compensate for the stripping of
23284: # zeros from the mantissa.
23285: #
23286: ap_p_fm:
23287: lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
23288: clr.l %d3 # init table index
23289: fmov.s &0x3f800000,%fp1 # init fp1 to 1
23290: mov.l &3,%d2 # init d2 to count bits in counter
# note: the loop below terminates on d0 == 0; d2 is not referenced
23291: ap_p_el:
23292: asr.l &1,%d0 # shift lsb into carry
23293: bcc.b ap_p_en # if zero, skip the mul
23294: fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
23295: ap_p_en:
23296: add.l &12,%d3 # inc d3 to next pwrten table entry
23297: tst.l %d0 # check if d0 is zero
23298: bne.b ap_p_el # if not, get next bit
23299: fmul.x %fp1,%fp0 # mul mantissa by 10**(no_bits_shifted)
23300: bra.b pwrten # go calc pwrten
23301: #
23302: # This section handles a negative adjusted exponent.
23303: #
23304: ap_st_n:
23305: clr.l %d1 # clr counter
23306: mov.l &2,%d5 # set up d5 to point to lword 3
23307: mov.l (%a0,%d5.L*4),%d4 # get lword 3
23308: bne.b ap_n_cl # if not zero, check digits
23309: sub.l &1,%d5 # dec d5 to point to lword 2
23310: addq.l &8,%d1 # inc counter by 8
23311: mov.l (%a0,%d5.L*4),%d4 # get lword 2
23312: ap_n_cl:
23313: mov.l &28,%d3 # point to last digit
23314: mov.l &7,%d2 # init digit counter
23315: ap_n_gd:
23316: bfextu %d4{%d3:&4},%d0 # get digit
23317: bne.b ap_n_fx # if non-zero, go to exp fix
23318: subq.l &4,%d3 # point to previous digit
23319: addq.l &1,%d1 # inc digit counter
23320: dbf.w %d2,ap_n_gd # get next digit
23321: ap_n_fx:
23322: mov.l %d1,%d0 # copy counter to d0
23323: mov.l (%sp),%d1 # get adjusted exp from memory
23324: sub.l %d0,%d1 # subtract count from exp
23325: bgt.b ap_n_fm # if still pos, go fix mantissa
23326: neg.l %d1 # take abs of exp and clr SE
23327: mov.l (%a0),%d4 # load lword 1 to d4
23328: and.l &0xbfffffff,%d4 # and clr SE in d4
23329: and.l &0xbfffffff,(%a0) # and in memory
23330: #
23331: # Calculate the mantissa multiplier to compensate for the appending of
23332: # zeros to the mantissa.
23333: #
23334: ap_n_fm:
23335: lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
23336: clr.l %d3 # init table index
23337: fmov.s &0x3f800000,%fp1 # init fp1 to 1
23338: mov.l &3,%d2 # init d2 to count bits in counter
# note: the loop below terminates on d0 == 0; d2 is not referenced
23339: ap_n_el:
23340: asr.l &1,%d0 # shift lsb into carry
23341: bcc.b ap_n_en # if zero, skip the mul
23342: fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
23343: ap_n_en:
23344: add.l &12,%d3 # inc d3 to next pwrten table entry
23345: tst.l %d0 # check if d0 is zero
23346: bne.b ap_n_el # if not, get next bit
23347: fdiv.x %fp1,%fp0 # div mantissa by 10**(no_bits_shifted)
23348: #
23349: #
23350: # Calculate power-of-ten factor from adjusted and shifted exponent.
23351: #
23352: # Register usage:
23353: #
23354: # pwrten:
23355: # (*) d0: temp
23356: # ( ) d1: exponent
23357: # (*) d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
23358: # (*) d3: FPCR work copy
23359: # ( ) d4: first word of bcd
23360: # (*) a1: RTABLE pointer
23361: # calc_p:
23362: # (*) d0: temp
23363: # ( ) d1: exponent
23364: # (*) d3: PWRTxx table index
23365: # ( ) a0: pointer to working copy of bcd
23366: # (*) a1: PWRTxx pointer
23367: # (*) fp1: power-of-ten accumulator
23368: #
23369: # Pwrten calculates the exponent factor in the selected rounding mode
23370: # according to the following table:
23371: #
23372: # Sign of Mant Sign of Exp Rounding Mode PWRTEN Rounding Mode
23373: #
23374: # ANY ANY RN RN
23375: #
23376: # + + RP RP
23377: # - + RP RM
23378: # + - RP RM
23379: # - - RP RP
23380: #
23381: # + + RM RM
23382: # - + RM RP
23383: # + - RM RP
23384: # - - RM RM
23385: #
23386: # + + RZ RM
23387: # - + RZ RM
23388: # + - RZ RP
23389: # - - RZ RP
23390: #
23391: #
23392: pwrten:
23393: mov.l USER_FPCR(%a6),%d3 # get user's FPCR
23394: bfextu %d3{&26:&2},%d2 # isolate rounding mode bits
23395: mov.l (%a0),%d4 # reload 1st bcd word to d4
23396: asl.l &2,%d2 # format d2 to be
23397: bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
23398: add.l %d0,%d2 # in d2 as index into RTABLE
23399: lea.l RTABLE(%pc),%a1 # load rtable base
23400: mov.b (%a1,%d2),%d0 # load new rounding bits from table
23401: clr.l %d3 # clear d3 to force no exc and extended
23402: bfins %d0,%d3{&26:&2} # stuff new rounding bits in FPCR
23403: fmov.l %d3,%fpcr # write new FPCR
# select the matching PTENxx table: mode 3 = RP, 2 = RM, 0 = RN
23404: asr.l &1,%d0 # shift mode lsb into carry
23405: bcc.b not_rp # lsb clear, not RP
23406: lea.l PTENRP(%pc),%a1 # it is RP
23407: bra.b calc_p # go to init section
23408: not_rp:
23409: asr.l &1,%d0 # keep checking
23410: bcc.b not_rm
23411: lea.l PTENRM(%pc),%a1 # it is RM
23412: bra.b calc_p # go to init section
23413: not_rm:
23414: lea.l PTENRN(%pc),%a1 # it is RN
23415: calc_p:
23416: mov.l %d1,%d0 # copy exp to d0;use d0
23417: bpl.b no_neg # if exp is negative,
23418: neg.l %d0 # invert it
23419: or.l &0x40000000,(%a0) # and set SE bit
23420: no_neg:
23421: clr.l %d3 # table index
23422: fmov.s &0x3f800000,%fp1 # init fp1 to 1
23423: e_loop:
23424: asr.l &1,%d0 # shift next bit into carry
23425: bcc.b e_next # if zero, skip the mul
23426: fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
23427: e_next:
23428: add.l &12,%d3 # inc d3 to next pwrten table entry
23429: tst.l %d0 # check if d0 is zero
23430: bne.b e_loop # not zero, continue shifting
23431: #
23432: #
23433: # Check the sign of the adjusted exp and make the value in fp0 the
23434: # same sign. If the exp was pos then multiply fp1*fp0;
23435: # else divide fp0/fp1.
23436: #
23437: # Register Usage:
23438: # norm:
23439: # ( ) a0: pointer to working bcd value
23440: # (*) fp0: mantissa accumulator
23441: # ( ) fp1: scaling factor - 10**(abs(exp))
23442: #
23443: pnorm:
23444: btst &30,(%a0) # test the sign of the exponent
23445: beq.b mul # if clear, go to multiply
23446: div:
23447: fdiv.x %fp1,%fp0 # exp is negative, so divide mant by 10**|exp|
23448: bra.b end_dec
23449: mul:
23450: fmul.x %fp1,%fp0 # exp is positive, so multiply by 10**exp
23451: #
23452: #
23453: # Clean up and return with result in fp0.
23454: #
23455: # If the final mul/div in decbin incurred an inex exception,
23456: # it will be inex2, but will be reported as inex1 by get_op.
23457: #
23458: end_dec:
23459: fmov.l %fpsr,%d0 # get status register
23460: bclr &inex2_bit+8,%d0 # test for inex2 and clear it
23461: beq.b no_exc # skip this if no exc
23462: ori.w &inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
23463: no_exc:
23464: add.l &0x4,%sp # clear 1 lw param
23465: fmovm.x (%sp)+,&0x40 # restore fp1
23466: movm.l (%sp)+,&0x3c # restore d2-d5
23467: fmov.l &0x0,%fpcr # clear FPCR
23468: fmov.l &0x0,%fpsr # clear FPSR
23469: rts
23470:
23471: #########################################################################
23472: # bindec(): Converts an input in extended precision format to bcd format#
23473: # #
23474: # INPUT *************************************************************** #
23475: # a0 = pointer to the input extended precision value in memory. #
23476: # the input may be either normalized, unnormalized, or #
23477: # denormalized. #
23478: # d0 = contains the k-factor sign-extended to 32-bits. #
23479: # #
23480: # OUTPUT ************************************************************** #
23481: # FP_SCR0(a6) = bcd format result on the stack. #
23482: # #
23483: # ALGORITHM *********************************************************** #
23484: # #
23485: # A1. Set RM and size ext; Set SIGMA = sign of input. #
23486: # The k-factor is saved for use in d7. Clear the #
23487: # BINDEC_FLG for separating normalized/denormalized #
23488: # input. If input is unnormalized or denormalized, #
23489: # normalize it. #
23490: # #
23491: # A2. Set X = abs(input). #
23492: # #
23493: # A3. Compute ILOG. #
23494: # ILOG is the log base 10 of the input value. It is #
23495: # approximated by adding e + 0.f when the original #
23496: # value is viewed as 2^^e * 1.f in extended precision. #
23497: # This value is stored in d6. #
23498: # #
23499: # A4. Clr INEX bit. #
23500: # The operation in A3 above may have set INEX2. #
23501: # #
23502: # A5. Set ICTR = 0; #
23503: # ICTR is a flag used in A13. It must be set before the #
23504: # loop entry A6. #
23505: # #
23506: # A6. Calculate LEN. #
23507: # LEN is the number of digits to be displayed. The #
23508: # k-factor can dictate either the total number of digits, #
23509: # if it is a positive number, or the number of digits #
23510: # after the decimal point which are to be included as #
23511: # significant. See the 68882 manual for examples. #
23512: # If LEN is computed to be greater than 17, set OPERR in #
23513: # USER_FPSR. LEN is stored in d4. #
23514: # #
23515: # A7. Calculate SCALE. #
23516: # SCALE is equal to 10^ISCALE, where ISCALE is the number #
23517: # of decimal places needed to insure LEN integer digits #
23518: # in the output before conversion to bcd. LAMBDA is the #
23519: # sign of ISCALE, used in A9. Fp1 contains #
23520: # 10^^(abs(ISCALE)) using a rounding mode which is a #
23521: # function of the original rounding mode and the signs #
23522: # of ISCALE and X. A table is given in the code. #
23523: # #
23524: # A8. Clr INEX; Force RZ. #
23525: # The operation in A3 above may have set INEX2. #
23526: # RZ mode is forced for the scaling operation to insure #
23527: # only one rounding error. The grs bits are collected in #
23528: # the INEX flag for use in A10. #
23529: # #
23530: # A9. Scale X -> Y. #
23531: # The mantissa is scaled to the desired number of #
23532: # significant digits. The excess digits are collected #
23533: # in INEX2. #
23534: # #
23535: # A10. Or in INEX. #
23536: # If INEX is set, round error occurred. This is #
23537: # compensated for by 'or-ing' in the INEX2 flag to #
23538: # the lsb of Y. #
23539: # #
23540: # A11. Restore original FPCR; set size ext. #
23541: # Perform FINT operation in the user's rounding mode. #
23542: # Keep the size to extended. #
23543: # #
23544: # A12. Calculate YINT = FINT(Y) according to user's rounding #
23545: # mode. The FPSP routine sintd0 is used. The output #
23546: # is in fp0. #
23547: # #
23548: # A13. Check for LEN digits. #
23549: # If the int operation results in more than LEN digits, #
23550: # or less than LEN -1 digits, adjust ILOG and repeat from #
23551: # A6. This test occurs only on the first pass. If the #
23552: # result is exactly 10^LEN, decrement ILOG and divide #
23553: # the mantissa by 10. #
23554: # #
23555: # A14. Convert the mantissa to bcd. #
23556: # The binstr routine is used to convert the LEN digit #
23557: # mantissa to bcd in memory. The input to binstr is #
23558: # to be a fraction; i.e. (mantissa)/10^LEN and adjusted #
23559: # such that the decimal point is to the left of bit 63. #
23560: # The bcd digits are stored in the correct position in #
23561: # the final string area in memory. #
23562: # #
23563: # A15. Convert the exponent to bcd. #
23564: # As in A14 above, the exp is converted to bcd and the #
23565: # digits are stored in the final string. #
23566: # Test the length of the final exponent string. If the #
23567: # length is 4, set operr. #
23568: # #
23569: # A16. Write sign bits to final string. #
23570: # #
23571: #########################################################################
23572:
23573: 	set	BINDEC_FLG, EXC_TEMP	# DENORM flag
23574:
23575: # Constants in extended precision
# PLOG2   = log10(2) with mantissa rounded down in the last bit;
# PLOG2UP1 = same value rounded up one ulp.  A3 multiplies (e + 0.f - 1)
# by one of these, choosing the direction that biases ILOG correctly.
23576: PLOG2:
23577: 	long	0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
23578: PLOG2UP1:
23579: 	long	0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
23580:
23581: # Constants in single precision
# Each entry occupies 16 bytes like the extended constants above,
# but only the first lword (the single-precision value) is used.
23582: FONE:
23583: 	long	0x3F800000,0x00000000,0x00000000,0x00000000
23584: FTWO:
23585: 	long	0x40000000,0x00000000,0x00000000,0x00000000
23586: FTEN:
23587: 	long	0x41200000,0x00000000,0x00000000,0x00000000
23588: F4933:
23589: 	long	0x459A2800,0x00000000,0x00000000,0x00000000
23590:
# Rounding-mode redirection table used in A7.  Indexed by
# {FPCR rmode bits, LAMBDA, sign(X)}; yields the rmode (0=RN,2=RM,3=RP)
# used for computing 10^ISCALE (see the A7 table in the comments below).
23591: RBDTBL:
23592: 	byte	0,0,0,0
23593: 	byte	3,3,2,2
23594: 	byte	3,2,2,3
23595: 	byte	2,3,3,2
23596:
23597: # Implementation Notes:
23598: #
23599: # The registers are used as follows:
23600: #
23601: # d0: scratch; LEN input to binstr
23602: # d1: scratch
23603: # d2: upper 32-bits of mantissa for binstr
23604: #	d3: scratch; lower 32-bits of mantissa for binstr
23605: # d4: LEN
23606: # d5: LAMBDA/ICTR
23607: # d6: ILOG
23608: # d7: k-factor
23609: # a0: ptr for original operand/final result
23610: # a1: scratch pointer
23611: # a2: pointer to FP_X; abs(original value) in ext
23612: # fp0: scratch
23613: # fp1: scratch
23614: # fp2: scratch
23615: # F_SCR1:
23616: # F_SCR2:
23617: # L_SCR1:
23618: # L_SCR2:
23619:
23620: 	global	bindec
# bindec(): convert an extended-precision operand to packed decimal.
# On entry: a0 = ptr to the (possibly denormalized) source operand,
#           d0 = k-factor.  Steps A1-A16 are described in the header above.
23621: bindec:
23622: 	movm.l	&0x3f20,-(%sp)	# {%d2-%d7/%a2}
23623: 	fmovm.x	&0x7,-(%sp)	# {%fp0-%fp2}
23624:
23625: # A1. Set RM and size ext. Set SIGMA = sign input;
23626: #     The k-factor is saved for use in d7. Clear BINDEC_FLG for
23627: #     separating normalized/denormalized input. If the input
23628: #     is a denormalized number, set the BINDEC_FLG memory word
23629: #     to signal denorm. If the input is unnormalized, normalize
23630: #     the input and test for denormalized result.
23631: #
23632: 	fmov.l	&rm_mode*0x10,%fpcr	# set RM and ext
23633: 	mov.l	(%a0),L_SCR2(%a6)	# save exponent for sign check
23634: 	mov.l	%d0,%d7	# move k-factor to d7
23635:
23636: 	clr.b	BINDEC_FLG(%a6)	# clr norm/denorm flag
23637: 	cmpi.b	STAG(%a6),&DENORM	# is input a DENORM?
23638: 	bne.w	A2_str	# no; input is a NORM
23639:
23640: #
23641: # Normalize the denorm
23642: #
23643: un_de_norm:
23644: 	mov.w	(%a0),%d0	# fetch sign/exponent word
23645: 	and.w	&0x7fff,%d0	# strip sign of normalized exp
23646: 	mov.l	4(%a0),%d1	# hi lword of mantissa
23647: 	mov.l	8(%a0),%d2	# lo lword of mantissa
# Shift the 64-bit mantissa d1:d2 left one bit at a time, decrementing
# the exponent, until the j-bit (bit 31 of d1) is set.
23648: norm_loop:
23649: 	sub.w	&1,%d0	# exp-- for each shift
23650: 	lsl.l	&1,%d2	# shift lo lword; msb -> X
23651: 	roxl.l	&1,%d1	# rotate X into hi lword
23652: 	tst.l	%d1	# j-bit set yet?
23653: 	bge.b	norm_loop	# no (bit 31 clear); keep shifting
23654: #
23655: # Test if the normalized input is denormalized
23656: #
23657: 	tst.w	%d0
23658: 	bgt.b	pos_exp	# if greater than zero, it is a norm
23659: 	st	BINDEC_FLG(%a6)	# set flag for denorm
23660: pos_exp:
23661: 	and.w	&0x7fff,%d0	# strip sign of normalized exp
23662: 	mov.w	%d0,(%a0)	# write back normalized exp
23663: 	mov.l	%d1,4(%a0)	# write back normalized mantissa
23664: 	mov.l	%d2,8(%a0)
23665:
23666: # A2. Set X = abs(input).
23667: #
23668: A2_str:
23669: 	mov.l	(%a0),FP_SCR1(%a6)	# move input to work space
23670: 	mov.l	4(%a0),FP_SCR1+4(%a6)	# move input to work space
23671: 	mov.l	8(%a0),FP_SCR1+8(%a6)	# move input to work space
23672: 	and.l	&0x7fffffff,FP_SCR1(%a6)	# create abs(X)
23673:
23674: # A3. Compute ILOG.
23675: # ILOG is the log base 10 of the input value. It is approx-
23676: # imated by adding e + 0.f when the original value is viewed
23677: # as 2^^e * 1.f in extended precision. This value is stored
23678: # in d6.
23679: #
23680: # Register usage:
23681: # Input/Output
23682: # d0: k-factor/exponent
23683: # d2: x/x
23684: # d3: x/x
23685: # d4: x/x
23686: # d5: x/x
23687: # d6: x/ILOG
23688: # d7: k-factor/Unchanged
23689: # a0: ptr for original operand/final result
23690: # a1: x/x
23691: # a2: x/x
23692: # fp0: x/float(ILOG)
23693: # fp1: x/x
23694: # fp2: x/x
23695: # F_SCR1:x/x
23696: # F_SCR2:Abs(X)/Abs(X) with $3fff exponent
23697: # L_SCR1:x/x
23698: # L_SCR2:first word of X packed/Unchanged
23699:
# A3 (code): ILOG ~= log10(X), computed as (e + 1.f - 1) * log10(2)
# using PLOG2/PLOG2UP1 chosen by the sign of the intermediate result.
23700: 	tst.b	BINDEC_FLG(%a6)	# check for denorm
23701: 	beq.b	A3_cont	# if clr, continue with norm
23702: 	mov.l	&-4933,%d6	# force ILOG = -4933
23703: 	bra.b	A4_str
23704: A3_cont:
23705: 	mov.w	FP_SCR1(%a6),%d0	# move exp to d0
23706: 	mov.w	&0x3fff,FP_SCR1(%a6)	# replace exponent with 0x3fff
23707: 	fmov.x	FP_SCR1(%a6),%fp0	# now fp0 has 1.f
23708: 	sub.w	&0x3fff,%d0	# strip off bias
23709: 	fadd.w	%d0,%fp0	# add in exp
23710: 	fsub.s	FONE(%pc),%fp0	# subtract off 1.0
23711: 	fbge.w	pos_res	# if pos, branch
23712: 	fmul.x	PLOG2UP1(%pc),%fp0	# if neg, mul by LOG2UP1
23713: 	fmov.l	%fp0,%d6	# put ILOG in d6 as a lword
23714: 	bra.b	A4_str	# go move out ILOG
23715: pos_res:
23716: 	fmul.x	PLOG2(%pc),%fp0	# if pos, mul by LOG2
23717: 	fmov.l	%fp0,%d6	# put ILOG in d6 as a lword
23718:
23719:
23720: # A4. Clr INEX bit.
23721: # The operation in A3 above may have set INEX2.
23722:
23723: A4_str:
23724: 	fmov.l	&0,%fpsr	# zero all of fpsr (A3 may have set INEX2)
23725:
23726:
23727: # A5. Set ICTR = 0;
23728: #     ICTR is a flag used in A13. It must be set before the
23729: #     loop entry A6. The lower word of d5 is used for ICTR.
23730:
23731: 	clr.w	%d5	# clear ICTR (tested and set in A13)
23732:
23733: # A6. Calculate LEN.
23734: # LEN is the number of digits to be displayed. The k-factor
23735: # can dictate either the total number of digits, if it is
23736: # a positive number, or the number of digits after the
23737: # original decimal point which are to be included as
23738: # significant. See the 68882 manual for examples.
23739: # If LEN is computed to be greater than 17, set OPERR in
23740: # USER_FPSR. LEN is stored in d4.
23741: #
23742: # Register usage:
23743: # Input/Output
23744: # d0: exponent/Unchanged
23745: # d2: x/x/scratch
23746: # d3: x/x
23747: # d4: exc picture/LEN
23748: # d5: ICTR/Unchanged
23749: # d6: ILOG/Unchanged
23750: # d7: k-factor/Unchanged
23751: # a0: ptr for original operand/final result
23752: # a1: x/x
23753: # a2: x/x
23754: # fp0: float(ILOG)/Unchanged
23755: # fp1: x/x
23756: # fp2: x/x
23757: # F_SCR1:x/x
23758: # F_SCR2:Abs(X) with $3fff exponent/Unchanged
23759: # L_SCR1:x/x
23760: # L_SCR2:first word of X packed/Unchanged
23761:
# A6 (code): LEN = k if k > 0, else ILOG + 1 - k; clamp to [1,17],
# raising OPERR only when a positive k requested more than 17 digits.
23762: A6_str:
23763: 	tst.l	%d7	# branch on sign of k
23764: 	ble.b	k_neg	# if k <= 0, LEN = ILOG + 1 - k
23765: 	mov.l	%d7,%d4	# if k > 0, LEN = k
23766: 	bra.b	len_ck	# skip to LEN check
23767: k_neg:
23768: 	mov.l	%d6,%d4	# first load ILOG to d4
23769: 	sub.l	%d7,%d4	# subtract off k
23770: 	addq.l	&1,%d4	# add in the 1
23771: len_ck:
23772: 	tst.l	%d4	# LEN check: branch on sign of LEN
23773: 	ble.b	LEN_ng	# if neg, set LEN = 1
23774: 	cmp.l	%d4,&17	# test if LEN > 17
23775: 	ble.b	A7_str	# if not, forget it
23776: 	mov.l	&17,%d4	# set max LEN = 17
23777: 	tst.l	%d7	# if negative, never set OPERR
23778: 	ble.b	A7_str	# if positive, continue
23779: 	or.l	&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
23780: 	bra.b	A7_str	# finished here
23781: LEN_ng:
23782: 	mov.l	&1,%d4	# min LEN is 1
23783:
23784:
23785: # A7. Calculate SCALE.
23786: # SCALE is equal to 10^ISCALE, where ISCALE is the number
23787: # of decimal places needed to insure LEN integer digits
23788: # in the output before conversion to bcd. LAMBDA is the sign
23789: # of ISCALE, used in A9. Fp1 contains 10^^(abs(ISCALE)) using
23790: # the rounding mode as given in the following table (see
23791: # Coonen, p. 7.23 as ref.; however, the SCALE variable is
23792: # of opposite sign in bindec.sa from Coonen).
23793: #
23794: # Initial USE
23795: # FPCR[6:5] LAMBDA SIGN(X) FPCR[6:5]
23796: # ----------------------------------------------
23797: # RN 00 0 0 00/0 RN
23798: # RN 00 0 1 00/0 RN
23799: # RN 00 1 0 00/0 RN
23800: # RN 00 1 1 00/0 RN
23801: # RZ 01 0 0 11/3 RP
23802: # RZ 01 0 1 11/3 RP
23803: # RZ 01 1 0 10/2 RM
23804: # RZ 01 1 1 10/2 RM
23805: # RM 10 0 0 11/3 RP
23806: # RM 10 0 1 10/2 RM
23807: # RM 10 1 0 10/2 RM
23808: # RM 10 1 1 11/3 RP
23809: # RP 11 0 0 10/2 RM
23810: # RP 11 0 1 11/3 RP
23811: # RP 11 1 0 11/3 RP
23812: # RP 11 1 1 10/2 RM
23813: #
23814: # Register usage:
23815: # Input/Output
23816: # d0: exponent/scratch - final is 0
23817: # d2: x/0 or 24 for A9
23818: # d3: x/scratch - offset ptr into PTENRM array
23819: # d4: LEN/Unchanged
23820: # d5: 0/ICTR:LAMBDA
23821: # d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
23822: # d7: k-factor/Unchanged
23823: # a0: ptr for original operand/final result
23824: # a1: x/ptr to PTENRM array
23825: # a2: x/x
23826: # fp0: float(ILOG)/Unchanged
23827: # fp1: x/10^ISCALE
23828: # fp2: x/x
23829: # F_SCR1:x/x
23830: # F_SCR2:Abs(X) with $3fff exponent/Unchanged
23831: # L_SCR1:x/x
23832: # L_SCR2:first word of X packed/Unchanged
23833:
23834: A7_str:
23835: 	tst.l	%d7	# test sign of k
23836: 	bgt.b	k_pos	# if pos and > 0, skip this
23837: 	cmp.l	%d7,%d6	# test k - ILOG
23838: 	blt.b	k_pos	# if ILOG >= k, skip this
23839: 	mov.l	%d7,%d6	# if ((k<0) & (ILOG < k)) ILOG = k
23840: k_pos:
23841: 	mov.l	%d6,%d0	# calc ILOG + 1 - LEN in d0
23842: 	addq.l	&1,%d0	# add the 1
23843: 	sub.l	%d4,%d0	# sub off LEN
23844: 	swap	%d5	# use upper word of d5 for LAMBDA
23845: 	clr.w	%d5	# set it zero initially
23846: 	clr.w	%d2	# set up d2 for very small case
23847: 	tst.l	%d0	# test sign of ISCALE
23848: 	bge.b	iscale	# if pos, skip next inst
23849: 	addq.w	&1,%d5	# if neg, set LAMBDA true
23850: 	cmp.l	%d0,&0xffffecd4	# test iscale <= -4908
23851: 	bgt.b	no_inf	# if false, skip rest
23852: 	add.l	&24,%d0	# add in 24 to iscale
23853: 	mov.l	&24,%d2	# put 24 in d2 for A9
23854: no_inf:
23855: 	neg.l	%d0	# and take abs of ISCALE
# Build the RBDTBL index as {rmode(2) : LAMBDA(1) : sign(X)(1)} in d1,
# then pick the power-of-ten table matching the redirected rounding mode.
23856: iscale:
23857: 	fmov.s	FONE(%pc),%fp1	# init fp1 to 1
23858: 	bfextu	USER_FPCR(%a6){&26:&2},%d1	# get initial rmode bits
23859: 	lsl.w	&1,%d1	# put them in bits 2:1
23860: 	add.w	%d5,%d1	# add in LAMBDA
23861: 	lsl.w	&1,%d1	# put them in bits 3:1
23862: 	tst.l	L_SCR2(%a6)	# test sign of original x
23863: 	bge.b	x_pos	# if pos, don't set bit 0
23864: 	addq.l	&1,%d1	# if neg, set bit 0
23865: x_pos:
23866: 	lea.l	RBDTBL(%pc),%a2	# load rbdtbl base
23867: 	mov.b	(%a2,%d1),%d3	# load d3 with new rmode
23868: 	lsl.l	&4,%d3	# put bits in proper position
23869: 	fmov.l	%d3,%fpcr	# load bits into fpu
23870: 	lsr.l	&4,%d3	# put bits in proper position
23871: 	tst.b	%d3	# decode new rmode for pten table
23872: 	bne.b	not_rn	# if nonzero, not RN; fall through = RN
23873: 	lea.l	PTENRN(%pc),%a1	# load a1 with RN table base
23874: 	bra.b	rmode	# exit decode
23875: not_rn:
23876: 	lsr.b	&1,%d3	# get lsb in carry
23877: 	bcc.b	not_rp2	# if carry clear, it is RM
23878: 	lea.l	PTENRP(%pc),%a1	# load a1 with RP table base
23879: 	bra.b	rmode	# exit decode
23880: not_rp2:
23881: 	lea.l	PTENRM(%pc),%a1	# load a1 with RM table base
# Binary exponentiation: walk the bits of abs(ISCALE), multiplying fp1
# by the table's 10^(2^i) entry (12 bytes apart) for each set bit.
23882: rmode:
23883: 	clr.l	%d3	# clr table index
23884: e_loop2:
23885: 	lsr.l	&1,%d0	# shift next bit into carry
23886: 	bcc.b	e_next2	# if zero, skip the mul
23887: 	fmul.x	(%a1,%d3),%fp1	# mul by 10**(d3_bit_no)
23888: e_next2:
23889: 	add.l	&12,%d3	# inc d3 to next pwrten table entry
23890: 	tst.l	%d0	# test if ISCALE is zero
23891: 	bne.b	e_loop2	# if not, loop
23892:
23893: # A8. Clr INEX; Force RZ.
23894: # The operation in A3 above may have set INEX2.
23895: # RZ mode is forced for the scaling operation to insure
23896: # only one rounding error. The grs bits are collected in
23897: # the INEX flag for use in A10.
23898: #
23899: # Register usage:
23900: # Input/Output
23901:
23902: 	fmov.l	&0,%fpsr	# clr INEX (A3/A7 arithmetic may have set it)
23903: 	fmov.l	&rz_mode*0x10,%fpcr	# set RZ rounding mode
23904:
23905: # A9. Scale X -> Y.
23906: # The mantissa is scaled to the desired number of significant
23907: # digits. The excess digits are collected in INEX2. If mul,
23908: # Check d2 for excess 10 exponential value. If not zero,
23909: # the iscale value would have caused the pwrten calculation
23910: # to overflow. Only a negative iscale can cause this, so
23911: # multiply by 10^(d2), which is now only allowed to be 24,
23912: # with a multiply by 10^8 and 10^16, which is exact since
23913: # 10^24 is exact. If the input was denormalized, we must
23914: # create a busy stack frame with the mul command and the
23915: # two operands, and allow the fpu to complete the multiply.
23916: #
23917: # Register usage:
23918: # Input/Output
23919: # d0: FPCR with RZ mode/Unchanged
23920: # d2: 0 or 24/unchanged
23921: # d3: x/x
23922: # d4: LEN/Unchanged
23923: # d5: ICTR:LAMBDA
23924: # d6: ILOG/Unchanged
23925: # d7: k-factor/Unchanged
23926: # a0: ptr for original operand/final result
23927: # a1: ptr to PTENRM array/Unchanged
23928: # a2: x/x
23929: # fp0: float(ILOG)/X adjusted for SCALE (Y)
23930: # fp1: 10^ISCALE/Unchanged
23931: # fp2: x/x
23932: # F_SCR1:x/x
23933: # F_SCR2:Abs(X) with $3fff exponent/Unchanged
23934: # L_SCR1:x/x
23935: # L_SCR2:first word of X packed/Unchanged
23936:
23937: A9_str:
23938: 	fmov.x	(%a0),%fp0	# load X from memory
23939: 	fabs.x	%fp0	# use abs(X)
23940: 	tst.w	%d5	# LAMBDA is in lower word of d5
23941: 	bne.b	sc_mul	# if neg (LAMBDA = 1), scale by mul
23942: 	fdiv.x	%fp1,%fp0	# calculate X / SCALE -> Y to fp0
23943: 	bra.w	A10_st	# branch to A10
23944:
23945: sc_mul:
23946: 	tst.b	BINDEC_FLG(%a6)	# check for denorm
23947: 	beq.w	A9_norm	# if norm, continue with mul
23948:
23949: # for DENORM, we must calculate:
23950: #	fp0 = input_op * 10^ISCALE * 10^24
23951: # since the input operand is a DENORM, we can't multiply it directly.
23952: # so, we do the multiplication of the exponents and mantissas separately.
23953: # in this way, we avoid underflow on intermediate stages of the
23954: # multiplication and guarantee a result without exception.
23955: 	fmovm.x	&0x2,-(%sp)	# save 10^ISCALE to stack
23956:
# Sum the (unbiased) exponents of 10^ISCALE, the DENORM, 10^8 and 10^16.
23957: 	mov.w	(%sp),%d3	# grab exponent
23958: 	andi.w	&0x7fff,%d3	# clear sign
23959: 	ori.w	&0x8000,(%a0)	# make DENORM exp negative
23960: 	add.w	(%a0),%d3	# add DENORM exp to 10^ISCALE exp
23961: 	subi.w	&0x3fff,%d3	# subtract BIAS
23962: 	add.w	36(%a1),%d3	# add exp of 10^8 table entry
23963: 	subi.w	&0x3fff,%d3	# subtract BIAS
23964: 	add.w	48(%a1),%d3	# add exp of 10^16 table entry
23965: 	subi.w	&0x3fff,%d3	# subtract BIAS
23966:
23967: 	bmi.w	sc_mul_err	# if result is DENORM, punt!!!
23968:
# Multiply the mantissas with all exponents forced to bias (0x3fff).
23969: 	andi.w	&0x8000,(%sp)	# keep sign
23970: 	or.w	%d3,(%sp)	# insert new exponent
23971: 	andi.w	&0x7fff,(%a0)	# clear sign bit on DENORM again
23972: 	mov.l	0x8(%a0),-(%sp)	# put input op mantissa on stk
23973: 	mov.l	0x4(%a0),-(%sp)
23974: 	mov.l	&0x3fff0000,-(%sp)	# force exp to zero
23975: 	fmovm.x	(%sp)+,&0x80	# load normalized DENORM into fp0
23976: 	fmul.x	(%sp)+,%fp0
23977:
23978: #	fmul.x	36(%a1),%fp0	# multiply fp0 by 10^8
23979: #	fmul.x	48(%a1),%fp0	# multiply fp0 by 10^16
23980: 	mov.l	36+8(%a1),-(%sp)	# get 10^8 mantissa
23981: 	mov.l	36+4(%a1),-(%sp)
23982: 	mov.l	&0x3fff0000,-(%sp)	# force exp to zero
23983: 	mov.l	48+8(%a1),-(%sp)	# get 10^16 mantissa
23984: 	mov.l	48+4(%a1),-(%sp)
23985: 	mov.l	&0x3fff0000,-(%sp)	# force exp to zero
23986: 	fmul.x	(%sp)+,%fp0	# multiply fp0 by 10^8
23987: 	fmul.x	(%sp)+,%fp0	# multiply fp0 by 10^16
23988: 	bra.b	A10_st
23989:
23990: sc_mul_err:
23991: 	bra.b	sc_mul_err	# intentional hang: "punt" path, not expected to occur
23992:
23993: A9_norm:
23994: 	tst.w	%d2	# test for small exp case
23995: 	beq.b	A9_con	# if zero, continue as normal
23996: 	fmul.x	36(%a1),%fp0	# multiply fp0 by 10^8
23997: 	fmul.x	48(%a1),%fp0	# multiply fp0 by 10^16
23998: A9_con:
23999: 	fmul.x	%fp1,%fp0	# calculate X * SCALE -> Y to fp0
24000:
24001: # A10. Or in INEX.
24002: # If INEX is set, round error occurred. This is compensated
24003: # for by 'or-ing' in the INEX2 flag to the lsb of Y.
24004: #
24005: # Register usage:
24006: # Input/Output
24007: # d0: FPCR with RZ mode/FPSR with INEX2 isolated
24008: # d2: x/x
24009: # d3: x/x
24010: # d4: LEN/Unchanged
24011: # d5: ICTR:LAMBDA
24012: # d6: ILOG/Unchanged
24013: # d7: k-factor/Unchanged
24014: # a0: ptr for original operand/final result
24015: # a1: ptr to PTENxx array/Unchanged
24016: # a2: x/ptr to FP_SCR1(a6)
24017: # fp0: Y/Y with lsb adjusted
24018: # fp1: 10^ISCALE/Unchanged
24019: # fp2: x/x
24020:
# A10 (code): sticky-OR INEX2 (FPSR bit 9) into the lsb of Y's mantissa.
24021: A10_st:
24022: 	fmov.l	%fpsr,%d0	# get FPSR
24023: 	fmov.x	%fp0,FP_SCR1(%a6)	# move Y to memory
24024: 	lea.l	FP_SCR1(%a6),%a2	# load a2 with ptr to FP_SCR1
24025: 	btst	&9,%d0	# check if INEX2 set
24026: 	beq.b	A11_st	# if clear, skip rest
24027: 	or.l	&1,8(%a2)	# or in 1 to lsb of mantissa
24028: 	fmov.x	FP_SCR1(%a6),%fp0	# write adjusted Y back to fpu
24029:
24030:
24031: # A11. Restore original FPCR; set size ext.
24032: # Perform FINT operation in the user's rounding mode. Keep
24033: # the size to extended. The sintdo entry point in the sint
24034: # routine expects the FPCR value to be in USER_FPCR for
24035: # mode and precision. The original FPCR is saved in L_SCR1.
24036:
# A11 (code): save the user's FPCR, then keep only its rounding-mode
# bits so precision is extended and all exception enables are cleared.
24037: A11_st:
24038: 	mov.l	USER_FPCR(%a6),L_SCR1(%a6)	# save it for later
24039: 	and.l	&0x00000030,USER_FPCR(%a6)	# set size to ext,
24040: #					;block exceptions
24041:
24042:
24043: # A12. Calculate YINT = FINT(Y) according to user's rounding mode.
24044: # The FPSP routine sintd0 is used. The output is in fp0.
24045: #
24046: # Register usage:
24047: # Input/Output
24048: # d0: FPSR with AINEX cleared/FPCR with size set to ext
24049: # d2: x/x/scratch
24050: # d3: x/x
24051: # d4: LEN/Unchanged
24052: # d5: ICTR:LAMBDA/Unchanged
24053: # d6: ILOG/Unchanged
24054: # d7: k-factor/Unchanged
24055: # a0: ptr for original operand/src ptr for sintdo
24056: # a1: ptr to PTENxx array/Unchanged
24057: # a2: ptr to FP_SCR1(a6)/Unchanged
24058: # a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
24059: # fp0: Y/YINT
24060: # fp1: 10^ISCALE/Unchanged
24061: # fp2: x/x
24062: # F_SCR1:x/x
24063: # F_SCR2:Y adjusted for inex/Y with original exponent
24064: # L_SCR1:x/original USER_FPCR
24065: # L_SCR2:first word of X packed/Unchanged
24066:
24067: A12_st:
24068: 	movm.l	&0xc0c0,-(%sp)	# save regs used by sintd0	{%d0-%d1/%a0-%a1}
24069: 	mov.l	L_SCR1(%a6),-(%sp)	# preserve L_SCR1/L_SCR2 across the fint
24070: 	mov.l	L_SCR2(%a6),-(%sp)
24071:
24072: 	lea.l	FP_SCR1(%a6),%a0	# a0 is ptr to FP_SCR1(a6)
24073: 	fmov.x	%fp0,(%a0)	# move Y to memory at FP_SCR1(a6)
24074: 	tst.l	L_SCR2(%a6)	# test sign of original operand
24075: 	bge.b	do_fint12	# if pos, use Y
24076: 	or.l	&0x80000000,(%a0)	# if neg, use -Y
24077: do_fint12:
24078: 	mov.l	USER_FPSR(%a6),-(%sp)	# save FPSR image around the fint
24079: #	bsr	sintdo	# sint routine returns int in fp0
24080:
24081: 	fmov.l	USER_FPCR(%a6),%fpcr	# user rmode; size forced ext in A11
24082: 	fmov.l	&0x0,%fpsr	# clear the AEXC bits!!!
24083: ##	mov.l	USER_FPCR(%a6),%d0	# ext prec/keep rnd mode
24084: ##	andi.l	&0x00000030,%d0
24085: ##	fmov.l	%d0,%fpcr
24086: 	fint.x	FP_SCR1(%a6),%fp0	# do fint()
24087: 	fmov.l	%fpsr,%d0	# collect exceptions raised by fint
24088: 	or.w	%d0,FPSR_EXCEPT(%a6)	# accrue them into the user FPSR image
24089: ##	fmov.l	&0x0,%fpcr
24090: ##	fmov.l	%fpsr,%d0	# don't keep ccodes
24091: ##	or.w	%d0,FPSR_EXCEPT(%a6)
24092:
24093: 	mov.b	(%sp),USER_FPSR(%a6)	# restore first byte of saved FPSR
24094: 	add.l	&4,%sp	# pop the saved FPSR image
24095:
24096: 	mov.l	(%sp)+,L_SCR2(%a6)
24097: 	mov.l	(%sp)+,L_SCR1(%a6)
24098: 	movm.l	(%sp)+,&0x303	# restore regs used by sint	{%d0-%d1/%a0-%a1}
24099:
24100: 	mov.l	L_SCR2(%a6),FP_SCR1(%a6)	# restore original exponent
24101: 	mov.l	L_SCR1(%a6),USER_FPCR(%a6)	# restore user's FPCR
24102:
24103: # A13. Check for LEN digits.
24104: # If the int operation results in more than LEN digits,
24105: # or less than LEN -1 digits, adjust ILOG and repeat from
24106: # A6. This test occurs only on the first pass. If the
24107: # result is exactly 10^LEN, decrement ILOG and divide
24108: # the mantissa by 10. The calculation of 10^LEN cannot
24109: #	be inexact, since all powers of ten up to 10^27 are exact
24110: # in extended precision, so the use of a previous power-of-ten
24111: # table will introduce no error.
24112: #
24113: #
24114: # Register usage:
24115: # Input/Output
24116: # d0: FPCR with size set to ext/scratch final = 0
24117: # d2: x/x
24118: # d3: x/scratch final = x
24119: # d4: LEN/LEN adjusted
24120: # d5: ICTR:LAMBDA/LAMBDA:ICTR
24121: # d6: ILOG/ILOG adjusted
24122: # d7: k-factor/Unchanged
24123: # a0: pointer into memory for packed bcd string formation
24124: # a1: ptr to PTENxx array/Unchanged
24125: # a2: ptr to FP_SCR1(a6)/Unchanged
24126: # fp0: int portion of Y/abs(YINT) adjusted
24127: # fp1: 10^ISCALE/Unchanged
24128: # fp2: x/10^LEN
24129: # F_SCR1:x/x
24130: # F_SCR2:Y with original exponent/Unchanged
24131: # L_SCR1:original USER_FPCR/Unchanged
24132: # L_SCR2:first word of X packed/Unchanged
24133:
24134: A13_st:
24135: 	swap	%d5	# put ICTR in lower word of d5
24136: 	tst.w	%d5	# check if ICTR = 0
24137: 	bne	not_zr	# if non-zero, go to second test
24138: #
24139: # Compute 10^(LEN-1)
24140: #
24141: 	fmov.s	FONE(%pc),%fp2	# init fp2 to 1.0
24142: 	mov.l	%d4,%d0	# put LEN in d0
24143: 	subq.l	&1,%d0	# d0 = LEN -1
24144: 	clr.l	%d3	# clr table index
24145: l_loop:
24146: 	lsr.l	&1,%d0	# shift next bit into carry
24147: 	bcc.b	l_next	# if zero, skip the mul
24148: 	fmul.x	(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
24149: l_next:
24150: 	add.l	&12,%d3	# inc d3 to next pwrten table entry
24151: 	tst.l	%d0	# test if LEN is zero
24152: 	bne.b	l_loop	# if not, loop
24153: #
24154: # 10^LEN-1 is computed for this test and A14. If the input was
24155: # denormalized, check only the case in which YINT > 10^LEN.
24156: #
24157: 	tst.b	BINDEC_FLG(%a6)	# check if input was norm
24158: 	beq.b	A13_con	# if norm, continue with checking
24159: 	fabs.x	%fp0	# take abs of YINT
24160: 	bra	test_2
24161: #
24162: # Compare abs(YINT) to 10^(LEN-1) and 10^LEN
24163: #
24164: A13_con:
24165: 	fabs.x	%fp0	# take abs of YINT
24166: 	fcmp.x	%fp0,%fp2	# compare abs(YINT) with 10^(LEN-1)
24167: 	fbge.w	test_2	# if greater, do next test
24168: 	subq.l	&1,%d6	# subtract 1 from ILOG
24169: 	mov.w	&1,%d5	# set ICTR
24170: 	fmov.l	&rm_mode*0x10,%fpcr	# set rmode to RM
24171: 	fmul.s	FTEN(%pc),%fp2	# compute 10^LEN
24172: 	bra.w	A6_str	# return to A6 and recompute YINT
24173: test_2:
24174: 	fmul.s	FTEN(%pc),%fp2	# compute 10^LEN
24175: 	fcmp.x	%fp0,%fp2	# compare abs(YINT) with 10^LEN
24176: 	fblt.w	A14_st	# if less, all is ok, go to A14
24177: 	fbgt.w	fix_ex	# if greater, fix and redo
24178: 	fdiv.s	FTEN(%pc),%fp0	# if equal, divide by 10
24179: 	addq.l	&1,%d6	# and inc ILOG
24180: 	bra.b	A14_st	# and continue elsewhere
24181: fix_ex:
24182: 	addq.l	&1,%d6	# increment ILOG by 1
24183: 	mov.w	&1,%d5	# set ICTR
24184: 	fmov.l	&rm_mode*0x10,%fpcr	# set rmode to RM
24185: 	bra.w	A6_str	# return to A6 and recompute YINT
24186: #
24187: # Since ICTR <> 0, we have already been through one adjustment,
24188: # and shouldn't have another; this is to check if abs(YINT) = 10^LEN
24189: # 10^LEN is again computed using whatever table is in a1 since the
24190: # value calculated cannot be inexact.
24191: #
24192: not_zr:
24193: 	fmov.s	FONE(%pc),%fp2	# init fp2 to 1.0
24194: 	mov.l	%d4,%d0	# put LEN in d0
24195: 	clr.l	%d3	# clr table index
24196: z_loop:
24197: 	lsr.l	&1,%d0	# shift next bit into carry
24198: 	bcc.b	z_next	# if zero, skip the mul
24199: 	fmul.x	(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
24200: z_next:
24201: 	add.l	&12,%d3	# inc d3 to next pwrten table entry
24202: 	tst.l	%d0	# test if LEN is zero
24203: 	bne.b	z_loop	# if not, loop
24204: 	fabs.x	%fp0	# get abs(YINT)
24205: 	fcmp.x	%fp0,%fp2	# check if abs(YINT) = 10^LEN
24206: 	fbneq.w	A14_st	# if not, skip this
24207: 	fdiv.s	FTEN(%pc),%fp0	# divide abs(YINT) by 10
24208: 	addq.l	&1,%d6	# and inc ILOG by 1
24209: 	addq.l	&1,%d4	# and inc LEN
24210: 	fmul.s	FTEN(%pc),%fp2	# if LEN++, then get 10^LEN
24211:
24212: # A14. Convert the mantissa to bcd.
24213: # The binstr routine is used to convert the LEN digit
24214: # mantissa to bcd in memory. The input to binstr is
24215: # to be a fraction; i.e. (mantissa)/10^LEN and adjusted
24216: # such that the decimal point is to the left of bit 63.
24217: # The bcd digits are stored in the correct position in
24218: # the final string area in memory.
24219: #
24220: #
24221: # Register usage:
24222: # Input/Output
24223: # d0: x/LEN call to binstr - final is 0
24224: # d1: x/0
24225: # d2: x/ms 32-bits of mant of abs(YINT)
24226: # d3: x/ls 32-bits of mant of abs(YINT)
24227: # d4: LEN/Unchanged
24228: # d5: ICTR:LAMBDA/LAMBDA:ICTR
24229: # d6: ILOG
24230: # d7: k-factor/Unchanged
24231: # a0: pointer into memory for packed bcd string formation
24232: # /ptr to first mantissa byte in result string
24233: # a1: ptr to PTENxx array/Unchanged
24234: # a2: ptr to FP_SCR1(a6)/Unchanged
24235: # fp0: int portion of Y/abs(YINT) adjusted
24236: # fp1: 10^ISCALE/Unchanged
24237: # fp2: 10^LEN/Unchanged
24238: # F_SCR1:x/Work area for final result
24239: # F_SCR2:Y with original exponent/Unchanged
24240: # L_SCR1:original USER_FPCR/Unchanged
24241: # L_SCR2:first word of X packed/Unchanged
24242:
24243: A14_st:
24244: 	fmov.l	&rz_mode*0x10,%fpcr	# force rz for conversion
24245: 	fdiv.x	%fp2,%fp0	# divide abs(YINT) by 10^LEN
24246: 	lea.l	FP_SCR0(%a6),%a0	# a0 = ptr to result work area
24247: 	fmov.x	%fp0,(%a0)	# move abs(YINT)/10^LEN to memory
24248: 	mov.l	4(%a0),%d2	# move 2nd word of FP_RES to d2
24249: 	mov.l	8(%a0),%d3	# move 3rd word of FP_RES to d3
24250: 	clr.l	4(%a0)	# zero word 2 of FP_RES
24251: 	clr.l	8(%a0)	# zero word 3 of FP_RES
24252: 	mov.l	(%a0),%d0	# move exponent to d0
24253: 	swap	%d0	# put exponent in lower word
24254: 	beq.b	no_sft	# if zero, don't shift
24255: 	sub.l	&0x3ffd,%d0	# sub bias less 2 to make fract
24256: 	tst.l	%d0	# check if > 1
24257: 	bgt.b	no_sft	# if so, don't shift
24258: 	neg.l	%d0	# make exp positive
24259: m_loop:
24260: 	lsr.l	&1,%d2	# shift d2:d3 right, add 0s
24261: 	roxr.l	&1,%d3	# the number of places
24262: 	dbf.w	%d0,m_loop	# given in d0
24263: no_sft:
24264: 	tst.l	%d2	# check for mantissa of zero
24265: 	bne.b	no_zr	# if not, go on
24266: 	tst.l	%d3	# continue zero check
24267: 	beq.b	zer_m	# if zero, go directly to binstr
24268: no_zr:
24269: 	clr.l	%d1	# put zero in d1 for addx
24270: 	add.l	&0x00000080,%d3	# inc at bit 7
24271: 	addx.l	%d1,%d2	# continue inc
24272: 	and.l	&0xffffff80,%d3	# strip off lsb not used by 882
24273: zer_m:
24274: 	mov.l	%d4,%d0	# put LEN in d0 for binstr call
24275: 	addq.l	&3,%a0	# a0 points to M16 byte in result
24276: 	bsr	binstr	# call binstr to convert mant
24277:
24278:
24279: # A15. Convert the exponent to bcd.
24280: # As in A14 above, the exp is converted to bcd and the
24281: # digits are stored in the final string.
24282: #
24283: # Digits are stored in L_SCR1(a6) on return from BINDEC as:
24284: #
24285: # 32 16 15 0
24286: # -----------------------------------------
24287: # | 0 | e3 | e2 | e1 | e4 | X | X | X |
24288: # -----------------------------------------
24289: #
24290: # And are moved into their proper places in FP_SCR0. If digit e4
24291: # is non-zero, OPERR is signaled. In all cases, all 4 digits are
24292: # written as specified in the 881/882 manual for packed decimal.
24293: #
24294: # Register usage:
24295: # Input/Output
24296: # d0: x/LEN call to binstr - final is 0
24297: # d1: x/scratch (0);shift count for final exponent packing
24298: # d2: x/ms 32-bits of exp fraction/scratch
24299: # d3: x/ls 32-bits of exp fraction
24300: # d4: LEN/Unchanged
24301: # d5: ICTR:LAMBDA/LAMBDA:ICTR
24302: # d6: ILOG
24303: # d7: k-factor/Unchanged
24304: # a0: ptr to result string/ptr to L_SCR1(a6)
24305: # a1: ptr to PTENxx array/Unchanged
24306: # a2: ptr to FP_SCR1(a6)/Unchanged
24307: # fp0: abs(YINT) adjusted/float(ILOG)
24308: # fp1: 10^ISCALE/Unchanged
24309: # fp2: 10^LEN/Unchanged
24310: # F_SCR1:Work area for final result/BCD result
24311: # F_SCR2:Y with original exponent/ILOG/10^4
24312: # L_SCR1:original USER_FPCR/Exponent digits on return from binstr
24313: # L_SCR2:first word of X packed/Unchanged
24314:
24315: A15_st:
24316: 	tst.b	BINDEC_FLG(%a6)	# check for denorm
24317: 	beq.b	not_denorm	# branch if input was a NORM
24318: 	ftest.x	%fp0	# test for zero
24319: 	fbeq.w	den_zero	# if zero, use k-factor or 4933
24320: 	fmov.l	%d6,%fp0	# float ILOG
24321: 	fabs.x	%fp0	# get abs of ILOG
24322: 	bra.b	convrt
24323: den_zero:
24324: 	tst.l	%d7	# check sign of the k-factor
24325: 	blt.b	use_ilog	# if negative, use ILOG
24326: 	fmov.s	F4933(%pc),%fp0	# force exponent to 4933
24327: 	bra.b	convrt	# do it
24328: use_ilog:
24329: 	fmov.l	%d6,%fp0	# float ILOG
24330: 	fabs.x	%fp0	# get abs of ILOG
24331: 	bra.b	convrt
24332: not_denorm:
24333: 	ftest.x	%fp0	# test for zero
24334: 	fbneq.w	not_zero	# if zero, force exponent
24335: 	fmov.s	FONE(%pc),%fp0	# force exponent to 1
24336: 	bra.b	convrt	# do it
24337: not_zero:
24338: 	fmov.l	%d6,%fp0	# float ILOG
24339: 	fabs.x	%fp0	# get abs of ILOG
24340: convrt:
24341: 	fdiv.x	24(%a1),%fp0	# compute ILOG/10^4
24342: 	fmov.x	%fp0,FP_SCR1(%a6)	# store fp0 in memory
24343: 	mov.l	4(%a2),%d2	# move word 2 to d2
24344: 	mov.l	8(%a2),%d3	# move word 3 to d3
24345: 	mov.w	(%a2),%d0	# move exp to d0
24346: 	beq.b	x_loop_fin	# if zero, skip the shift
24347: 	sub.w	&0x3ffd,%d0	# subtract off bias
24348: 	neg.w	%d0	# make exp positive
24349: x_loop:
24350: 	lsr.l	&1,%d2	# shift d2:d3 right
24351: 	roxr.l	&1,%d3	# the number of places
24352: 	dbf.w	%d0,x_loop	# given in d0
24353: x_loop_fin:
24354: 	clr.l	%d1	# put zero in d1 for addx
24355: 	add.l	&0x00000080,%d3	# inc at bit 7
24356: 	addx.l	%d1,%d2	# continue inc
24357: 	and.l	&0xffffff80,%d3	# strip off lsb not used by 882
24358: 	mov.l	&4,%d0	# put 4 in d0 for binstr call
24359: 	lea.l	L_SCR1(%a6),%a0	# a0 is ptr to L_SCR1 for exp digits
24360: 	bsr	binstr	# call binstr to convert exp
24361: 	mov.l	L_SCR1(%a6),%d0	# load L_SCR1 lword to d0
24362: 	mov.l	&12,%d1	# use d1 for shift count
24363: 	lsr.l	%d1,%d0	# shift d0 right by 12
24364: 	bfins	%d0,FP_SCR0(%a6){&4:&12}	# put e3:e2:e1 in FP_SCR0
24365: 	lsr.l	%d1,%d0	# shift d0 right by 12
24366: 	bfins	%d0,FP_SCR0(%a6){&16:&4}	# put e4 in FP_SCR0
24367: 	tst.b	%d0	# check if e4 is zero
24368: 	beq.b	A16_st	# if zero, skip rest
24369: 	or.l	&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
24370:
24371:
24372: # A16. Write sign bits to final string.
24373: # Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
24374: #
24375: # Register usage:
24376: # Input/Output
24377: # d0: x/scratch - final is x
24378: # d2: x/x
24379: # d3: x/x
24380: # d4: LEN/Unchanged
24381: # d5: ICTR:LAMBDA/LAMBDA:ICTR
24382: # d6: ILOG/ILOG adjusted
24383: # d7: k-factor/Unchanged
24384: # a0: ptr to L_SCR1(a6)/Unchanged
24385: # a1: ptr to PTENxx array/Unchanged
24386: # a2: ptr to FP_SCR1(a6)/Unchanged
24387: # fp0: float(ILOG)/Unchanged
24388: # fp1: 10^ISCALE/Unchanged
24389: # fp2: 10^LEN/Unchanged
24390: # F_SCR1:BCD result with correct signs
24391: # F_SCR2:ILOG/10^4
24392: # L_SCR1:Exponent digits on return from binstr
24393: # L_SCR2:first word of X packed/Unchanged
24394:
# A16 (code): SM (bit 1) = sign of mantissa, SE (bit 0) = sign of
# exponent; both inserted into the top two bits of FP_SCR0.
24395: A16_st:
24396: 	clr.l	%d0	# clr d0 for collection of signs
24397: 	and.b	&0x0f,FP_SCR0(%a6)	# clear first nibble of FP_SCR0
24398: 	tst.l	L_SCR2(%a6)	# check sign of original mantissa
24399: 	bge.b	mant_p	# if pos, don't set SM
24400: 	mov.l	&2,%d0	# move 2 in to d0 for SM
24401: mant_p:
24402: 	tst.l	%d6	# check sign of ILOG
24403: 	bge.b	wr_sgn	# if pos, don't set SE
24404: 	addq.l	&1,%d0	# set bit 0 in d0 for SE
24405: wr_sgn:
24406: 	bfins	%d0,FP_SCR0(%a6){&0:&2}	# insert SM and SE into FP_SCR0
24407:
24408: # Clean up and restore all registers used.
24409:
24410: 	fmov.l	&0,%fpsr	# clear possible inex2/ainex bits
24411: 	fmovm.x	(%sp)+,&0xe0	# {%fp0-%fp2}
24412: 	movm.l	(%sp)+,&0x4fc	# {%d2-%d7/%a2}
24413: 	rts
24414:
24415: global PTENRN
24416: PTENRN:
24417: long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
24418: long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
24419: long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
24420: long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
24421: long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
24422: long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
24423: long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
24424: long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
24425: long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
24426: long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
24427: long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
24428: long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
24429: long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
24430:
24431: global PTENRP
24432: PTENRP:
24433: long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
24434: long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
24435: long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
24436: long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
24437: long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
24438: long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
24439: long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
24440: long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
24441: long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
24442: long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
24443: long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
24444: long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
24445: long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
24446:
#
# PTENRM: same powers-of-ten table as PTENRN, but with the mantissa
# rounded toward minus infinity (inexact entries end one ulp lower
# than the PTENRP variant, e.g. 10^32, 10^128, 10^4096).
#
24447: 	global		PTENRM
24448: PTENRM:
24449: 	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
24450: 	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
24451: 	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
24452: 	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
24453: 	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
24454: 	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
24455: 	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
24456: 	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
24457: 	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
24458: 	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
24459: 	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
24460: 	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
24461: 	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
24462:
24463: #########################################################################
24464: # binstr(): Converts a 64-bit binary integer to bcd. #
24465: # #
24466: # INPUT *************************************************************** #
24467: # d2:d3 = 64-bit binary integer #
24468: # d0 = desired length (LEN) #
24469: # a0 = pointer to start in memory for bcd characters #
24470: # (This pointer must point to byte 4 of the first #
24471: # lword of the packed decimal memory string.) #
24472: # #
24473: # OUTPUT ************************************************************** #
24474: # a0 = pointer to LEN bcd digits representing the 64-bit integer. #
24475: # #
24476: # ALGORITHM *********************************************************** #
24477: # The 64-bit binary is assumed to have a decimal point before #
24478: # bit 63. The fraction is multiplied by 10 using a mul by 2 #
24479: # shift and a mul by 8 shift. The bits shifted out of the #
24480: # msb form a decimal digit. This process is iterated until #
24481: # LEN digits are formed. #
24482: # #
24483: # A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the #
24484: # digit formed will be assumed the least significant. This is #
24485: # to force the first byte formed to have a 0 in the upper 4 bits. #
24486: # #
24487: # A2. Beginning of the loop: #
24488: # Copy the fraction in d2:d3 to d4:d5. #
24489: # #
24490: # A3. Multiply the fraction in d2:d3 by 8 using bit-field #
24491: # extracts and shifts. The three msbs from d2 will go into d1. #
24492: # #
24493: # A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb #
24494: # will be collected by the carry. #
24495: # #
24496: # A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5 #
24497: # into d2:d3. D1 will contain the bcd digit formed. #
24498: # #
24499: #	 A6. Test d7. If zero, the digit formed is the ms digit. If non-	#
24500: #	     zero, it is the ls digit. Put the digit in its place in the	#
24501: #	     upper word of d7. If it is the ls digit, write the byte		#
24502: #	     from d7 to memory.							#
24503: #										#
24504: #	 A7. Decrement d0 (LEN counter) and repeat the loop until zero.		#
24505: # #
24506: #########################################################################
24507:
24508: # Implementation Notes:
24509: #
24510: # The registers are used as follows:
24511: #
24512: # d0: LEN counter
24513: # d1: temp used to form the digit
24514: # d2: upper 32-bits of fraction for mul by 8
24515: # d3: lower 32-bits of fraction for mul by 8
24516: # d4: upper 32-bits of fraction for mul by 2
24517: # d5: lower 32-bits of fraction for mul by 2
24518: # d6: temp for bit-field extracts
24519: #	d7: byte digit formation word; digit count {0,1}
24520: # a0: pointer into memory for packed bcd string formation
24521: #
24522:
# Inputs: d2:d3 = 64-bit binary fraction, d0 = LEN (digit count),
# a0 = destination for the packed bcd digits (two 4-bit digits are
# packed per stored byte; the very first byte gets a 0 upper nibble).
24523: 	global		binstr
24524: binstr:
24525: 	movm.l		&0xff00,-(%sp)		# {%d0-%d7}
24526: 
24527: #
24528: # A1: Init d7
24529: #
24530: 	mov.l		&1,%d7			# init d7 for second digit
24531: 	subq.l		&1,%d0			# for dbf d0 would have LEN+1 passes
24532: #
24533: # A2. Copy d2:d3 to d4:d5. Start loop.
24534: #
24535: loop:
24536: 	mov.l		%d2,%d4			# copy the fraction before muls
24537: 	mov.l		%d3,%d5			# to d4:d5
24538: #
24539: # A3. Multiply d2:d3 by 8; extract msbs into d1.
24540: #
24541: 	bfextu		%d2{&0:&3},%d1		# copy 3 msbs of d2 into d1
24542: 	asl.l		&3,%d2			# shift d2 left by 3 places
24543: 	bfextu		%d3{&0:&3},%d6		# copy 3 msbs of d3 into d6
24544: 	asl.l		&3,%d3			# shift d3 left by 3 places
24545: 	or.l		%d6,%d2			# or in msbs from d3 into d2
24546: #
24547: # A4. Multiply d4:d5 by 2; add carry out to d1.
24548: #
24549: 	asl.l		&1,%d5			# mul d5 by 2
24550: 	roxl.l		&1,%d4			# mul d4 by 2
24551: 	swap		%d6			# put 0 in d6 lower word
24552: 	addx.w		%d6,%d1			# add in extend from mul by 2
24553: #
24554: # A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
24555: #
24556: 	add.l		%d5,%d3			# add lower 32 bits
24557: 	nop					# ERRATA FIX #13 (Rev. 1.2 6/6/90)
24558: 	addx.l		%d4,%d2			# add with extend upper 32 bits
24559: 	nop					# ERRATA FIX #13 (Rev. 1.2 6/6/90)
24560: 	addx.w		%d6,%d1			# add in extend from add to d1
24561: 	swap		%d6			# with d6 = 0; put 0 in upper word
24562: #
24563: # A6. Test d7 and branch.
24564: #
24565: 	tst.w		%d7			# if zero, store digit & to loop
24566: 	beq.b		first_d			# if non-zero, form byte & write
24567: sec_d:
24568: 	swap		%d7			# bring first digit to word d7b
24569: 	asl.w		&4,%d7			# first digit in upper 4 bits d7b
24570: 	add.w		%d1,%d7			# add in ls digit to d7b
24571: 	mov.b		%d7,(%a0)+		# store d7b byte in memory
24572: 	swap		%d7			# put LEN counter in word d7a
24573: 	clr.w		%d7			# set d7a to signal no digits done
24574: 	dbf.w		%d0,loop		# do loop some more!
24575: 	bra.b		end_bstr		# finished, so exit
24576: first_d:
24577: 	swap		%d7			# put digit word in d7b
24578: 	mov.w		%d1,%d7			# put new digit in d7b
24579: 	swap		%d7			# put LEN counter in word d7a
24580: 	addq.w		&1,%d7			# set d7a to signal first digit done
24581: 	dbf.w		%d0,loop		# do loop some more!
# LEN exhausted with an unpaired digit pending in d7b: flush it as the
# upper nibble of one final byte.
24582: 	swap		%d7			# put last digit in string
24583: 	lsl.w		&4,%d7			# move it to upper 4 bits
24584: 	mov.b		%d7,(%a0)+		# store it in memory string
24585: #
24586: # Clean up and return. The LEN bcd digits have been written to
24587: # memory at the buffer a0 pointed to on entry (no FP regs touched).
24588: end_bstr:
24589: 	movm.l		(%sp)+,&0xff		# {%d0-%d7}
24590: 	rts
24591:
24592: #########################################################################
24593: # XDEF **************************************************************** #
24594: # facc_in_b(): dmem_read_byte failed #
24595: # facc_in_w(): dmem_read_word failed #
24596: # facc_in_l(): dmem_read_long failed #
24597: # facc_in_d(): dmem_read of dbl prec failed #
24598: # facc_in_x(): dmem_read of ext prec failed #
24599: # #
24600: # facc_out_b(): dmem_write_byte failed #
24601: # facc_out_w(): dmem_write_word failed #
24602: # facc_out_l(): dmem_write_long failed #
24603: # facc_out_d(): dmem_write of dbl prec failed #
24604: # facc_out_x(): dmem_write of ext prec failed #
24605: # #
24606: # XREF **************************************************************** #
24607: # _real_access() - exit through access error handler #
24608: # #
24609: # INPUT *************************************************************** #
24610: # None #
24611: # #
24612: # OUTPUT ************************************************************** #
24613: # None #
24614: # #
24615: # ALGORITHM *********************************************************** #
24616: # Flow jumps here when an FP data fetch call gets an error #
24617: # result. This means the operating system wants an access error frame #
24618: # made out of the current exception stack frame. #
24619: # So, we first call restore() which makes sure that any updated #
24620: # -(an)+ register gets returned to its pre-exception value and then #
24621: # we change the stack to an access error stack frame. #
24622: # #
24623: #########################################################################
24624:
# dmem_read_{byte,word,long,dbl,ext} failure entry points.
# Each loads the transfer size into d0, calls restore() to back out
# any (An)+/-(An) update, stashes the read-fault FSLW in the frame's
# voff word (picked up later by facc_finish), then joins facc_finish.
24625: facc_in_b:
24626: 	movq.l		&0x1,%d0		# one byte
24627: 	bsr.w		restore			# fix An
24628: 
24629: 	mov.w		&0x0121,EXC_VOFF(%a6)	# set FSLW
24630: 	bra.w		facc_finish
24631: 
24632: facc_in_w:
24633: 	movq.l		&0x2,%d0		# two bytes
24634: 	bsr.w		restore			# fix An
24635: 
24636: 	mov.w		&0x0141,EXC_VOFF(%a6)	# set FSLW
24637: 	bra.b		facc_finish
24638: 
24639: facc_in_l:
24640: 	movq.l		&0x4,%d0		# four bytes
24641: 	bsr.w		restore			# fix An
24642: 
24643: 	mov.w		&0x0101,EXC_VOFF(%a6)	# set FSLW
24644: 	bra.b		facc_finish
24645: 
24646: facc_in_d:
24647: 	movq.l		&0x8,%d0		# eight bytes
24648: 	bsr.w		restore			# fix An
24649: 
24650: 	mov.w		&0x0161,EXC_VOFF(%a6)	# set FSLW
24651: 	bra.b		facc_finish
24652: 
24653: facc_in_x:
24654: 	movq.l		&0xc,%d0		# twelve bytes
24655: 	bsr.w		restore			# fix An
24656: 
# NOTE(review): same FSLW value as facc_in_d above — confirm intended.
24657: 	mov.w		&0x0161,EXC_VOFF(%a6)	# set FSLW
24658: 	bra.b		facc_finish
24659:
24660: ################################################################
24661:
# dmem_write_{byte,word,long,dbl} failure entry points: the write-fault
# counterparts of facc_in_*. Size in d0, undo An via restore(), stash
# the write-fault FSLW in the voff word, then join facc_finish.
24662: facc_out_b:
24663: 	movq.l		&0x1,%d0		# one byte
24664: 	bsr.w		restore			# restore An
24665: 
24666: 	mov.w		&0x00a1,EXC_VOFF(%a6)	# set FSLW
24667: 	bra.b		facc_finish
24668: 
24669: facc_out_w:
24670: 	movq.l		&0x2,%d0		# two bytes
24671: 	bsr.w		restore			# restore An
24672: 
24673: 	mov.w		&0x00c1,EXC_VOFF(%a6)	# set FSLW
24674: 	bra.b		facc_finish
24675: 
24676: facc_out_l:
24677: 	movq.l		&0x4,%d0		# four bytes
24678: 	bsr.w		restore			# restore An
24679: 
24680: 	mov.w		&0x0081,EXC_VOFF(%a6)	# set FSLW
24681: 	bra.b		facc_finish
24682: 
24683: facc_out_d:
24684: 	movq.l		&0x8,%d0		# eight bytes
24685: 	bsr.w		restore			# restore An
24686: 
# NOTE(review): same FSLW value as facc_out_x below — confirm intended.
24687: 	mov.w		&0x00e1,EXC_VOFF(%a6)	# set FSLW
24688: 	bra.b		facc_finish
24689:
# dmem_write of extended precision (12 bytes) failed: record the size,
# back out any An update, set the write-fault FSLW, then fall straight
# through into facc_finish below (no branch needed).
24690: facc_out_x:
24691: 	movq.l		&0xc,%d0		# twelve bytes (movq for consistency w/ siblings)
24692: 	bsr.w		restore			# restore An
24693: 
24694: 	mov.w		&0x00e1,EXC_VOFF(%a6)	# set FSLW
24695:
# here's where we actually create the access error frame from the
# current exception stack frame. On entry, EXC_VOFF(%a6) holds the
# FSLW word stashed by the facc_in_*/facc_out_* entry point.
24698: facc_finish:
# report the faulting FP instruction's address (FPIAR) as the PC
24699: 	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
24700: 
24701: 	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
24702: 	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
24703: 	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
24704: 
24705: 	unlk		%a6
24706: 
# Morph the stacked frame in place: push a copy of SR/hi(PC), slide
# lo(PC) and the EA down one slot, then assemble the FSLW lword from
# the word saved in the old voff slot, and write the access-error
# frame format/vector word (0x4008).
24707: 	mov.l		(%sp),-(%sp)		# store SR, hi(PC)
24708: 	mov.l		0x8(%sp),0x4(%sp)	# store lo(PC)
24709: 	mov.l		0xc(%sp),0x8(%sp)	# store EA
24710: 	mov.l		&0x00000001,0xc(%sp)	# store FSLW
24711: 	mov.w		0x6(%sp),0xc(%sp)	# fix FSLW (size)
24712: 	mov.w		&0x4008,0x6(%sp)	# store voff
24713: 
24714: 	btst		&0x5,(%sp)		# supervisor or user mode?
24715: 	beq.b		facc_out2		# user
24716: 	bset		&0x2,0xd(%sp)		# set supervisor TM bit
24717: 
24718: facc_out2:
# exit through the OS access error handler; never returns here
24719: 	bra.l		_real_access
24720:
24721: ##################################################################
24722:
# if the effective addressing mode was predecrement or postincrement,
# the emulation has already changed its value to the correct post-
# instruction value. but since we're exiting to the access error
# handler, then AN must be returned to its pre-instruction value.
# we do that here.
#
# In:  d0 = access size in bytes (adjustment to undo)
# Clobbers: d1
24728: restore:
24729: 	mov.b		EXC_OPWORD+0x1(%a6),%d1
24730: 	andi.b		&0x38,%d1		# extract opmode
24731: 	cmpi.b		%d1,&0x18		# postinc?
24732: 	beq.w		rest_inc
24733: 	cmpi.b		%d1,&0x20		# predec?
24734: 	beq.w		rest_dec
# any other addressing mode never modified An; nothing to undo
24735: 	rts
24736:
# Undo a postincrement: subtract the size in d0 from the An named in
# bits 2-0 of the opword, dispatching through a table of word offsets.
24737: rest_inc:
24738: 	mov.b		EXC_OPWORD+0x1(%a6),%d1
24739: 	andi.w		&0x0007,%d1		# fetch An
24740: 
# load the per-register offset, then jump relative to the table base
24741: 	mov.w		(tbl_rest_inc.b,%pc,%d1.w*2),%d1
24742: 	jmp		(tbl_rest_inc.b,%pc,%d1.w*1)
24743: 
24744: tbl_rest_inc:
24745: 	short		ri_a0 - tbl_rest_inc
24746: 	short		ri_a1 - tbl_rest_inc
24747: 	short		ri_a2 - tbl_rest_inc
24748: 	short		ri_a3 - tbl_rest_inc
24749: 	short		ri_a4 - tbl_rest_inc
24750: 	short		ri_a5 - tbl_rest_inc
24751: 	short		ri_a6 - tbl_rest_inc
24752: 	short		ri_a7 - tbl_rest_inc
24753: 
# a0/a1 live in the exception frame's register save area; a2-a5 are
# still live in the actual registers; a6 was saved by link at (%a6).
24754: ri_a0:
24755: 	sub.l		%d0,EXC_DREGS+0x8(%a6)	# fix stacked a0
24756: 	rts
24757: ri_a1:
24758: 	sub.l		%d0,EXC_DREGS+0xc(%a6)	# fix stacked a1
24759: 	rts
24760: ri_a2:
24761: 	sub.l		%d0,%a2			# fix a2
24762: 	rts
24763: ri_a3:
24764: 	sub.l		%d0,%a3			# fix a3
24765: 	rts
24766: ri_a4:
24767: 	sub.l		%d0,%a4			# fix a4
24768: 	rts
24769: ri_a5:
24770: 	sub.l		%d0,%a5			# fix a5
24771: 	rts
24772: ri_a6:
24773: 	sub.l		%d0,(%a6)		# fix stacked a6
24774: 	rts
# if it's a fmove out instruction, we don't have to fix a7
# because we hadn't changed it yet. if it's an opclass two
# instruction (data moved in) and the exception was in supervisor
# mode, then a7 also wasn't updated. if it was user mode, then
# restore the correct a7 which is in the USP currently.
24780: ri_a7:
24781: 	cmpi.b		EXC_VOFF(%a6),&0x30	# move in or out?
24782: 	bne.b		ri_a7_done		# out
24783: 
24784: 	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
24785: 	bne.b		ri_a7_done		# supervisor
24786: 	movc		%usp,%a0		# restore USP
24787: 	sub.l		%d0,%a0
24788: 	movc		%a0,%usp
24789: ri_a7_done:
24790: 	rts
24791:
# need to invert adjustment value if the <ea> was predec:
# negating d0 lets the shared rest_inc path add the size back.
24793: rest_dec:
24794: 	neg.l		%d0
24795: 	bra.b		rest_inc
CVSweb