Annotation of sys/lib/libkern/arch/hppa/milli.S, Revision 1.1.1.1
1.1 nbrk 1: ; $OpenBSD: milli.S,v 1.5 2001/03/29 04:08:20 mickey Exp $
2: ;
3: ; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
4: ;
5: ; To anyone who acknowledges that this file is provided "AS IS"
6: ; without any express or implied warranty:
7: ; permission to use, copy, modify, and distribute this file
8: ; for any purpose is hereby granted without fee, provided that
9: ; the above copyright notice and this notice appears in all
10: ; copies, and that the name of Hewlett-Packard Company not be
11: ; used in advertising or publicity pertaining to distribution
12: ; of the software without specific, written prior permission.
13: ; Hewlett-Packard Company makes no representations about the
14: ; suitability of this software for any purpose.
15: ;
16:
17: ; Standard Hardware Register Definitions for Use with Assembler
18: ; version A.08.06
19: ; - fr16-31 added at Utah
20: ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21: ; Hardware General Registers
22: r0: .equ 0
23:
24: r1: .equ 1
25:
26: r2: .equ 2
27:
28: r3: .equ 3
29:
30: r4: .equ 4
31:
32: r5: .equ 5
33:
34: r6: .equ 6
35:
36: r7: .equ 7
37:
38: r8: .equ 8
39:
40: r9: .equ 9
41:
42: r10: .equ 10
43:
44: r11: .equ 11
45:
46: r12: .equ 12
47:
48: r13: .equ 13
49:
50: r14: .equ 14
51:
52: r15: .equ 15
53:
54: r16: .equ 16
55:
56: r17: .equ 17
57:
58: r18: .equ 18
59:
60: r19: .equ 19
61:
62: r20: .equ 20
63:
64: r21: .equ 21
65:
66: r22: .equ 22
67:
68: r23: .equ 23
69:
70: r24: .equ 24
71:
72: r25: .equ 25
73:
74: r26: .equ 26
75:
76: r27: .equ 27
77:
78: r28: .equ 28
79:
80: r29: .equ 29
81:
82: r30: .equ 30
83:
84: r31: .equ 31
85:
86: ; Hardware Space Registers
87: sr0: .equ 0
88:
89: sr1: .equ 1
90:
91: sr2: .equ 2
92:
93: sr3: .equ 3
94:
95: sr4: .equ 4
96:
97: sr5: .equ 5
98:
99: sr6: .equ 6
100:
101: sr7: .equ 7
102:
103: ; Hardware Floating Point Registers
104: fr0: .equ 0
105:
106: fr1: .equ 1
107:
108: fr2: .equ 2
109:
110: fr3: .equ 3
111:
112: fr4: .equ 4
113:
114: fr5: .equ 5
115:
116: fr6: .equ 6
117:
118: fr7: .equ 7
119:
120: fr8: .equ 8
121:
122: fr9: .equ 9
123:
124: fr10: .equ 10
125:
126: fr11: .equ 11
127:
128: fr12: .equ 12
129:
130: fr13: .equ 13
131:
132: fr14: .equ 14
133:
134: fr15: .equ 15
135:
136: fr16: .equ 16
137:
138: fr17: .equ 17
139:
140: fr18: .equ 18
141:
142: fr19: .equ 19
143:
144: fr20: .equ 20
145:
146: fr21: .equ 21
147:
148: fr22: .equ 22
149:
150: fr23: .equ 23
151:
152: fr24: .equ 24
153:
154: fr25: .equ 25
155:
156: fr26: .equ 26
157:
158: fr27: .equ 27
159:
160: fr28: .equ 28
161:
162: fr29: .equ 29
163:
164: fr30: .equ 30
165:
166: fr31: .equ 31
167:
168: ; Hardware Control Registers
169: cr0: .equ 0
170:
171: rctr: .equ 0 ; Recovery Counter Register
172:
173: cr8: .equ 8 ; Protection ID 1
174:
175: pidr1: .equ 8
176:
177: cr9: .equ 9 ; Protection ID 2
178:
179: pidr2: .equ 9
180:
181: cr10: .equ 10
182:
183: ccr: .equ 10 ; Coprocessor Configuration Register
184:
185: cr11: .equ 11
186:
187: sar: .equ 11 ; Shift Amount Register
188:
189: cr12: .equ 12
190:
191: pidr3: .equ 12 ; Protection ID 3
192:
193: cr13: .equ 13
194:
195: pidr4: .equ 13 ; Protection ID 4
196:
197: cr14: .equ 14
198:
199: iva: .equ 14 ; Interrupt Vector Address
200:
201: cr15: .equ 15
202:
203: eiem: .equ 15 ; External Interrupt Enable Mask
204:
205: cr16: .equ 16
206:
207: itmr: .equ 16 ; Interval Timer
208:
209: cr17: .equ 17
210:
211: pcsq: .equ 17 ; Program Counter Space queue
212:
213: cr18: .equ 18
214:
215: pcoq: .equ 18 ; Program Counter Offset queue
216:
217: cr19: .equ 19
218:
219: iir: .equ 19 ; Interruption Instruction Register
220:
221: cr20: .equ 20
222:
223: isr: .equ 20 ; Interruption Space Register
224:
225: cr21: .equ 21
226:
227: ior: .equ 21 ; Interruption Offset Register
228:
229: cr22: .equ 22
230:
231: ipsw: .equ 22 ; Interruption Processor Status Word
232:
233: cr23: .equ 23
234:
235: eirr: .equ 23 ; External Interrupt Request
236:
237: cr24: .equ 24
238:
239: ppda: .equ 24 ; Physical Page Directory Address
240:
241: tr0: .equ 24 ; Temporary register 0
242:
243: cr25: .equ 25
244:
245: hta: .equ 25 ; Hash Table Address
246:
247: tr1: .equ 25 ; Temporary register 1
248:
249: cr26: .equ 26
250:
251: tr2: .equ 26 ; Temporary register 2
252:
253: cr27: .equ 27
254:
255: tr3: .equ 27 ; Temporary register 3
256:
257: cr28: .equ 28
258:
259: tr4: .equ 28 ; Temporary register 4
260:
261: cr29: .equ 29
262:
263: tr5: .equ 29 ; Temporary register 5
264:
265: cr30: .equ 30
266:
267: tr6: .equ 30 ; Temporary register 6
268:
269: cr31: .equ 31
270:
271: tr7: .equ 31 ; Temporary register 7
272:
273: ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
274: ; Procedure Call Convention ~
275: ; Register Definitions for Use with Assembler ~
276: ; version A.08.06 ~
277: ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
278: ; Software Architecture General Registers
279: rp: .equ r2 ; return pointer
280:
281: mrp: .equ r31 ; millicode return pointer
282:
283: ret0: .equ r28 ; return value
284:
285: ret1: .equ r29 ; return value (high part of double)
286:
287: sl: .equ r29 ; static link
288:
289: sp: .equ r30 ; stack pointer
290:
291: dp: .equ r27 ; data pointer
292:
293: arg0: .equ r26 ; argument
294:
295: arg1: .equ r25 ; argument or high part of double argument
296:
297: arg2: .equ r24 ; argument
298:
299: arg3: .equ r23 ; argument or high part of double argument
300:
301: ;_____________________________________________________________________________
302: ; Software Architecture Space Registers
303: ; sr0 ; return link from BLE
304: sret: .equ sr1 ; return value
305:
306: sarg: .equ sr1 ; argument
307:
308: ; sr4 ; PC SPACE tracker
309: ; sr5 ; process private data
310: ;_____________________________________________________________________________
311: ; Software Architecture Pseudo Registers
312: previous_sp: .equ 64 ; old stack pointer (locates previous frame)
313:
314: ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
315: ; Standard space and subspace definitions. version A.08.06
316: ; These are generally suitable for programs on HP_UX and HPE.
317: ; Statements commented out are used when building such things as operating
318: ; system kernels.
319: ;;;;;;;;;;;;;;;;
320: ; Additional code subspaces should have ALIGN=8 for an interspace BV
321: ; and should have SORT=24.
322: ;
323: ; For an incomplete executable (program bound to shared libraries),
324: ; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
325: ; and $PLT$ subspaces respectively.
326: ;;;;;;;;;;;;;;;
327:
328: .text
329: .EXPORT $$remI,millicode
330: ; .IMPORT cerror
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; $$remI - 32-bit signed integer remainder millicode.
; In:    arg0 (r26) = dividend, arg1 (r25) = divisor
; Out:   ret1 (r29) = remainder; the fixup after remI300 gives the
;        remainder the sign of the dividend
; Uses:  r1 as scratch; returns through the millicode return pointer r31
; Traps: conditional trap (addit,=) when the divisor is zero
; Body is the standard unrolled 32-step PA-RISC "ds" (divide step)
; sequence, run on the absolute value of the dividend.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
331: $$remI:
332: .PROC
333: .CALLINFO NO_CALLS
334: .ENTRY
335: addit,= 0,arg1,r0 ; trap if divisor (arg1) == 0
336: add,>= r0,arg0,ret1 ; ret1 = dividend; skip negate when >= 0
337: sub r0,ret1,ret1 ; ret1 = |dividend|
338: sub r0,arg1,r1 ; r1 = -divisor
339: ds r0,r1,r0 ; set the V-bit for the divide-step chain
340: or r0,r0,r1 ; r1 = 0
341: add ret1,ret1,ret1 ; shift msb of |dividend| into carry
342: ds r1,arg1,r1 ; 1st of 32 unrolled divide steps
343: addc ret1,ret1,ret1 ; shift ret1 left, bringing in the carry
344: ds r1,arg1,r1
345: addc ret1,ret1,ret1
346: ds r1,arg1,r1
347: addc ret1,ret1,ret1
348: ds r1,arg1,r1
349: addc ret1,ret1,ret1
350: ds r1,arg1,r1
351: addc ret1,ret1,ret1
352: ds r1,arg1,r1
353: addc ret1,ret1,ret1
354: ds r1,arg1,r1
355: addc ret1,ret1,ret1
356: ds r1,arg1,r1
357: addc ret1,ret1,ret1
358: ds r1,arg1,r1
359: addc ret1,ret1,ret1
360: ds r1,arg1,r1
361: addc ret1,ret1,ret1
362: ds r1,arg1,r1
363: addc ret1,ret1,ret1
364: ds r1,arg1,r1
365: addc ret1,ret1,ret1
366: ds r1,arg1,r1
367: addc ret1,ret1,ret1
368: ds r1,arg1,r1
369: addc ret1,ret1,ret1
370: ds r1,arg1,r1
371: addc ret1,ret1,ret1
372: ds r1,arg1,r1
373: addc ret1,ret1,ret1
374: ds r1,arg1,r1
375: addc ret1,ret1,ret1
376: ds r1,arg1,r1
377: addc ret1,ret1,ret1
378: ds r1,arg1,r1
379: addc ret1,ret1,ret1
380: ds r1,arg1,r1
381: addc ret1,ret1,ret1
382: ds r1,arg1,r1
383: addc ret1,ret1,ret1
384: ds r1,arg1,r1
385: addc ret1,ret1,ret1
386: ds r1,arg1,r1
387: addc ret1,ret1,ret1
388: ds r1,arg1,r1
389: addc ret1,ret1,ret1
390: ds r1,arg1,r1
391: addc ret1,ret1,ret1
392: ds r1,arg1,r1
393: addc ret1,ret1,ret1
394: ds r1,arg1,r1
395: addc ret1,ret1,ret1
396: ds r1,arg1,r1
397: addc ret1,ret1,ret1
398: ds r1,arg1,r1
399: addc ret1,ret1,ret1
400: ds r1,arg1,r1
401: addc ret1,ret1,ret1
402: ds r1,arg1,r1
403: addc ret1,ret1,ret1
404: ds r1,arg1,r1 ; 32nd divide step
405: addc ret1,ret1,ret1
406: movb,>=,n r1,ret1,remI300 ; ret1 = raw remainder; done if >= 0
407: add,< arg1,r0,r0 ; pick the correction by the divisor's sign
408: add,tr r1,arg1,ret1 ; correction: remainder + divisor (skips next)
409: sub r1,arg1,ret1 ; correction: remainder - divisor
410: remI300: add,>= arg0,r0,r0 ; skip negate when the dividend was >= 0
411:
412: sub r0,ret1,ret1 ; remainder takes the sign of the dividend
413: bv r0(r31) ; return to caller
414: nop
415: .EXIT
416: .PROCEND
417:
418: bit1: .equ 1
419:
420: bit30: .equ 30
421: bit31: .equ 31
422:
423: len2: .equ 2
424:
425: len4: .equ 4
426:
427: #if 0
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; $$dyncall - dynamic (indirect) call stub, currently compiled out.
; Branches to the procedure whose address/plabel is in r22; when bit 30
; of the plabel is set it first loads the two words at the plabel
; (presumably target entry point and linkage pointer - standard PA
; shared-library convention, confirm before re-enabling), then does an
; interspace branch, saving rp at -24(sp) in the delay slot.
; Kept under "#if 0": not assembled into the kernel library.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
428: $$dyncall:
429: .proc
430: .callinfo NO_CALLS
431: .export $$dyncall,MILLICODE
432:
433: bb,>=,n 22,bit30,noshlibs ; plain address if plabel bit 30 is clear
434:
435: depi 0,bit31,len2,22 ; clear the two low plabel flag bits
436: ldw 4(22),19 ; load second plabel word into r19
437: ldw 0(22),22 ; load target entry point into r22
438: noshlibs:
439: ldsid (22),r1 ; space id of the target address
440: mtsp r1,sr0
441: be 0(sr0,r22) ; interspace branch to the target
442: stw rp,-24(sp) ; (delay slot) save the return pointer
443: .procend
444: #endif
445:
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; $$sh_func_adrs - resolve a function pointer to its entry address.
; In:    arg0 (r26) = function address (possibly a plabel)
; Out:   ret1 (r29) = resolved address (defaults to arg0 unchanged)
; Scratch: r22; returns via bv r0(r31).
; NOTE(review): probew tests access to the word at arg0 with bit 30
; cleared; the extru,= nullification then decides whether the
; delay-slot ldws replaces ret1 with the word read from that address.
; Confirm the exact plabel convention against the PA-RISC runtime
; architecture documentation before relying on edge cases.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
446: $$sh_func_adrs:
447: .proc
448: .callinfo NO_CALLS
449: .export $$sh_func_adrs, millicode
450: ldo 0(r26),ret1 ; default result: arg0 itself
451: dep r0,30,1,r26 ; clear bit 30 of the address
452: probew (r26),r31,r22 ; probe write access; result in r22
453: extru,= r22,31,1,r22 ; extract probe bit; nullify next if zero
454: bv r0(r31) ; return
455: ldws 0(r26),ret1 ; (delay slot) load word as the real address
456: .procend
457:
458: temp: .EQU r1
459:
460: retreg: .EQU ret1 ; r29
461:
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; $$divU - 32-bit unsigned integer divide millicode.
; In:    arg0 (r26) = dividend, arg1 (r25) = divisor
; Out:   ret1 (r29, "retreg") = quotient
; Uses:  r1 ("temp") as scratch; returns via bv 0(r31)
; Traps: addit,= trap on divisor == 0 (zero_divisor path)
; Divisors 1..15 are dispatched through the blr branch table at
; special_divisor (two instructions per entry) to either an inline
; shift or one of the imported $$divU_<n> specials; a divisor with the
; high bit set reduces to a single compare at big_divisor; everything
; else runs the unrolled 32-step "ds" sequence at "normal".
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
462: .export $$divU,millicode
463: .import $$divU_3,millicode
464: .import $$divU_5,millicode
465: .import $$divU_6,millicode
466: .import $$divU_7,millicode
467: .import $$divU_9,millicode
468: .import $$divU_10,millicode
469: .import $$divU_12,millicode
470: .import $$divU_14,millicode
471: .import $$divU_15,millicode
472: $$divU:
473: .proc
474: .callinfo NO_CALLS
475: ; The subtract is not nullified since it does no harm and can be used
476: ; by the two cases that branch back to "normal".
477: comib,>= 15,arg1,special_divisor
478: sub r0,arg1,temp ; clear carry, negate the divisor
479: ds r0,temp,r0 ; set V-bit to 1
480: normal:
481: add arg0,arg0,retreg ; shift msb bit into carry
482: ds r0,arg1,temp ; 1st divide step, if no carry
483: addc retreg,retreg,retreg ; shift retreg with/into carry
484: ds temp,arg1,temp ; 2nd divide step
485: addc retreg,retreg,retreg ; shift retreg with/into carry
486: ds temp,arg1,temp ; 3rd divide step
487: addc retreg,retreg,retreg ; shift retreg with/into carry
488: ds temp,arg1,temp ; 4th divide step
489: addc retreg,retreg,retreg ; shift retreg with/into carry
490: ds temp,arg1,temp ; 5th divide step
491: addc retreg,retreg,retreg ; shift retreg with/into carry
492: ds temp,arg1,temp ; 6th divide step
493: addc retreg,retreg,retreg ; shift retreg with/into carry
494: ds temp,arg1,temp ; 7th divide step
495: addc retreg,retreg,retreg ; shift retreg with/into carry
496: ds temp,arg1,temp ; 8th divide step
497: addc retreg,retreg,retreg ; shift retreg with/into carry
498: ds temp,arg1,temp ; 9th divide step
499: addc retreg,retreg,retreg ; shift retreg with/into carry
500: ds temp,arg1,temp ; 10th divide step
501: addc retreg,retreg,retreg ; shift retreg with/into carry
502: ds temp,arg1,temp ; 11th divide step
503: addc retreg,retreg,retreg ; shift retreg with/into carry
504: ds temp,arg1,temp ; 12th divide step
505: addc retreg,retreg,retreg ; shift retreg with/into carry
506: ds temp,arg1,temp ; 13th divide step
507: addc retreg,retreg,retreg ; shift retreg with/into carry
508: ds temp,arg1,temp ; 14th divide step
509: addc retreg,retreg,retreg ; shift retreg with/into carry
510: ds temp,arg1,temp ; 15th divide step
511: addc retreg,retreg,retreg ; shift retreg with/into carry
512: ds temp,arg1,temp ; 16th divide step
513: addc retreg,retreg,retreg ; shift retreg with/into carry
514: ds temp,arg1,temp ; 17th divide step
515: addc retreg,retreg,retreg ; shift retreg with/into carry
516: ds temp,arg1,temp ; 18th divide step
517: addc retreg,retreg,retreg ; shift retreg with/into carry
518: ds temp,arg1,temp ; 19th divide step
519: addc retreg,retreg,retreg ; shift retreg with/into carry
520: ds temp,arg1,temp ; 20th divide step
521: addc retreg,retreg,retreg ; shift retreg with/into carry
522: ds temp,arg1,temp ; 21st divide step
523: addc retreg,retreg,retreg ; shift retreg with/into carry
524: ds temp,arg1,temp ; 22nd divide step
525: addc retreg,retreg,retreg ; shift retreg with/into carry
526: ds temp,arg1,temp ; 23rd divide step
527: addc retreg,retreg,retreg ; shift retreg with/into carry
528: ds temp,arg1,temp ; 24th divide step
529: addc retreg,retreg,retreg ; shift retreg with/into carry
530: ds temp,arg1,temp ; 25th divide step
531: addc retreg,retreg,retreg ; shift retreg with/into carry
532: ds temp,arg1,temp ; 26th divide step
533: addc retreg,retreg,retreg ; shift retreg with/into carry
534: ds temp,arg1,temp ; 27th divide step
535: addc retreg,retreg,retreg ; shift retreg with/into carry
536: ds temp,arg1,temp ; 28th divide step
537: addc retreg,retreg,retreg ; shift retreg with/into carry
538: ds temp,arg1,temp ; 29th divide step
539: addc retreg,retreg,retreg ; shift retreg with/into carry
540: ds temp,arg1,temp ; 30th divide step
541: addc retreg,retreg,retreg ; shift retreg with/into carry
542: ds temp,arg1,temp ; 31st divide step
543: addc retreg,retreg,retreg ; shift retreg with/into carry
544: ds temp,arg1,temp ; 32nd divide step,
545: bv 0(r31)
546: addc retreg,retreg,retreg ; shift last retreg bit into retreg
547: ;_____________________________________________________________________________
548: ; handle the cases where divisor is a small constant or has high bit on
549: special_divisor:
550: blr arg1,r0
551: comib,>,n 0,arg1,big_divisor ; nullify previous instruction
552: zero_divisor: ; this label is here to provide external visibility
553:
554: addit,= 0,arg1,0 ; trap for zero dvr
555: nop
556: bv 0(r31) ; divisor == 1
557: copy arg0,retreg
558: bv 0(r31) ; divisor == 2
559: extru arg0,30,31,retreg
560: b,n $$divU_3 ; divisor == 3
561: nop
562: bv 0(r31) ; divisor == 4
563: extru arg0,29,30,retreg
564: b,n $$divU_5 ; divisor == 5
565: nop
566: b,n $$divU_6 ; divisor == 6
567: nop
568: b,n $$divU_7 ; divisor == 7
569: nop
570: bv 0(r31) ; divisor == 8
571: extru arg0,28,29,retreg
572: b,n $$divU_9 ; divisor == 9
573: nop
574: b,n $$divU_10 ; divisor == 10
575: nop
576: b normal ; divisor == 11
577: ds r0,temp,r0 ; set V-bit to 1
578: b,n $$divU_12 ; divisor == 12
579: nop
580: b normal ; divisor == 13
581: ds r0,temp,r0 ; set V-bit to 1
582: b,n $$divU_14 ; divisor == 14
583: nop
584: b,n $$divU_15 ; divisor == 15
585: nop
586: ;_____________________________________________________________________________
587: ; Handle the case where the high bit is on in the divisor.
588: ; Compute: if( dividend>=divisor) quotient=1; else quotient=0;
589: ; Note: dividend>=divisor iff dividend-divisor does not borrow
590: ; and not borrow iff carry
591: big_divisor:
592: sub arg0,arg1,r0
593: bv 0(r31)
594: addc r0,r0,retreg
595: .procend
596: .end
597:
598: t2: .EQU r1
599:
600: ; x2 .EQU arg0 ; r26
601: t1: .EQU arg1 ; r25
602:
603: ; x1 .EQU ret1 ; r29
604: ;_____________________________________________________________________________
605:
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; $$divide_by_constant - covering label for the $$divI_<n> / $$divU_<n>
; divide-by-small-constant millicode entry points below.
; In:    arg0 (r26) = dividend; Out: ret1 (r29) = quotient.
; Scratch: r1 ("t2") and r25 ("t1" - note arg1 is clobbered);
; each entry returns via bv 0(r31) or bv,n 0(r31).
; Powers of two are a bias + arithmetic extract; the other constants
; build a reciprocal multiply out of SH?ADD steps and then fold the
; partial sums through the shared $pos/$neg (and *_for_17) code.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
606: $$divide_by_constant:
607: .PROC
608: .CALLINFO NO_CALLS
609: .export $$divide_by_constant,millicode
610: ; Provides a "nice" label for the code covered by the unwind descriptor
611: ; for things like gprof.
612:
; Signed divides by powers of two: bias a negative dividend by (n-1)
; (COMCLR nullifies the ADDI for non-negative arg0), then arithmetic
; shift right via EXTRS in the branch delay slot.
613: $$divI_2:
614: .EXPORT $$divI_2,MILLICODE
615: COMCLR,>= arg0,0,0
616: ADDI 1,arg0,arg0
617: bv 0(r31)
618: EXTRS arg0,30,31,ret1
619:
620: $$divI_4:
621: .EXPORT $$divI_4,MILLICODE
622: COMCLR,>= arg0,0,0
623: ADDI 3,arg0,arg0
624: bv 0(r31)
625: EXTRS arg0,29,30,ret1
626:
627: $$divI_8:
628: .EXPORT $$divI_8,MILLICODE
629: COMCLR,>= arg0,0,0
630: ADDI 7,arg0,arg0
631: bv 0(r31)
632: EXTRS arg0,28,29,ret1
633:
634: $$divI_16:
635: .EXPORT $$divI_16,MILLICODE
636: COMCLR,>= arg0,0,0
637: ADDI 15,arg0,arg0
638: bv 0(r31)
639: EXTRS arg0,27,28,ret1
640:
; Remaining constants multiply by a scaled reciprocal using SH?ADD
; steps, then fall into the shared folding code at $pos/$neg.
641: $$divI_3:
642: .EXPORT $$divI_3,MILLICODE
643: COMB,<,N arg0,0,$neg3
644:
645: ADDI 1,arg0,arg0
646: EXTRU arg0,1,2,ret1
647: SH2ADD arg0,arg0,arg0
648: B $pos
649: ADDC ret1,0,ret1
650:
651: $neg3:
652: SUBI 1,arg0,arg0
653: EXTRU arg0,1,2,ret1
654: SH2ADD arg0,arg0,arg0
655: B $neg
656: ADDC ret1,0,ret1
657:
658: $$divU_3:
659: .EXPORT $$divU_3,MILLICODE
660: ADDI 1,arg0,arg0
661: ADDC 0,0,ret1
662: SHD ret1,arg0,30,t1
663: SH2ADD arg0,arg0,arg0
664: B $pos
665: ADDC ret1,t1,ret1
666:
667: $$divI_5:
668: .EXPORT $$divI_5,MILLICODE
669: COMB,<,N arg0,0,$neg5
670: ADDI 3,arg0,t1
671: SH1ADD arg0,t1,arg0
672: B $pos
673: ADDC 0,0,ret1
674:
675: $neg5:
676: SUB 0,arg0,arg0
677: ADDI 1,arg0,arg0
678: SHD 0,arg0,31,ret1
679: SH1ADD arg0,arg0,arg0
680: B $neg
681: ADDC ret1,0,ret1
682:
683: $$divU_5:
684: .EXPORT $$divU_5,MILLICODE
685: ADDI 1,arg0,arg0
686: ADDC 0,0,ret1
687: SHD ret1,arg0,31,t1
688: SH1ADD arg0,arg0,arg0
689: B $pos
690: ADDC t1,ret1,ret1
691:
692: $$divI_6:
693: .EXPORT $$divI_6,MILLICODE
694: COMB,<,N arg0,0,$neg6
695: EXTRU arg0,30,31,arg0
696: ADDI 5,arg0,t1
697: SH2ADD arg0,t1,arg0
698: B $pos
699: ADDC 0,0,ret1
700:
701: $neg6:
702: SUBI 2,arg0,arg0
703: EXTRU arg0,30,31,arg0
704: SHD 0,arg0,30,ret1
705: SH2ADD arg0,arg0,arg0
706: B $neg
707: ADDC ret1,0,ret1
708:
709: $$divU_6:
710: .EXPORT $$divU_6,MILLICODE
711: EXTRU arg0,30,31,arg0
712: ADDI 1,arg0,arg0
713: SHD 0,arg0,30,ret1
714: SH2ADD arg0,arg0,arg0
715: B $pos
716: ADDC ret1,0,ret1
717:
; $pos / $pos_for_17 below fold the partial product down by adding the
; value shifted right 4, then 8, then 16 bits, accumulating the
; carries into ret1, and return.
718: $$divU_10:
719: .EXPORT $$divU_10,MILLICODE
720: EXTRU arg0,30,31,arg0
721: ADDI 3,arg0,t1
722: SH1ADD arg0,t1,arg0
723: ADDC 0,0,ret1
724: $pos:
725: SHD ret1,arg0,28,t1
726: SHD arg0,0,28,t2
727: ADD arg0,t2,arg0
728: ADDC ret1,t1,ret1
729: $pos_for_17:
730: SHD ret1,arg0,24,t1
731: SHD arg0,0,24,t2
732: ADD arg0,t2,arg0
733: ADDC ret1,t1,ret1
734:
735: SHD ret1,arg0,16,t1
736: SHD arg0,0,16,t2
737: ADD arg0,t2,arg0
738: bv 0(r31)
739: ADDC ret1,t1,ret1
740:
741: $$divI_10:
742: .EXPORT $$divI_10,MILLICODE
743: COMB,< arg0,0,$neg10
744: COPY 0,ret1
745: EXTRU arg0,30,31,arg0
746: ADDIB,TR 1,arg0,$pos
747: SH1ADD arg0,arg0,arg0
748:
; $neg / $neg_for_17: same folding, then negate ret1 before return.
749: $neg10:
750: SUBI 2,arg0,arg0
751: EXTRU arg0,30,31,arg0
752: SH1ADD arg0,arg0,arg0
753: $neg:
754: SHD ret1,arg0,28,t1
755: SHD arg0,0,28,t2
756: ADD arg0,t2,arg0
757: ADDC ret1,t1,ret1
758: $neg_for_17:
759: SHD ret1,arg0,24,t1
760: SHD arg0,0,24,t2
761: ADD arg0,t2,arg0
762: ADDC ret1,t1,ret1
763:
764: SHD ret1,arg0,16,t1
765: SHD arg0,0,16,t2
766: ADD arg0,t2,arg0
767: ADDC ret1,t1,ret1
768: bv 0(r31)
769: SUB 0,ret1,ret1
770:
771: $$divI_12:
772: .EXPORT $$divI_12,MILLICODE
773: COMB,< arg0,0,$neg12
774: COPY 0,ret1
775: EXTRU arg0,29,30,arg0
776: ADDIB,TR 1,arg0,$pos
777: SH2ADD arg0,arg0,arg0
778:
779: $neg12:
780: SUBI 4,arg0,arg0
781: EXTRU arg0,29,30,arg0
782: B $neg
783: SH2ADD arg0,arg0,arg0
784:
785: $$divU_12:
786: .EXPORT $$divU_12,MILLICODE
787: EXTRU arg0,29,30,arg0
788: ADDI 5,arg0,t1
789: SH2ADD arg0,t1,arg0
790: B $pos
791: ADDC 0,0,ret1
792:
793: $$divI_15:
794: .EXPORT $$divI_15,MILLICODE
795: COMB,< arg0,0,$neg15
796: COPY 0,ret1
797: ADDIB,TR 1,arg0,$pos+4
798: SHD ret1,arg0,28,t1
799:
800: $neg15:
801: B $neg
802: SUBI 1,arg0,arg0
803:
804: $$divU_15:
805: .EXPORT $$divU_15,MILLICODE
806: ADDI 1,arg0,arg0
807: B $pos
808: ADDC 0,0,ret1
809:
810: $$divI_17:
811: .EXPORT $$divI_17,MILLICODE
812: COMB,<,N arg0,0,$neg17
813: ADDI 1,arg0,arg0
814: SHD 0,arg0,28,t1
815: SHD arg0,0,28,t2
816: SUB t2,arg0,arg0
817: B $pos_for_17
818: SUBB t1,0,ret1
819:
820: $neg17:
821: SUBI 1,arg0,arg0
822: SHD 0,arg0,28,t1
823: SHD arg0,0,28,t2
824: SUB t2,arg0,arg0
825: B $neg_for_17
826: SUBB t1,0,ret1
827:
828: $$divU_17:
829: .EXPORT $$divU_17,MILLICODE
830: ADDI 1,arg0,arg0
831: ADDC 0,0,ret1
832: SHD ret1,arg0,28,t1
833: $u17:
834: SHD arg0,0,28,t2
835: SUB t2,arg0,arg0
836: B $pos_for_17
837: SUBB t1,ret1,ret1
838:
; Divide by 7 (and relatives): multiply by 9 (SH3ADD) then iterate
; 24-bit chunk folds through the $1/$2 (positive) or $3/$4 (negative)
; loops until no significant bits remain.
839: $$divI_7:
840: .EXPORT $$divI_7,MILLICODE
841: COMB,<,N arg0,0,$neg7
842: $7:
843: ADDI 1,arg0,arg0
844: SHD 0,arg0,29,ret1
845: SH3ADD arg0,arg0,arg0
846: ADDC ret1,0,ret1
847: $pos7:
848: SHD ret1,arg0,26,t1
849: SHD arg0,0,26,t2
850: ADD arg0,t2,arg0
851: ADDC ret1,t1,ret1
852:
853: SHD ret1,arg0,20,t1
854: SHD arg0,0,20,t2
855: ADD arg0,t2,arg0
856: ADDC ret1,t1,t1
857:
858: COPY 0,ret1
859: SHD,= t1,arg0,24,t1
860: $1:
861: ADDB,TR t1,ret1,$2
862: EXTRU arg0,31,24,arg0
863:
864: bv,n 0(r31)
865:
866: $2:
867: ADDB,TR t1,arg0,$1
868: EXTRU,= arg0,7,8,t1
869:
870: $neg7:
871: SUBI 1,arg0,arg0
872: $8:
873: SHD 0,arg0,29,ret1
874: SH3ADD arg0,arg0,arg0
875: ADDC ret1,0,ret1
876:
877: $neg7_shift:
878: SHD ret1,arg0,26,t1
879: SHD arg0,0,26,t2
880: ADD arg0,t2,arg0
881: ADDC ret1,t1,ret1
882:
883: SHD ret1,arg0,20,t1
884: SHD arg0,0,20,t2
885: ADD arg0,t2,arg0
886: ADDC ret1,t1,t1
887:
888: COPY 0,ret1
889: SHD,= t1,arg0,24,t1
890: $3:
891: ADDB,TR t1,ret1,$4
892: EXTRU arg0,31,24,arg0
893:
894: bv 0(r31)
895: SUB 0,ret1,ret1
896:
897: $4:
898: ADDB,TR t1,arg0,$3
899: EXTRU,= arg0,7,8,t1
900:
901: $$divU_7:
902: .EXPORT $$divU_7,MILLICODE
903: ADDI 1,arg0,arg0
904: ADDC 0,0,ret1
905: SHD ret1,arg0,29,t1
906: SH3ADD arg0,arg0,arg0
907: B $pos7
908: ADDC t1,ret1,ret1
909:
910: $$divI_9:
911: .EXPORT $$divI_9,MILLICODE
912: COMB,<,N arg0,0,$neg9
913: ADDI 1,arg0,arg0
914: SHD 0,arg0,29,t1
915: SHD arg0,0,29,t2
916: SUB t2,arg0,arg0
917: B $pos7
918: SUBB t1,0,ret1
919:
920: $neg9:
921: SUBI 1,arg0,arg0
922: SHD 0,arg0,29,t1
923: SHD arg0,0,29,t2
924: SUB t2,arg0,arg0
925: B $neg7_shift
926: SUBB t1,0,ret1
927:
928: $$divU_9:
929: .EXPORT $$divU_9,MILLICODE
930: ADDI 1,arg0,arg0
931: ADDC 0,0,ret1
932: SHD ret1,arg0,29,t1
933: SHD arg0,0,29,t2
934: SUB t2,arg0,arg0
935: B $pos7
936: SUBB t1,ret1,ret1
937:
; Divide by 14: halve (EXTRU) then reuse the divide-by-7 paths.
938: $$divI_14:
939: .EXPORT $$divI_14,MILLICODE
940: COMB,<,N arg0,0,$neg14
941: $$divU_14:
942: .EXPORT $$divU_14,MILLICODE
943: B $7
944: EXTRU arg0,30,31,arg0
945:
946: $neg14:
947: SUBI 2,arg0,arg0
948: B $8
949: EXTRU arg0,30,31,arg0
950:
951: .PROCEND
952: .END
953:
954: rmndr: .EQU ret1 ; r29
955:
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; $$remU - 32-bit unsigned integer remainder millicode.
; In:    arg0 (r26) = dividend, arg1 (r25) = divisor
; Out:   ret1 (r29, "rmndr") = dividend % divisor
; Uses:  r1 ("temp", .EQU'd above $$divU) as scratch; returns bv,n 0(r31)
; Traps: addit,= trap on divisor == 0 (special_case path)
; A divisor that is zero or has its high bit set (comib,>= 0,arg1
; tests arg1 <= 0 as a signed value) takes special_case; all others
; run the unrolled 32-step "ds" sequence plus one final correction.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
956: .export $$remU,millicode
957: $$remU:
958: .proc
959: .callinfo NO_CALLS
960: .entry
961:
962: comib,>=,n 0,arg1,special_case ; divisor == 0 or has high bit set
963: sub r0,arg1,rmndr ; clear carry, negate the divisor
964: ds r0,rmndr,r0 ; set V-bit to 1
965: add arg0,arg0,temp ; shift msb bit into carry
966: ds r0,arg1,rmndr ; 1st divide step, if no carry
967: addc temp,temp,temp ; shift temp with/into carry
968: ds rmndr,arg1,rmndr ; 2nd divide step
969: addc temp,temp,temp ; shift temp with/into carry
970: ds rmndr,arg1,rmndr ; 3rd divide step
971: addc temp,temp,temp ; shift temp with/into carry
972: ds rmndr,arg1,rmndr ; 4th divide step
973: addc temp,temp,temp ; shift temp with/into carry
974: ds rmndr,arg1,rmndr ; 5th divide step
975: addc temp,temp,temp ; shift temp with/into carry
976: ds rmndr,arg1,rmndr ; 6th divide step
977: addc temp,temp,temp ; shift temp with/into carry
978: ds rmndr,arg1,rmndr ; 7th divide step
979: addc temp,temp,temp ; shift temp with/into carry
980: ds rmndr,arg1,rmndr ; 8th divide step
981: addc temp,temp,temp ; shift temp with/into carry
982: ds rmndr,arg1,rmndr ; 9th divide step
983: addc temp,temp,temp ; shift temp with/into carry
984: ds rmndr,arg1,rmndr ; 10th divide step
985: addc temp,temp,temp ; shift temp with/into carry
986: ds rmndr,arg1,rmndr ; 11th divide step
987: addc temp,temp,temp ; shift temp with/into carry
988: ds rmndr,arg1,rmndr ; 12th divide step
989: addc temp,temp,temp ; shift temp with/into carry
990: ds rmndr,arg1,rmndr ; 13th divide step
991: addc temp,temp,temp ; shift temp with/into carry
992: ds rmndr,arg1,rmndr ; 14th divide step
993: addc temp,temp,temp ; shift temp with/into carry
994: ds rmndr,arg1,rmndr ; 15th divide step
995: addc temp,temp,temp ; shift temp with/into carry
996: ds rmndr,arg1,rmndr ; 16th divide step
997: addc temp,temp,temp ; shift temp with/into carry
998: ds rmndr,arg1,rmndr ; 17th divide step
999: addc temp,temp,temp ; shift temp with/into carry
1000: ds rmndr,arg1,rmndr ; 18th divide step
1001: addc temp,temp,temp ; shift temp with/into carry
1002: ds rmndr,arg1,rmndr ; 19th divide step
1003: addc temp,temp,temp ; shift temp with/into carry
1004: ds rmndr,arg1,rmndr ; 20th divide step
1005: addc temp,temp,temp ; shift temp with/into carry
1006: ds rmndr,arg1,rmndr ; 21st divide step
1007: addc temp,temp,temp ; shift temp with/into carry
1008: ds rmndr,arg1,rmndr ; 22nd divide step
1009: addc temp,temp,temp ; shift temp with/into carry
1010: ds rmndr,arg1,rmndr ; 23rd divide step
1011: addc temp,temp,temp ; shift temp with/into carry
1012: ds rmndr,arg1,rmndr ; 24th divide step
1013: addc temp,temp,temp ; shift temp with/into carry
1014: ds rmndr,arg1,rmndr ; 25th divide step
1015: addc temp,temp,temp ; shift temp with/into carry
1016: ds rmndr,arg1,rmndr ; 26th divide step
1017: addc temp,temp,temp ; shift temp with/into carry
1018: ds rmndr,arg1,rmndr ; 27th divide step
1019: addc temp,temp,temp ; shift temp with/into carry
1020: ds rmndr,arg1,rmndr ; 28th divide step
1021: addc temp,temp,temp ; shift temp with/into carry
1022: ds rmndr,arg1,rmndr ; 29th divide step
1023: addc temp,temp,temp ; shift temp with/into carry
1024: ds rmndr,arg1,rmndr ; 30th divide step
1025: addc temp,temp,temp ; shift temp with/into carry
1026: ds rmndr,arg1,rmndr ; 31st divide step
1027: addc temp,temp,temp ; shift temp with/into carry
1028: ds rmndr,arg1,rmndr ; 32nd divide step,
1029: comiclr,<= 0,rmndr,r0 ; skip the correction when 0 <= rmndr
1030: add rmndr,arg1,rmndr ; correction
1031: ; .exit
1032: bv,n 0(r31)
1033: nop
1034: ; Putting >= on the last DS and deleting COMICLR does not work!
1035: ;_____________________________________________________________________________
; Divisor is zero (trap) or >= 2^31: at most one subtraction needed.
1036: special_case:
1037: addit,= 0,arg1,r0 ; trap on div by zero
1038: sub,>>= arg0,arg1,rmndr ; rmndr = dividend - divisor if it fits
1039: copy arg0,rmndr ; otherwise remainder is the dividend
1040: .exit
1041: bv,n 0(r31)
1042: nop
1043: .procend
1044: .end
1045:
1046: ; Use bv 0(r31) and bv,n 0(r31) instead.
1047: ; #define return bv 0(%mrp)
1048: ; #define return_n bv,n 0(%mrp)
1049:
1050: .align 16
1051: $$mulI:
1052:
1053: .proc
1054: .callinfo NO_CALLS
1055: .export $$mulI, millicode
1056: combt,<<= %r25,%r26,l4 ; swap args if unsigned %r25>%r26
1057: copy 0,%r29 ; zero out the result
1058: xor %r26,%r25,%r26 ; swap %r26 & %r25 using the
1059: xor %r26,%r25,%r25 ; old xor trick
1060: xor %r26,%r25,%r26
1061: l4: combt,<= 0,%r26,l3 ; if %r26>=0 then proceed like unsigned
1062:
1063: zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
1064: sub,> 0,%r25,%r1 ; otherwise negate both and
1065: combt,<=,n %r26,%r1,l2 ; swap back if |%r26|<|%r25|
1066: sub 0,%r26,%r25
1067: movb,tr,n %r1,%r26,l2 ; 10th inst.
1068:
1069: l0: add %r29,%r1,%r29 ; add in this partial product
1070:
1071: l1: zdep %r26,23,24,%r26 ; %r26 <<= 8 ******************
1072:
1073: l2: zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
1074:
1075: l3: blr %r1,0 ; case on these 8 bits ******
1076:
1077: extru %r25,23,24,%r25 ; %r25 >>= 8 ******************
1078:
1079: ;16 insts before this.
1080: ; %r26 <<= 8 **************************
1081: x0: comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nop
1082:
1083: x1: comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nop
1084:
1085: x2: comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
1086:
1087: x3: comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
1088:
1089: x4: comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
1090:
1091: x5: comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
1092:
1093: x6: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
1094:
1095: x7: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0
1096:
1097: x8: comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
1098:
1099: x9: comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
1100:
1101: x10: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
1102:
1103: x11: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
1104:
1105: x12: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
1106:
1107: x13: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
1108:
1109: x14: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1110:
1111: x15: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0
1112:
1113: x16: zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1114:
1115: x17: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0
1116:
1117: x18: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
1118:
1119: x19: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0
1120:
1121: x20: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
1122:
1123: x21: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
1124:
1125: x22: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1126:
1127: x23: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1128:
1129: x24: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
1130:
1131: x25: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
1132:
1133: x26: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1134:
1135: x27: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0
1136:
1137: x28: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1138:
1139: x29: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1140:
1141: x30: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1142:
1143: x31: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1144:
1145: x32: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1146:
1147: x33: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
1148:
; Multiply-by-constant dispatch table (interior of the $$mulI millicode
; routine whose entry, dispatch code, and labels l0/l1 appear earlier
; in this file, outside this excerpt).  Each xN label is the branch
; target for multiplier byte N.  A line packs four instructions
; separated by '!' (the HP assembler statement separator):
;   * one or two shift-and-add steps (sh1add/sh2add/sh3add computing
;     2a+b / 4a+b / 8a+b, zdep for a left shift, plus add/sub) that
;     build N times the multiplicand %r26 -- or a convenient fraction
;     of it -- in the scratch register %r1;
;   * a branch to a shared exit stub (e_shift, e_t0, e_2t0, e_t02a0,
;     ...) that folds %r1 into the running product in %r29; and
;   * the stub-chosen final shift-and-add carried in the delay slot.
; Entries that need no stub instead test %r25 with comb,<>: non-zero
; means more multiplier bits remain (branch to l0/l1), zero means done
; (return via bv,n 0(r31) or fall to ret_t0).
; NOTE(review): the roles of %r25/%r26/%r29 above are inferred from the
; visible code and the exit stubs; confirm against the $$mulI prologue
; earlier in the file.
1149: x34: zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1150:
1151: x35: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1
1152:
1153: x36: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
1154:
1155: x37: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
1156:
1157: x38: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1158:
1159: x39: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1160:
1161: x40: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
1162:
1163: x41: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
1164:
1165: x42: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1166:
1167: x43: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1168:
1169: x44: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1170:
1171: x45: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
1172:
1173: x46: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1
1174:
1175: x47: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
1176:
1177: x48: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0
1178:
1179: x49: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1
1180:
1181: x50: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1182:
1183: x51: sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1184:
1185: x52: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1186:
1187: x53: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1188:
1189: x54: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1190:
1191: x55: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1192:
1193: x56: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1194:
1195: x57: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1196:
1197: x58: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1198:
1199: x59: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
1200:
1201: x60: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1202:
1203: x61: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1204:
1205: x62: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1206:
1207: x63: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1208:
1209: x64: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1210:
1211: x65: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
1212:
1213: x66: zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1214:
1215: x67: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1216:
1217: x68: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1218:
1219: x69: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1220:
1221: x70: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
1222:
1223: x71: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1224:
1225: x72: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
1226:
1227: x73: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29
1228:
1229: x74: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1230:
1231: x75: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1232:
1233: x76: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1234:
1235: x77: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1236:
1237: x78: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1238:
1239: x79: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1240:
1241: x80: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
1242:
1243: x81: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
1244:
1245: x82: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1246:
1247: x83: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1248:
1249: x84: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1250:
1251: x85: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1252:
1253: x86: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1254:
1255: x87: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r26,%r1,%r1
1256:
1257: x88: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1258:
1259: x89: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1260:
1261: x90: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1262:
1263: x91: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1264:
1265: x92: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1266:
1267: x93: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1268:
1269: x94: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r26,%r1,%r1
1270:
1271: x95: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1272:
1273: x96: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1274:
1275: x97: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1276:
1277: x98: zdep %r26,26,27,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
1278:
1279: x99: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1280:
1281: x100: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1282:
1283: x101: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1284:
1285: x102: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1286:
1287: x103: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r26,%r1
1288:
1289: x104: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1290:
1291: x105: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1292:
1293: x106: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1294:
1295: x107: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh3add %r1,%r26,%r1
1296:
1297: x108: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1298:
1299: x109: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1300:
1301: x110: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1302:
1303: x111: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1304:
1305: x112: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! zdep %r1,27,28,%r1
1306:
1307: x113: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
1308:
1309: x114: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1310:
1311: x115: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1312:
1313: x116: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
1314:
1315: x117: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
1316:
1317: x118: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0a0 ! sh3add %r1,%r1,%r1
1318:
1319: x119: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
1320:
1321: x120: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1322:
1323: x121: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1324:
1325: x122: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1326:
1327: x123: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1328:
1329: x124: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1330:
1331: x125: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1332:
1333: x126: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1334:
1335: x127: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1336:
1337: x128: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1338:
1339: x129: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! add %r1,%r26,%r1 ! b,n ret_t0
1340:
1341: x130: zdep %r26,25,26,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1342:
1343: x131: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1344:
1345: x132: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1346:
1347: x133: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1348:
1349: x134: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1350:
1351: x135: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1352:
1353: x136: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1354:
1355: x137: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1356:
1357: x138: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1358:
1359: x139: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
1360:
1361: x140: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r1,%r1
1362:
1363: x141: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
1364:
1365: x142: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
1366:
1367: x143: zdep %r26,27,28,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1368:
1369: x144: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1370:
1371: x145: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1372:
1373: x146: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1374:
1375: x147: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1376:
1377: x148: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1378:
1379: x149: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1380:
1381: x150: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1382:
1383: x151: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
1384:
1385: x152: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1386:
1387: x153: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1388:
1389: x154: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1390:
1391: x155: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1392:
1393: x156: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1394:
1395: x157: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
1396:
1397: x158: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
1398:
1399: x159: zdep %r26,26,27,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1400:
1401: x160: sh2add %r26,%r26,%r1 ! sh2add %r1,0,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1402:
1403: x161: sh3add %r26,0,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1404:
1405: x162: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1406:
1407: x163: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1408:
1409: x164: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1410:
1411: x165: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1412:
1413: x166: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1414:
1415: x167: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
1416:
1417: x168: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1418:
1419: x169: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1420:
1421: x170: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1422:
1423: x171: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
1424:
1425: x172: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1426:
1427: x173: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
1428:
1429: x174: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t04a0 ! sh2add %r1,%r1,%r1
1430:
1431: x175: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
1432:
1433: x176: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0 ! add %r1,%r26,%r1
1434:
1435: x177: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
1436:
1437: x178: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
1438:
1439: x179: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r26,%r1
1440:
1441: x180: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1442:
1443: x181: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1444:
1445: x182: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1446:
1447: x183: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
1448:
1449: x184: sh2add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! add %r1,%r26,%r1
1450:
1451: x185: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1452:
1453: x186: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1454:
1455: x187: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
1456:
1457: x188: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r26,%r1,%r1
1458:
1459: x189: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
1460:
1461: x190: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
1462:
1463: x191: zdep %r26,25,26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1464:
1465: x192: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1466:
1467: x193: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1468:
1469: x194: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1470:
1471: x195: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1472:
1473: x196: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1474:
1475: x197: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
1476:
1477: x198: zdep %r26,25,26,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1478:
1479: x199: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1480:
1481: x200: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1482:
1483: x201: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1484:
1485: x202: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1486:
1487: x203: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
1488:
1489: x204: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
1490:
1491: x205: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1492:
1493: x206: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
1494:
1495: x207: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
1496:
1497: x208: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
1498:
1499: x209: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
1500:
1501: x210: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
1502:
1503: x211: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
1504:
1505: x212: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
1506:
1507: x213: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0a0 ! sh2add %r1,%r26,%r1
1508:
1509: x214: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e2t04a0 ! sh3add %r1,%r26,%r1
1510:
1511: x215: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
1512:
1513: x216: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1514:
1515: x217: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1516:
1517: x218: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1518:
1519: x219: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1520:
1521: x220: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1522:
1523: x221: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
1524:
1525: x222: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1526:
1527: x223: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1528:
1529: x224: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
1530:
1531: x225: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1532:
1533: x226: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! zdep %r1,26,27,%r1
1534:
1535: x227: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
1536:
1537: x228: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
1538:
1539: x229: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r1,%r1
1540:
1541: x230: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_5t0 ! add %r1,%r26,%r1
1542:
1543: x231: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
1544:
1545: x232: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0 ! sh2add %r1,%r26,%r1
1546:
1547: x233: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0a0 ! sh2add %r1,%r26,%r1
1548:
1549: x234: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r1,%r1
1550:
1551: x235: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r1,%r1
1552:
1553: x236: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e4t08a0 ! sh1add %r1,%r1,%r1
1554:
1555: x237: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_3t0 ! sub %r1,%r26,%r1
1556:
1557: x238: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e2t04a0 ! sh3add %r1,%r1,%r1
1558:
1559: x239: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0ma0 ! sh1add %r1,%r1,%r1
1560:
1561: x240: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0 ! sh1add %r1,%r1,%r1
1562:
1563: x241: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0a0 ! sh1add %r1,%r1,%r1
1564:
1565: x242: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
1566:
1567: x243: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1568:
1569: x244: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
1570:
1571: x245: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
1572:
1573: x246: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1574:
1575: x247: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1576:
1577: x248: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1578:
1579: x249: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1580:
1581: x250: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
1582:
1583: x251: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
1584:
1585: x252: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1586:
1587: x253: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1588:
1589: x254: zdep %r26,24,25,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1590:
1591: x255: zdep %r26,23,24,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1592:
; Shared exit stubs for the $$mulI multiply table above.  On arrival
; %r1 holds a partial product and %r29 the running result.  Each stub
; combines %r1 into %r29, scaled by 1/2/3/4/5/8 as its name encodes
; (t0, 2t0, 3t0, 4t0, 5t0, 8t0), optionally first adding or (ma0)
; subtracting a small multiple of the multiplicand %r26 into %r1.
; comb,<> tests %r25: non-zero means more multiplier bits remain, so
; control branches to l0/l1/l2 (defined earlier in the file, outside
; this excerpt); otherwise the stub returns via bv 0(r31), the
; millicode return linkage.
; Delay slots are used aggressively here: ret_t0's "bv 0(r31)"
; executes e_t0's add in its delay slot, and e_shift's taken comb
; executes the zdep below it (multiplicand <<= 8) on the way to l2.
1593: ;1040 insts before this.
1594: ret_t0: bv 0(r31)
1595:
1596: e_t0: add %r29,%r1,%r29
1597:
1598: e_shift: comb,<> %r25,0,l2
1599:
1600: zdep %r26,23,24,%r26 ; %r26 <<= 8 ***********
1601: bv,n 0(r31)
1602: e_t0ma0: comb,<> %r25,0,l0
1603:
1604: sub %r1,%r26,%r1
1605: bv 0(r31)
1606: add %r29,%r1,%r29
1607: e_t0a0: comb,<> %r25,0,l0
1608:
1609: add %r1,%r26,%r1
1610: bv 0(r31)
1611: add %r29,%r1,%r29
1612: e_t02a0: comb,<> %r25,0,l0
1613:
1614: sh1add %r26,%r1,%r1
1615: bv 0(r31)
1616: add %r29,%r1,%r29
1617: e_t04a0: comb,<> %r25,0,l0
1618:
1619: sh2add %r26,%r1,%r1
1620: bv 0(r31)
1621: add %r29,%r1,%r29
1622: e_2t0: comb,<> %r25,0,l1
1623:
1624: sh1add %r1,%r29,%r29
1625: bv,n 0(r31)
1626: e_2t0a0: comb,<> %r25,0,l0
1627:
1628: sh1add %r1,%r26,%r1
1629: bv 0(r31)
1630: add %r29,%r1,%r29
1631: e2t04a0: sh1add %r26,%r1,%r1
1632:
1633: comb,<> %r25,0,l1
1634: sh1add %r1,%r29,%r29
1635: bv,n 0(r31)
1636: e_3t0: comb,<> %r25,0,l0
1637:
1638: sh1add %r1,%r1,%r1
1639: bv 0(r31)
1640: add %r29,%r1,%r29
1641: e_4t0: comb,<> %r25,0,l1
1642:
1643: sh2add %r1,%r29,%r29
1644: bv,n 0(r31)
1645: e_4t0a0: comb,<> %r25,0,l0
1646:
1647: sh2add %r1,%r26,%r1
1648: bv 0(r31)
1649: add %r29,%r1,%r29
1650: e4t08a0: sh1add %r26,%r1,%r1
1651:
1652: comb,<> %r25,0,l1
1653: sh2add %r1,%r29,%r29
1654: bv,n 0(r31)
1655: e_5t0: comb,<> %r25,0,l0
1656:
1657: sh2add %r1,%r1,%r1
1658: bv 0(r31)
1659: add %r29,%r1,%r29
1660: e_8t0: comb,<> %r25,0,l1
1661:
1662: sh3add %r1,%r29,%r29
1663: bv,n 0(r31)
1664: e_8t0a0: comb,<> %r25,0,l0
1665:
1666: sh3add %r1,%r26,%r1
1667: bv 0(r31)
1668: add %r29,%r1,%r29
1669:
1670: .procend
1671: .end
1672:
1673: .import $$divI_2,millicode
1674: .import $$divI_3,millicode
1675: .import $$divI_4,millicode
1676: .import $$divI_5,millicode
1677: .import $$divI_6,millicode
1678: .import $$divI_7,millicode
1679: .import $$divI_8,millicode
1680: .import $$divI_9,millicode
1681: .import $$divI_10,millicode
1682: .import $$divI_12,millicode
1683: .import $$divI_14,millicode
1684: .import $$divI_15,millicode
1685: .export $$divI,millicode
1686: .export $$divoI,millicode
;-----------------------------------------------------------------------
; $$divoI / $$divI -- signed 32-bit integer division millicode
; (quotient only).
; Registers use the millicode aliases arg0, arg1, retreg, temp whose
; .equ definitions appear earlier in this file (outside this excerpt);
; presumably arg0 = dividend, arg1 = divisor, retreg = quotient --
; confirm against those equates.  The millicode return address is in
; r31, hence every exit is "bv 0(r31)".
; $$divoI first screens for divisor == -1, the only case that can
; overflow (INT_MIN / -1), and traps on it at negative1; $$divI enters
; just past that check.  Divisors 0..15 are dispatched through a blr
; jump table; everything else runs the general path: take |dividend|,
; prime the V-bit from the divisor's sign with "ds 0,temp,0", then
; perform 32 ds/addc divide steps that develop the quotient one bit
; per step, and finally restore the quotient's sign from the operands'
; signs (xor,>= nullifies the negate when the signs match).
;-----------------------------------------------------------------------
1687: $$divoI:
1688: .proc
1689: .callinfo NO_CALLS
1690: comib,=,n -1,arg1,negative1 ; when divisor == -1
1691: $$divI:
1692: comib,>>=,n 15,arg1,small_divisor
1693: add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
1694: normal1:
1695: sub 0,retreg,retreg ; make it positive
1696: sub 0,arg1,temp ; clear carry,
1697: ; negate the divisor
1698: ds 0,temp,0 ; set V-bit to the comple-
1699: ; ment of the divisor sign
1700: add retreg,retreg,retreg ; shift msb bit into carry
1701: ds r0,arg1,temp ; 1st divide step, if no carry
1702: addc retreg,retreg,retreg ; shift retreg with/into carry
1703: ds temp,arg1,temp ; 2nd divide step
1704: addc retreg,retreg,retreg ; shift retreg with/into carry
1705: ds temp,arg1,temp ; 3rd divide step
1706: addc retreg,retreg,retreg ; shift retreg with/into carry
1707: ds temp,arg1,temp ; 4th divide step
1708: addc retreg,retreg,retreg ; shift retreg with/into carry
1709: ds temp,arg1,temp ; 5th divide step
1710: addc retreg,retreg,retreg ; shift retreg with/into carry
1711: ds temp,arg1,temp ; 6th divide step
1712: addc retreg,retreg,retreg ; shift retreg with/into carry
1713: ds temp,arg1,temp ; 7th divide step
1714: addc retreg,retreg,retreg ; shift retreg with/into carry
1715: ds temp,arg1,temp ; 8th divide step
1716: addc retreg,retreg,retreg ; shift retreg with/into carry
1717: ds temp,arg1,temp ; 9th divide step
1718: addc retreg,retreg,retreg ; shift retreg with/into carry
1719: ds temp,arg1,temp ; 10th divide step
1720: addc retreg,retreg,retreg ; shift retreg with/into carry
1721: ds temp,arg1,temp ; 11th divide step
1722: addc retreg,retreg,retreg ; shift retreg with/into carry
1723: ds temp,arg1,temp ; 12th divide step
1724: addc retreg,retreg,retreg ; shift retreg with/into carry
1725: ds temp,arg1,temp ; 13th divide step
1726: addc retreg,retreg,retreg ; shift retreg with/into carry
1727: ds temp,arg1,temp ; 14th divide step
1728: addc retreg,retreg,retreg ; shift retreg with/into carry
1729: ds temp,arg1,temp ; 15th divide step
1730: addc retreg,retreg,retreg ; shift retreg with/into carry
1731: ds temp,arg1,temp ; 16th divide step
1732: addc retreg,retreg,retreg ; shift retreg with/into carry
1733: ds temp,arg1,temp ; 17th divide step
1734: addc retreg,retreg,retreg ; shift retreg with/into carry
1735: ds temp,arg1,temp ; 18th divide step
1736: addc retreg,retreg,retreg ; shift retreg with/into carry
1737: ds temp,arg1,temp ; 19th divide step
1738: addc retreg,retreg,retreg ; shift retreg with/into carry
1739: ds temp,arg1,temp ; 20th divide step
1740: addc retreg,retreg,retreg ; shift retreg with/into carry
1741: ds temp,arg1,temp ; 21st divide step
1742: addc retreg,retreg,retreg ; shift retreg with/into carry
1743: ds temp,arg1,temp ; 22nd divide step
1744: addc retreg,retreg,retreg ; shift retreg with/into carry
1745: ds temp,arg1,temp ; 23rd divide step
1746: addc retreg,retreg,retreg ; shift retreg with/into carry
1747: ds temp,arg1,temp ; 24th divide step
1748: addc retreg,retreg,retreg ; shift retreg with/into carry
1749: ds temp,arg1,temp ; 25th divide step
1750: addc retreg,retreg,retreg ; shift retreg with/into carry
1751: ds temp,arg1,temp ; 26th divide step
1752: addc retreg,retreg,retreg ; shift retreg with/into carry
1753: ds temp,arg1,temp ; 27th divide step
1754: addc retreg,retreg,retreg ; shift retreg with/into carry
1755: ds temp,arg1,temp ; 28th divide step
1756: addc retreg,retreg,retreg ; shift retreg with/into carry
1757: ds temp,arg1,temp ; 29th divide step
1758: addc retreg,retreg,retreg ; shift retreg with/into carry
1759: ds temp,arg1,temp ; 30th divide step
1760: addc retreg,retreg,retreg ; shift retreg with/into carry
1761: ds temp,arg1,temp ; 31st divide step
1762: addc retreg,retreg,retreg ; shift retreg with/into carry
1763: ds temp,arg1,temp ; 32nd divide step,
1764: addc retreg,retreg,retreg ; shift last retreg bit into retreg
1765: xor,>= arg0,arg1,0 ; get correct sign of quotient
1766: sub 0,retreg,retreg ; based on operand signs
1767: bv,n 0(r31)
1768: nop
1769: ;______________________________________________________________________
1770: small_divisor:
1771: blr,n arg1,r0
1772: nop
; blr above branches into the following table; each divisor owns one
; 8-byte slot of exactly two instructions, so slot boundaries must not
; be disturbed.  Divisor 0 traps, 1 copies the dividend, 11 and 13 fall
; back to the general path, the rest tail-branch to dedicated
; $$divI_N millicode routines.
1773: ; table for divisor == 0,1, ... ,15
1774: addit,= 0,arg1,r0 ; trap if divisor == 0
1775: nop
1776: bv 0(r31) ; divisor == 1
1777: copy arg0,retreg
1778: b,n $$divI_2 ; divisor == 2
1779: nop
1780: b,n $$divI_3 ; divisor == 3
1781: nop
1782: b,n $$divI_4 ; divisor == 4
1783: nop
1784: b,n $$divI_5 ; divisor == 5
1785: nop
1786: b,n $$divI_6 ; divisor == 6
1787: nop
1788: b,n $$divI_7 ; divisor == 7
1789: nop
1790: b,n $$divI_8 ; divisor == 8
1791: nop
1792: b,n $$divI_9 ; divisor == 9
1793: nop
1794: b,n $$divI_10 ; divisor == 10
1795: nop
1796: b normal1 ; divisor == 11
1797: add,>= 0,arg0,retreg
1798: b,n $$divI_12 ; divisor == 12
1799: nop
1800: b normal1 ; divisor == 13
1801: add,>= 0,arg0,retreg
1802: b,n $$divI_14 ; divisor == 14
1803: nop
1804: b,n $$divI_15 ; divisor == 15
1805: nop
1806: ;______________________________________________________________________
1807: negative1:
1808: sub 0,arg0,retreg ; result is negation of dividend
1809: bv 0(r31)
1810: addo arg0,arg1,r0 ; trap iff dividend==0x80000000 && divisor==-1
1811: .procend
1812: .end
; "CVSweb" page-footer artifact from the web-annotated listing; not part of milli.S itself.