author | Werner Koch <wk@gnupg.org> | 1998-01-12 11:18:17 +0100
---|---|---
committer | Werner Koch <wk@gnupg.org> | 1998-01-12 11:18:17 +0100
commit | ed3609258828942808702a07ef2986d7328efa3f (patch) |
tree | 185f17c055b38fee37bc0090789dcf96a1535c8c /mpi/sparc32 |
parent | patchlevel 2 (diff) |
download | gnupg2-ed3609258828942808702a07ef2986d7328efa3f.tar.xz, gnupg2-ed3609258828942808702a07ef2986d7328efa3f.zip |
started with trust stuff
Diffstat (limited to 'mpi/sparc32')
-rw-r--r-- | mpi/sparc32/distfiles | 4
-rw-r--r-- | mpi/sparc32/mpih-add1.S | 237
-rw-r--r-- | mpi/sparc32/udiv.S | 188
3 files changed, 429 insertions, 0 deletions
diff --git a/mpi/sparc32/distfiles b/mpi/sparc32/distfiles
new file mode 100644
index 000000000..7933edc8f
--- /dev/null
+++ b/mpi/sparc32/distfiles
@@ -0,0 +1,4 @@
+
+mpih-add1.S
+udiv.S
+
diff --git a/mpi/sparc32/mpih-add1.S b/mpi/sparc32/mpih-add1.S
new file mode 100644
index 000000000..04315d106
--- /dev/null
+++ b/mpi/sparc32/mpih-add1.S
@@ -0,0 +1,237 @@
+! SPARC __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+! sum in a third limb vector.
+
+! Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+
+! This file is part of the GNU MP Library.
+
+! The GNU MP Library is free software; you can redistribute it and/or modify
+! it under the terms of the GNU Library General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or (at your
+! option) any later version.
+
+! The GNU MP Library is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+! License for more details.
+
+! You should have received a copy of the GNU Library General Public License
+! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+! the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+! MA 02111-1307, USA.
+
+
+/*******************
+ * mpi_limb_t
+ * mpihelp_add_n( mpi_ptr_t res_ptr,
+ *		  mpi_ptr_t s1_ptr,
+ *		  mpi_ptr_t s2_ptr,
+ *		  mpi_size_t size)
+ */
+
+! INPUT PARAMETERS
+#define res_ptr	%o0
+#define s1_ptr	%o1
+#define s2_ptr	%o2
+#define size	%o3
+
+#include "sysdep.h"
+
+	.text
+	.align	4
+	.global	C_SYMBOL_NAME(mpihelp_add_n)
+C_SYMBOL_NAME(mpihelp_add_n):
+	xor	s2_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L1			! branch if alignment differs
+	nop
+! **  V1a  **
+L0:	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	be	L_v1			! if no, branch
+	nop
+/* Add least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	addcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+L_v1:	addx	%g0,%g0,%o4		! save cy in register
+	cmp	size,2			! if size < 2 ...
+	bl	Lend2			! ... branch to tail code
+	subcc	%g0,%o4,%g0		! restore cy
+
+	ld	[s1_ptr+0],%g4
+	addcc	size,-10,size
+	ld	[s1_ptr+4],%g1
+	ldd	[s2_ptr+0],%g2
+	blt	Lfin1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop1:	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+16],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+20],%g1
+	ldd	[s2_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+24],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+28],%g1
+	ldd	[s2_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+32],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+36],%g1
+	ldd	[s2_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1:	addcc	size,8-2,size
+	blt	Lend1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 2 limbs until less than 2 limbs remain */
+Loope1:	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope1
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1:	addxcc	%g4,%g2,%o4
+	addxcc	%g1,%g3,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add last limb */
+	ld	[s1_ptr+8],%g4
+	ld	[s2_ptr+8],%g2
+	addxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1:	retl
+	addx	%g0,%g0,%o0		! return carry-out from most sign. limb
+
+L1:	xor	s1_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L2
+	nop
+! **  V1b  **
+	mov	s2_ptr,%g1
+	mov	s1_ptr,s2_ptr
+	b	L0
+	mov	%g1,s1_ptr
+
+! **  V2  **
+/* If we come here, the alignment of s1_ptr and res_ptr as well as the
+   alignment of s2_ptr and res_ptr differ.  Since there are only two ways
+   things can be aligned (that we care about) we now know that the alignment
+   of s1_ptr and s2_ptr are the same.  */
+
+L2:	cmp	size,1
+	be	Ljone
+	nop
+	andcc	s1_ptr,4,%g0		! s1_ptr unaligned? Side effect: cy=0
+	be	L_v2			! if no, branch
+	nop
+/* Add least significant limb separately to align s1_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	addcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v2:	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	blt	Lfin2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	ldd	[s1_ptr+8],%g2
+	ldd	[s2_ptr+8],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+8]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+12]
+	ldd	[s1_ptr+16],%g2
+	ldd	[s2_ptr+16],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+16]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+20]
+	ldd	[s1_ptr+24],%g2
+	ldd	[s2_ptr+24],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+24]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+28]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop2
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin2:	addcc	size,8-2,size
+	blt	Lend2
+	subcc	%g0,%o4,%g0		! restore cy
+Loope2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope2
+	subcc	%g0,%o4,%g0		! restore cy
+Lend2:	andcc	size,1,%g0
+	be	Lret2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add last limb */
+Ljone:	ld	[s1_ptr],%g4
+	ld	[s2_ptr],%g2
+	addxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+
+Lret2:	retl
+	addx	%g0,%g0,%o0		! return carry-out from most sign. limb
+
+
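For readers who don't speak SPARC assembly: the unrolling, the `ldd`/`std` double-word accesses, and the recurring `addx %g0,%g0,%o4` / `subcc %g0,%o4,%g0` pairs (which save the carry flag to a register across flag-clobbering instructions and re-materialize it) are all scheduling tricks; the contract itself is a plain carry-propagating add. Below is a minimal C sketch of that contract. It is my illustration, not part of the commit; `ref_add_n` is a hypothetical name, and 32-bit limbs are assumed.

```c
#include <stdint.h>
#include <stddef.h>

typedef uint32_t mpi_limb_t;   /* assumption: 32-bit limbs on sparc32 */

/* Hypothetical reference for mpihelp_add_n: res = s1 + s2, limb by
   limb, propagating carry; returns the carry out of the top limb. */
static mpi_limb_t
ref_add_n (mpi_limb_t *res_ptr, const mpi_limb_t *s1_ptr,
           const mpi_limb_t *s2_ptr, size_t size)
{
  mpi_limb_t cy = 0;
  for (size_t i = 0; i < size; i++)
    {
      mpi_limb_t t  = s1_ptr[i] + s2_ptr[i];  /* may wrap mod 2^32 */
      mpi_limb_t c1 = t < s1_ptr[i];          /* carry from a + b   */
      mpi_limb_t s  = t + cy;                 /* add incoming carry */
      mpi_limb_t c2 = s < t;                  /* carry from + cy    */
      res_ptr[i] = s;
      cy = c1 | c2;                           /* at most one is set */
    }
  return cy;
}
```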
diff --git a/mpi/sparc32/udiv.S b/mpi/sparc32/udiv.S
new file mode 100644
index 000000000..3e2376cfc
--- /dev/null
+++ b/mpi/sparc32/udiv.S
@@ -0,0 +1,188 @@
+! SPARC v7 __udiv_qrnnd division support, used from longlong.h.
+! This is for v7 CPUs without a floating-point unit.
+
+! Copyright (C) 1993, 1994, 1996 Free Software Foundation, Inc.
+
+! This file is part of the GNU MP Library.
+
+! The GNU MP Library is free software; you can redistribute it and/or modify
+! it under the terms of the GNU Library General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or (at your
+! option) any later version.
+
+! The GNU MP Library is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+! License for more details.
+
+! You should have received a copy of the GNU Library General Public License
+! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+! the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+! MA 02111-1307, USA.
+
+
+! INPUT PARAMETERS
+! rem_ptr	o0
+! n1		o1
+! n0		o2
+! d		o3
+
+#include "sysdep.h"
+
+	.text
+	.align	4
+	.global	C_SYMBOL_NAME(__udiv_qrnnd)
+C_SYMBOL_NAME(__udiv_qrnnd):
+	tst	%o3
+	bneg	Largedivisor
+	mov	8,%g1
+
+	b	Lp1
+	addxcc	%o2,%o2,%o2
+
+Lplop:	bcc	Ln1
+	addxcc	%o2,%o2,%o2
+Lp1:	addx	%o1,%o1,%o1
+	subcc	%o1,%o3,%o4
+	bcc	Ln2
+	addxcc	%o2,%o2,%o2
+Lp2:	addx	%o1,%o1,%o1
+	subcc	%o1,%o3,%o4
+	bcc	Ln3
+	addxcc	%o2,%o2,%o2
+Lp3:	addx	%o1,%o1,%o1
+	subcc	%o1,%o3,%o4
+	bcc	Ln4
+	addxcc	%o2,%o2,%o2
+Lp4:	addx	%o1,%o1,%o1
+	addcc	%g1,-1,%g1
+	bne	Lplop
+	subcc	%o1,%o3,%o4
+	bcc	Ln5
+	addxcc	%o2,%o2,%o2
+Lp5:	st	%o1,[%o0]
+	retl
+	xnor	%g0,%o2,%o0
+
+Lnlop:	bcc	Lp1
+	addxcc	%o2,%o2,%o2
+Ln1:	addx	%o4,%o4,%o4
+	subcc	%o4,%o3,%o1
+	bcc	Lp2
+	addxcc	%o2,%o2,%o2
+Ln2:	addx	%o4,%o4,%o4
+	subcc	%o4,%o3,%o1
+	bcc	Lp3
+	addxcc	%o2,%o2,%o2
+Ln3:	addx	%o4,%o4,%o4
+	subcc	%o4,%o3,%o1
+	bcc	Lp4
+	addxcc	%o2,%o2,%o2
+Ln4:	addx	%o4,%o4,%o4
+	addcc	%g1,-1,%g1
+	bne	Lnlop
+	subcc	%o4,%o3,%o1
+	bcc	Lp5
+	addxcc	%o2,%o2,%o2
+Ln5:	st	%o4,[%o0]
+	retl
+	xnor	%g0,%o2,%o0
+
+Largedivisor:
+	and	%o2,1,%o5	! %o5 = n0 & 1
+
+	srl	%o2,1,%o2
+	sll	%o1,31,%g2
+	or	%g2,%o2,%o2	! %o2 = lo(n1n0 >> 1)
+	srl	%o1,1,%o1	! %o1 = hi(n1n0 >> 1)
+
+	and	%o3,1,%g2
+	srl	%o3,1,%g3	! %g3 = floor(d / 2)
+	add	%g3,%g2,%g3	! %g3 = ceil(d / 2)
+
+	b	LLp1
+	addxcc	%o2,%o2,%o2
+
+LLplop:	bcc	LLn1
+	addxcc	%o2,%o2,%o2
+LLp1:	addx	%o1,%o1,%o1
+	subcc	%o1,%g3,%o4
+	bcc	LLn2
+	addxcc	%o2,%o2,%o2
+LLp2:	addx	%o1,%o1,%o1
+	subcc	%o1,%g3,%o4
+	bcc	LLn3
+	addxcc	%o2,%o2,%o2
+LLp3:	addx	%o1,%o1,%o1
+	subcc	%o1,%g3,%o4
+	bcc	LLn4
+	addxcc	%o2,%o2,%o2
+LLp4:	addx	%o1,%o1,%o1
+	addcc	%g1,-1,%g1
+	bne	LLplop
+	subcc	%o1,%g3,%o4
+	bcc	LLn5
+	addxcc	%o2,%o2,%o2
+LLp5:	add	%o1,%o1,%o1	! << 1
+	tst	%g2
+	bne	Oddp
+	add	%o5,%o1,%o1
+	st	%o1,[%o0]
+	retl
+	xnor	%g0,%o2,%o0
+
+LLnlop:	bcc	LLp1
+	addxcc	%o2,%o2,%o2
+LLn1:	addx	%o4,%o4,%o4
+	subcc	%o4,%g3,%o1
+	bcc	LLp2
+	addxcc	%o2,%o2,%o2
+LLn2:	addx	%o4,%o4,%o4
+	subcc	%o4,%g3,%o1
+	bcc	LLp3
+	addxcc	%o2,%o2,%o2
+LLn3:	addx	%o4,%o4,%o4
+	subcc	%o4,%g3,%o1
+	bcc	LLp4
+	addxcc	%o2,%o2,%o2
+LLn4:	addx	%o4,%o4,%o4
+	addcc	%g1,-1,%g1
+	bne	LLnlop
+	subcc	%o4,%g3,%o1
+	bcc	LLp5
+	addxcc	%o2,%o2,%o2
+LLn5:	add	%o4,%o4,%o4	! << 1
+	tst	%g2
+	bne	Oddn
+	add	%o5,%o4,%o4
+	st	%o4,[%o0]
+	retl
+	xnor	%g0,%o2,%o0
+
+Oddp:	xnor	%g0,%o2,%o2
+	! q' in %o2. r' in %o1
+	addcc	%o1,%o2,%o1
+	bcc	LLp6
+	addx	%o2,0,%o2
+	sub	%o1,%o3,%o1
+LLp6:	subcc	%o1,%o3,%g0
+	bcs	LLp7
+	subx	%o2,-1,%o2
+	sub	%o1,%o3,%o1
+LLp7:	st	%o1,[%o0]
+	retl
+	mov	%o2,%o0
+
+Oddn:	xnor	%g0,%o2,%o2
+	! q' in %o2. r' in %o4
+	addcc	%o4,%o2,%o4
+	bcc	LLn6
+	addx	%o2,0,%o2
+	sub	%o4,%o3,%o4
+LLn6:	subcc	%o4,%o3,%g0
+	bcs	LLn7
+	subx	%o2,-1,%o2
+	sub	%o4,%o3,%o4
+LLn7:	st	%o4,[%o0]
+	retl
+	mov	%o2,%o0
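udiv.S implements the shift-and-subtract division behind longlong.h's udiv_qrnnd step: it iterates over the 32 quotient bits, and the `Largedivisor` path first halves both the dividend and the divisor (using ceil(d/2)) so the intermediate remainder cannot overflow, fixing up the odd-divisor case in `Oddp`/`Oddn`. The contract is easier to state in C. The sketch below is my illustration, not shipped code; `ref_udiv_qrnnd` is a hypothetical name, 32-bit limbs are assumed, and a 64-bit type stands in for the bit-by-bit loop.

```c
#include <stdint.h>

typedef uint32_t mpi_limb_t;   /* assumption: 32-bit limbs */

/* Hypothetical reference for __udiv_qrnnd: divide the two-limb value
   n1:n0 by d, store the remainder through rem_ptr, return the quotient.
   The usual longlong.h precondition is n1 < d, so the quotient fits in
   a single limb. */
static mpi_limb_t
ref_udiv_qrnnd (mpi_limb_t *rem_ptr, mpi_limb_t n1,
                mpi_limb_t n0, mpi_limb_t d)
{
  uint64_t n = ((uint64_t) n1 << 32) | n0;   /* concatenate the limbs */
  *rem_ptr = (mpi_limb_t) (n % d);
  return (mpi_limb_t) (n / d);
}
```

The point of the assembly version is that SPARC v7 has no integer divide instruction, so the quotient is developed one bit per `subcc`/branch pair; the duplicated `Lp*`/`Ln*` label chains are a non-restoring scheme that avoids undoing a subtraction when it goes negative.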