Diffstat (limited to 'plugins/MirOTR/Libgcrypt/mpi/sparc32')
-rw-r--r--  plugins/MirOTR/Libgcrypt/mpi/sparc32/Manifest       |  24
-rw-r--r--  plugins/MirOTR/Libgcrypt/mpi/sparc32/distfiles      |   6
-rw-r--r--  plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-add1.S    | 239
-rw-r--r--  plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-lshift.S  |  97
-rw-r--r--  plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-rshift.S  |  93
-rw-r--r--  plugins/MirOTR/Libgcrypt/mpi/sparc32/udiv.S         | 195
6 files changed, 654 insertions, 0 deletions
diff --git a/plugins/MirOTR/Libgcrypt/mpi/sparc32/Manifest b/plugins/MirOTR/Libgcrypt/mpi/sparc32/Manifest
new file mode 100644
index 0000000000..d279229b09
--- /dev/null
+++ b/plugins/MirOTR/Libgcrypt/mpi/sparc32/Manifest
@@ -0,0 +1,24 @@
+# Manifest - checksums
+# Copyright 2003 Free Software Foundation, Inc.
+#
+# This file is part of Libgcrypt.
+#
+# Libgcrypt is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation; either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# Libgcrypt is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+mpih-lshift.S
+mpih-rshift.S
+mpih-add1.S
+udiv.S
+$names$ iQCVAwUAP+LmaDEAnp832S/7AQISHgP/Z5orU+CPKBeRFCogSQDm4p7J2VpDovU6mtfMTdjhqWuZG0U6y8WqH0aj3USfziOhtc8YjQHQ+97g3+EnIWZgLjKacWC6pScY/QbATEpF1D0Wrcea5rk3qR1t7isdBVVOrxedZ5vuj5Op2zx/0OlPI+wt6fTtW88BdG/a6w/ZU/8==Py6h
diff --git a/plugins/MirOTR/Libgcrypt/mpi/sparc32/distfiles b/plugins/MirOTR/Libgcrypt/mpi/sparc32/distfiles
new file mode 100644
index 0000000000..a20f18eade
--- /dev/null
+++ b/plugins/MirOTR/Libgcrypt/mpi/sparc32/distfiles
@@ -0,0 +1,6 @@
+Manifest
+mpih-lshift.S
+mpih-rshift.S
+mpih-add1.S
+udiv.S
+
diff --git a/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-add1.S b/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-add1.S
new file mode 100644
index 0000000000..61a80ca320
--- /dev/null
+++ b/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-add1.S
@@ -0,0 +1,239 @@
+/* SPARC _add_n -- Add two limb vectors of the same length > 0 and store
+ * sum in a third limb vector.
+ *
+ * Copyright (C) 1995, 1996, 1998,
+ * 2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+
+
+/*******************
+ * mpi_limb_t
+ * _gcry_mpih_add_n( mpi_ptr_t res_ptr,
+ * mpi_ptr_t s1_ptr,
+ * mpi_ptr_t s2_ptr,
+ * mpi_size_t size)
+ */
+
+! INPUT PARAMETERS
+#define res_ptr %o0
+#define s1_ptr %o1
+#define s2_ptr %o2
+#define size %o3
+
+#include "sysdep.h"
+
+ .text
+ .align 4
+ .global C_SYMBOL_NAME(_gcry_mpih_add_n)
+C_SYMBOL_NAME(_gcry_mpih_add_n):
+ xor s2_ptr,res_ptr,%g1
+ andcc %g1,4,%g0
+ bne L1 ! branch if alignment differs
+ nop
+! ** V1a **
+L0: andcc res_ptr,4,%g0 ! res_ptr unaligned? Side effect: cy=0
+ be L_v1 ! if no, branch
+ nop
+/* Add least significant limb separately to align res_ptr and s2_ptr */
+ ld [s1_ptr],%g4
+ add s1_ptr,4,s1_ptr
+ ld [s2_ptr],%g2
+ add s2_ptr,4,s2_ptr
+ add size,-1,size
+ addcc %g4,%g2,%o4
+ st %o4,[res_ptr]
+ add res_ptr,4,res_ptr
+L_v1: addx %g0,%g0,%o4 ! save cy in register
+ cmp size,2 ! if size < 2 ...
+ bl Lend2 ! ... branch to tail code
+ subcc %g0,%o4,%g0 ! restore cy
+
+ ld [s1_ptr+0],%g4
+ addcc size,-10,size
+ ld [s1_ptr+4],%g1
+ ldd [s2_ptr+0],%g2
+ blt Lfin1
+ subcc %g0,%o4,%g0 ! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop1: addxcc %g4,%g2,%o4
+ ld [s1_ptr+8],%g4
+ addxcc %g1,%g3,%o5
+ ld [s1_ptr+12],%g1
+ ldd [s2_ptr+8],%g2
+ std %o4,[res_ptr+0]
+ addxcc %g4,%g2,%o4
+ ld [s1_ptr+16],%g4
+ addxcc %g1,%g3,%o5
+ ld [s1_ptr+20],%g1
+ ldd [s2_ptr+16],%g2
+ std %o4,[res_ptr+8]
+ addxcc %g4,%g2,%o4
+ ld [s1_ptr+24],%g4
+ addxcc %g1,%g3,%o5
+ ld [s1_ptr+28],%g1
+ ldd [s2_ptr+24],%g2
+ std %o4,[res_ptr+16]
+ addxcc %g4,%g2,%o4
+ ld [s1_ptr+32],%g4
+ addxcc %g1,%g3,%o5
+ ld [s1_ptr+36],%g1
+ ldd [s2_ptr+32],%g2
+ std %o4,[res_ptr+24]
+ addx %g0,%g0,%o4 ! save cy in register
+ addcc size,-8,size
+ add s1_ptr,32,s1_ptr
+ add s2_ptr,32,s2_ptr
+ add res_ptr,32,res_ptr
+ bge Loop1
+ subcc %g0,%o4,%g0 ! restore cy
+
+Lfin1: addcc size,8-2,size
+ blt Lend1
+ subcc %g0,%o4,%g0 ! restore cy
+/* Add blocks of 2 limbs until less than 2 limbs remain */
+Loope1: addxcc %g4,%g2,%o4
+ ld [s1_ptr+8],%g4
+ addxcc %g1,%g3,%o5
+ ld [s1_ptr+12],%g1
+ ldd [s2_ptr+8],%g2
+ std %o4,[res_ptr+0]
+ addx %g0,%g0,%o4 ! save cy in register
+ addcc size,-2,size
+ add s1_ptr,8,s1_ptr
+ add s2_ptr,8,s2_ptr
+ add res_ptr,8,res_ptr
+ bge Loope1
+ subcc %g0,%o4,%g0 ! restore cy
+Lend1: addxcc %g4,%g2,%o4
+ addxcc %g1,%g3,%o5
+ std %o4,[res_ptr+0]
+ addx %g0,%g0,%o4 ! save cy in register
+
+ andcc size,1,%g0
+ be Lret1
+ subcc %g0,%o4,%g0 ! restore cy
+/* Add last limb */
+ ld [s1_ptr+8],%g4
+ ld [s2_ptr+8],%g2
+ addxcc %g4,%g2,%o4
+ st %o4,[res_ptr+8]
+
+Lret1: retl
+ addx %g0,%g0,%o0 ! return carry-out from most sign. limb
+
+L1: xor s1_ptr,res_ptr,%g1
+ andcc %g1,4,%g0
+ bne L2
+ nop
+! ** V1b **
+ mov s2_ptr,%g1
+ mov s1_ptr,s2_ptr
+ b L0
+ mov %g1,s1_ptr
+
+! ** V2 **
+/* If we come here, the alignment of s1_ptr and res_ptr as well as the
+ alignment of s2_ptr and res_ptr differ. Since there are only two ways
+ things can be aligned (that we care about) we now know that the alignment
+ of s1_ptr and s2_ptr are the same. */
+
+L2: cmp size,1
+ be Ljone
+ nop
+ andcc s1_ptr,4,%g0 ! s1_ptr unaligned? Side effect: cy=0
+ be L_v2 ! if no, branch
+ nop
+/* Add least significant limb separately to align s1_ptr and s2_ptr */
+ ld [s1_ptr],%g4
+ add s1_ptr,4,s1_ptr
+ ld [s2_ptr],%g2
+ add s2_ptr,4,s2_ptr
+ add size,-1,size
+ addcc %g4,%g2,%o4
+ st %o4,[res_ptr]
+ add res_ptr,4,res_ptr
+
+L_v2: addx %g0,%g0,%o4 ! save cy in register
+ addcc size,-8,size
+ blt Lfin2
+ subcc %g0,%o4,%g0 ! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop2: ldd [s1_ptr+0],%g2
+ ldd [s2_ptr+0],%o4
+ addxcc %g2,%o4,%g2
+ st %g2,[res_ptr+0]
+ addxcc %g3,%o5,%g3
+ st %g3,[res_ptr+4]
+ ldd [s1_ptr+8],%g2
+ ldd [s2_ptr+8],%o4
+ addxcc %g2,%o4,%g2
+ st %g2,[res_ptr+8]
+ addxcc %g3,%o5,%g3
+ st %g3,[res_ptr+12]
+ ldd [s1_ptr+16],%g2
+ ldd [s2_ptr+16],%o4
+ addxcc %g2,%o4,%g2
+ st %g2,[res_ptr+16]
+ addxcc %g3,%o5,%g3
+ st %g3,[res_ptr+20]
+ ldd [s1_ptr+24],%g2
+ ldd [s2_ptr+24],%o4
+ addxcc %g2,%o4,%g2
+ st %g2,[res_ptr+24]
+ addxcc %g3,%o5,%g3
+ st %g3,[res_ptr+28]
+ addx %g0,%g0,%o4 ! save cy in register
+ addcc size,-8,size
+ add s1_ptr,32,s1_ptr
+ add s2_ptr,32,s2_ptr
+ add res_ptr,32,res_ptr
+ bge Loop2
+ subcc %g0,%o4,%g0 ! restore cy
+
+Lfin2: addcc size,8-2,size
+ blt Lend2
+ subcc %g0,%o4,%g0 ! restore cy
+Loope2: ldd [s1_ptr+0],%g2
+ ldd [s2_ptr+0],%o4
+ addxcc %g2,%o4,%g2
+ st %g2,[res_ptr+0]
+ addxcc %g3,%o5,%g3
+ st %g3,[res_ptr+4]
+ addx %g0,%g0,%o4 ! save cy in register
+ addcc size,-2,size
+ add s1_ptr,8,s1_ptr
+ add s2_ptr,8,s2_ptr
+ add res_ptr,8,res_ptr
+ bge Loope2
+ subcc %g0,%o4,%g0 ! restore cy
+Lend2: andcc size,1,%g0
+ be Lret2
+ subcc %g0,%o4,%g0 ! restore cy
+/* Add last limb */
+Ljone: ld [s1_ptr],%g4
+ ld [s2_ptr],%g2
+ addxcc %g4,%g2,%o4
+ st %o4,[res_ptr]
+
+Lret2: retl
+ addx %g0,%g0,%o0 ! return carry-out from most sign. limb
+
+
+
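For readers not fluent in SPARC assembly, here is a minimal C sketch of what _gcry_mpih_add_n computes: it adds two little-endian limb vectors of equal length and returns the carry out of the most significant limb. The name ref_add_n and the use of uint32_t limbs are illustrative assumptions, not part of this tree; the real routine above computes the same result but unrolls the loop and uses doubleword loads (ldd) when the pointer alignments allow it.

#include <stdint.h>
#include <stddef.h>

/* Illustrative reference only (not part of the tree): add two limb
 * vectors of the same length and return the final carry, mirroring the
 * semantics of _gcry_mpih_add_n above with 32-bit limbs. */
static uint32_t
ref_add_n (uint32_t *res, const uint32_t *s1, const uint32_t *s2, size_t size)
{
  uint32_t cy = 0;                    /* carry between limbs, 0 or 1 */
  for (size_t i = 0; i < size; i++)
    {
      uint32_t b  = s2[i] + cy;       /* add the incoming carry first */
      uint32_t c1 = (b < cy);         /* carry from that addition */
      uint32_t s  = s1[i] + b;
      uint32_t c2 = (s < s1[i]);      /* carry from the limb addition */
      res[i] = s;
      cy = c1 | c2;                   /* at most one of c1/c2 can be set */
    }
  return cy;
}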
diff --git a/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-lshift.S b/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-lshift.S
new file mode 100644
index 0000000000..3422ab04e5
--- /dev/null
+++ b/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-lshift.S
@@ -0,0 +1,97 @@
+/* sparc lshift
+ *
+ * Copyright (C) 1995, 1996, 1998,
+ * 2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+
+! INPUT PARAMETERS
+! res_ptr %o0
+! src_ptr %o1
+! size %o2
+! cnt %o3
+
+#include "sysdep.h"
+
+ .text
+ .align 4
+ .global C_SYMBOL_NAME(_gcry_mpih_lshift)
+C_SYMBOL_NAME(_gcry_mpih_lshift):
+ sll %o2,2,%g1
+ add %o1,%g1,%o1 ! make %o1 point at end of src
+ ld [%o1-4],%g2 ! load first limb
+ sub %g0,%o3,%o5 ! negate shift count
+ add %o0,%g1,%o0 ! make %o0 point at end of res
+ add %o2,-1,%o2
+ andcc %o2,4-1,%g4 ! number of limbs in first loop
+ srl %g2,%o5,%g1 ! compute function result
+ be L0 ! if multiple of 4 limbs, skip first loop
+ st %g1,[%sp+80]
+
+ sub %o2,%g4,%o2 ! adjust count for main loop
+
+Loop0: ld [%o1-8],%g3
+ add %o0,-4,%o0
+ add %o1,-4,%o1
+ addcc %g4,-1,%g4
+ sll %g2,%o3,%o4
+ srl %g3,%o5,%g1
+ mov %g3,%g2
+ or %o4,%g1,%o4
+ bne Loop0
+ st %o4,[%o0+0]
+
+L0: tst %o2
+ be Lend
+ nop
+
+Loop: ld [%o1-8],%g3
+ add %o0,-16,%o0
+ addcc %o2,-4,%o2
+ sll %g2,%o3,%o4
+ srl %g3,%o5,%g1
+
+ ld [%o1-12],%g2
+ sll %g3,%o3,%g4
+ or %o4,%g1,%o4
+ st %o4,[%o0+12]
+ srl %g2,%o5,%g1
+
+ ld [%o1-16],%g3
+ sll %g2,%o3,%o4
+ or %g4,%g1,%g4
+ st %g4,[%o0+8]
+ srl %g3,%o5,%g1
+
+ ld [%o1-20],%g2
+ sll %g3,%o3,%g4
+ or %o4,%g1,%o4
+ st %o4,[%o0+4]
+ srl %g2,%o5,%g1
+
+ add %o1,-16,%o1
+ or %g4,%g1,%g4
+ bne Loop
+ st %g4,[%o0+0]
+
+Lend: sll %g2,%o3,%g2
+ st %g2,[%o0-4]
+ retl
+ ld [%sp+80],%o0
+
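_gcry_mpih_lshift shifts a limb vector left by cnt bits (0 < cnt < 32), writes the result to res_ptr, and returns the bits shifted out of the most significant limb; the assembly walks from the high end of the vector, which keeps an in-place call safe. A minimal C sketch of those semantics follows, assuming a hypothetical ref_lshift helper and 32-bit limbs.

/* Illustrative reference only: shift size limbs left by cnt bits
 * (0 < cnt < 32), working from the high end as the assembly does,
 * and return the bits shifted out of the top limb. */
static uint32_t
ref_lshift (uint32_t *res, const uint32_t *src, size_t size, unsigned cnt)
{
  unsigned tnc = 32 - cnt;
  uint32_t high = src[size - 1];
  uint32_t ret  = high >> tnc;        /* carry-out: the function result */
  for (size_t i = size - 1; i > 0; i--)
    {
      uint32_t low = src[i - 1];
      res[i] = (high << cnt) | (low >> tnc);
      high = low;
    }
  res[0] = high << cnt;
  return ret;
}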
diff --git a/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-rshift.S b/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-rshift.S
new file mode 100644
index 0000000000..cd3db41df3
--- /dev/null
+++ b/plugins/MirOTR/Libgcrypt/mpi/sparc32/mpih-rshift.S
@@ -0,0 +1,93 @@
+/* sparc rshift
+ *
+ * Copyright (C) 1995, 1996, 1998,
+ * 2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+! INPUT PARAMETERS
+! res_ptr %o0
+! src_ptr %o1
+! size %o2
+! cnt %o3
+
+#include "sysdep.h"
+
+ .text
+ .align 4
+ .global C_SYMBOL_NAME(_gcry_mpih_rshift)
+C_SYMBOL_NAME(_gcry_mpih_rshift):
+ ld [%o1],%g2 ! load first limb
+ sub %g0,%o3,%o5 ! negate shift count
+ add %o2,-1,%o2
+ andcc %o2,4-1,%g4 ! number of limbs in first loop
+ sll %g2,%o5,%g1 ! compute function result
+ be L0 ! if multiple of 4 limbs, skip first loop
+ st %g1,[%sp+80]
+
+ sub %o2,%g4,%o2 ! adjust count for main loop
+
+Loop0: ld [%o1+4],%g3
+ add %o0,4,%o0
+ add %o1,4,%o1
+ addcc %g4,-1,%g4
+ srl %g2,%o3,%o4
+ sll %g3,%o5,%g1
+ mov %g3,%g2
+ or %o4,%g1,%o4
+ bne Loop0
+ st %o4,[%o0-4]
+
+L0: tst %o2
+ be Lend
+ nop
+
+Loop: ld [%o1+4],%g3
+ add %o0,16,%o0
+ addcc %o2,-4,%o2
+ srl %g2,%o3,%o4
+ sll %g3,%o5,%g1
+
+ ld [%o1+8],%g2
+ srl %g3,%o3,%g4
+ or %o4,%g1,%o4
+ st %o4,[%o0-16]
+ sll %g2,%o5,%g1
+
+ ld [%o1+12],%g3
+ srl %g2,%o3,%o4
+ or %g4,%g1,%g4
+ st %g4,[%o0-12]
+ sll %g3,%o5,%g1
+
+ ld [%o1+16],%g2
+ srl %g3,%o3,%g4
+ or %o4,%g1,%o4
+ st %o4,[%o0-8]
+ sll %g2,%o5,%g1
+
+ add %o1,16,%o1
+ or %g4,%g1,%g4
+ bne Loop
+ st %g4,[%o0-4]
+
+Lend: srl %g2,%o3,%g2
+ st %g2,[%o0-0]
+ retl
+ ld [%sp+80],%o0
+
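_gcry_mpih_rshift is the mirror image: it shifts the vector right by cnt bits (0 < cnt < 32), walks from the low end (so an in-place call is again safe), and returns the bits shifted out of the least significant limb. A hedged C sketch, with ref_rshift as a hypothetical name:

/* Illustrative reference only: shift size limbs right by cnt bits
 * (0 < cnt < 32), working from the low end, and return the bits that
 * fall out of the least significant limb. */
static uint32_t
ref_rshift (uint32_t *res, const uint32_t *src, size_t size, unsigned cnt)
{
  unsigned tnc = 32 - cnt;
  uint32_t low = src[0];
  uint32_t ret = low << tnc;          /* bits shifted out at the bottom */
  for (size_t i = 0; i + 1 < size; i++)
    {
      uint32_t high = src[i + 1];
      res[i] = (low >> cnt) | (high << tnc);
      low = high;
    }
  res[size - 1] = low >> cnt;
  return ret;
}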
diff --git a/plugins/MirOTR/Libgcrypt/mpi/sparc32/udiv.S b/plugins/MirOTR/Libgcrypt/mpi/sparc32/udiv.S
new file mode 100644
index 0000000000..006b5c125c
--- /dev/null
+++ b/plugins/MirOTR/Libgcrypt/mpi/sparc32/udiv.S
@@ -0,0 +1,195 @@
+/* SPARC v7 __udiv_qrnnd division support, used from longlong.h.
+ * This is for v7 CPUs without a floating-point unit.
+ *
+ * Copyright (C) 1993, 1994, 1996, 1998,
+ * 2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+
+! INPUT PARAMETERS
+! rem_ptr o0
+! n1 o1
+! n0 o2
+! d o3
+
+#include "sysdep.h"
+
+ .text
+ .align 4
+ .global C_SYMBOL_NAME(__udiv_qrnnd)
+C_SYMBOL_NAME(__udiv_qrnnd):
+ tst %o3
+ bneg Largedivisor
+ mov 8,%g1
+
+ b Lp1
+ addxcc %o2,%o2,%o2
+
+Lplop: bcc Ln1
+ addxcc %o2,%o2,%o2
+Lp1: addx %o1,%o1,%o1
+ subcc %o1,%o3,%o4
+ bcc Ln2
+ addxcc %o2,%o2,%o2
+Lp2: addx %o1,%o1,%o1
+ subcc %o1,%o3,%o4
+ bcc Ln3
+ addxcc %o2,%o2,%o2
+Lp3: addx %o1,%o1,%o1
+ subcc %o1,%o3,%o4
+ bcc Ln4
+ addxcc %o2,%o2,%o2
+Lp4: addx %o1,%o1,%o1
+ addcc %g1,-1,%g1
+ bne Lplop
+ subcc %o1,%o3,%o4
+ bcc Ln5
+ addxcc %o2,%o2,%o2
+Lp5: st %o1,[%o0]
+ retl
+ xnor %g0,%o2,%o0
+
+Lnlop: bcc Lp1
+ addxcc %o2,%o2,%o2
+Ln1: addx %o4,%o4,%o4
+ subcc %o4,%o3,%o1
+ bcc Lp2
+ addxcc %o2,%o2,%o2
+Ln2: addx %o4,%o4,%o4
+ subcc %o4,%o3,%o1
+ bcc Lp3
+ addxcc %o2,%o2,%o2
+Ln3: addx %o4,%o4,%o4
+ subcc %o4,%o3,%o1
+ bcc Lp4
+ addxcc %o2,%o2,%o2
+Ln4: addx %o4,%o4,%o4
+ addcc %g1,-1,%g1
+ bne Lnlop
+ subcc %o4,%o3,%o1
+ bcc Lp5
+ addxcc %o2,%o2,%o2
+Ln5: st %o4,[%o0]
+ retl
+ xnor %g0,%o2,%o0
+
+Largedivisor:
+ and %o2,1,%o5 ! %o5 = n0 & 1
+
+ srl %o2,1,%o2
+ sll %o1,31,%g2
+ or %g2,%o2,%o2 ! %o2 = lo(n1n0 >> 1)
+ srl %o1,1,%o1 ! %o1 = hi(n1n0 >> 1)
+
+ and %o3,1,%g2
+ srl %o3,1,%g3 ! %g3 = floor(d / 2)
+ add %g3,%g2,%g3 ! %g3 = ceil(d / 2)
+
+ b LLp1
+ addxcc %o2,%o2,%o2
+
+LLplop: bcc LLn1
+ addxcc %o2,%o2,%o2
+LLp1: addx %o1,%o1,%o1
+ subcc %o1,%g3,%o4
+ bcc LLn2
+ addxcc %o2,%o2,%o2
+LLp2: addx %o1,%o1,%o1
+ subcc %o1,%g3,%o4
+ bcc LLn3
+ addxcc %o2,%o2,%o2
+LLp3: addx %o1,%o1,%o1
+ subcc %o1,%g3,%o4
+ bcc LLn4
+ addxcc %o2,%o2,%o2
+LLp4: addx %o1,%o1,%o1
+ addcc %g1,-1,%g1
+ bne LLplop
+ subcc %o1,%g3,%o4
+ bcc LLn5
+ addxcc %o2,%o2,%o2
+LLp5: add %o1,%o1,%o1 ! << 1
+ tst %g2
+ bne Oddp
+ add %o5,%o1,%o1
+ st %o1,[%o0]
+ retl
+ xnor %g0,%o2,%o0
+
+LLnlop: bcc LLp1
+ addxcc %o2,%o2,%o2
+LLn1: addx %o4,%o4,%o4
+ subcc %o4,%g3,%o1
+ bcc LLp2
+ addxcc %o2,%o2,%o2
+LLn2: addx %o4,%o4,%o4
+ subcc %o4,%g3,%o1
+ bcc LLp3
+ addxcc %o2,%o2,%o2
+LLn3: addx %o4,%o4,%o4
+ subcc %o4,%g3,%o1
+ bcc LLp4
+ addxcc %o2,%o2,%o2
+LLn4: addx %o4,%o4,%o4
+ addcc %g1,-1,%g1
+ bne LLnlop
+ subcc %o4,%g3,%o1
+ bcc LLp5
+ addxcc %o2,%o2,%o2
+LLn5: add %o4,%o4,%o4 ! << 1
+ tst %g2
+ bne Oddn
+ add %o5,%o4,%o4
+ st %o4,[%o0]
+ retl
+ xnor %g0,%o2,%o0
+
+Oddp: xnor %g0,%o2,%o2
+ ! q' in %o2. r' in %o1
+ addcc %o1,%o2,%o1
+ bcc LLp6
+ addx %o2,0,%o2
+ sub %o1,%o3,%o1
+LLp6: subcc %o1,%o3,%g0
+ bcs LLp7
+ subx %o2,-1,%o2
+ sub %o1,%o3,%o1
+LLp7: st %o1,[%o0]
+ retl
+ mov %o2,%o0
+
+Oddn: xnor %g0,%o2,%o2
+ ! q' in %o2. r' in %o4
+ addcc %o4,%o2,%o4
+ bcc LLn6
+ addx %o2,0,%o2
+ sub %o4,%o3,%o4
+LLn6: subcc %o4,%o3,%g0
+ bcs LLn7
+ subx %o2,-1,%o2
+ sub %o4,%o3,%o4
+LLn7: st %o4,[%o0]
+ retl
+ mov %o2,%o0
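__udiv_qrnnd divides the two-limb value (n1:n0) by d under the usual longlong.h precondition n1 < d, so the quotient fits in one limb; the quotient is the return value and the remainder is stored through rem_ptr. The assembly above develops the 32 quotient bits with a shift-and-subtract loop, keeping the partial remainder in either %o1 or %o4 (the interleaved Lp*/Ln* sequences) so no restore step is needed, and the Largedivisor path handles divisors with the top bit set by dividing (n1:n0) >> 1 by ceil(d/2) and fixing up the result in Oddp/Oddn. A minimal C sketch of the contract, using 64-bit arithmetic purely for illustration (ref_udiv_qrnnd is a hypothetical name):

#include <stdint.h>

/* Illustrative reference only: divide the two-limb value (n1:n0) by d,
 * with n1 < d, store the remainder through rem_ptr and return the
 * quotient.  64-bit arithmetic is used here only to state the contract;
 * the SPARC v7 code above cannot assume a hardware divider. */
static uint32_t
ref_udiv_qrnnd (uint32_t *rem_ptr, uint32_t n1, uint32_t n0, uint32_t d)
{
  uint64_t n = ((uint64_t) n1 << 32) | n0;   /* the double-limb dividend */
  *rem_ptr = (uint32_t) (n % d);
  return (uint32_t) (n / d);
}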