! SPARC v9 32-bit __mpn_addmul_1 -- Multiply a limb vector with a limb
! and add the result to a second limb vector.
!
! Copyright (C) 2013-2019 Free Software Foundation, Inc.
! This file is part of the GNU C Library.
! Contributed by David S. Miller <davem@davemloft.net>
!
! The GNU C Library is free software; you can redistribute it and/or
! modify it under the terms of the GNU Lesser General Public
! License as published by the Free Software Foundation; either
! version 2.1 of the License, or (at your option) any later version.
!
! The GNU C Library is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
! Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public
! License along with the GNU C Library; if not, see
! <http://www.gnu.org/licenses/>.

#include <sysdep.h>

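! Register assignments: the four incoming arguments land in %i0-%i3
! after the register-window save below; the rest are scratch.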
#define res_ptr  %i0
#define s1_ptr   %i1
#define sz_arg   %i2
#define s2l_arg  %i3
#define sz       %o4
#define carry    %o5
#define s2_limb  %g1
#define tmp1     %l0
#define tmp2     %l1
#define tmp3     %l2
#define tmp4     %l3
#define tmp64_1  %g3
#define tmp64_2  %o3

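! mp_limb_t __mpn_addmul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
!                           mp_size_t size, mp_limb_t s2_limb)
!
! For each of the SIZE limbs of S1_PTR, compute RES_PTR[i] +
! S1_PTR[i] * S2_LIMB plus the running carry, store the low 32 bits
! back to RES_PTR[i], and return the final carry-out limb.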
ENTRY(__mpn_addmul_1)
	save	%sp, -96, %sp
	srl	sz_arg, 0, sz		! zero-extend size to 64 bits
	srl	s2l_arg, 0, s2_limb	! zero-extend the multiplier limb
	subcc	sz, 1, sz
	be,pn	%icc, .Lfinal_limb	! size == 1: just one limb
	 clr	carry			! delay slot: carry starts at zero

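! Main loop, two limbs per iteration: form each 32x32->64-bit product
! with mulx, add in the corresponding result limb and the incoming
! carry, store the low 32 bits, and take the high 32 bits as the carry
! for the next limb.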
.Lloop:
	lduw	[s1_ptr + 0x00], tmp1
	lduw	[res_ptr + 0x00], tmp3
	lduw	[s1_ptr + 0x04], tmp2
	lduw	[res_ptr + 0x04], tmp4
	mulx	tmp1, s2_limb, tmp64_1	! first 64-bit product
	add	s1_ptr, 8, s1_ptr
	mulx	tmp2, s2_limb, tmp64_2	! second 64-bit product
	sub	sz, 2, sz
	add	res_ptr, 8, res_ptr
	add	tmp3, tmp64_1, tmp64_1	! add the result limb ...
	add	carry, tmp64_1, tmp64_1	! ... and the incoming carry
	stw	tmp64_1, [res_ptr - 0x08]
	srlx	tmp64_1, 32, carry	! high 32 bits carry into next limb
	add	tmp4, tmp64_2, tmp64_2
	add	carry, tmp64_2, tmp64_2
	stw	tmp64_2, [res_ptr - 0x04]
	brgz	sz, .Lloop
	 srlx	tmp64_2, 32, carry	! delay slot: carry for next pass

	brlz,pt	sz, .Lfinish		! even size: no odd limb left over
	 nop

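! An odd limb count leaves one last limb to process (also the direct
! entry point when SIZE is 1).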
.Lfinal_limb:
	lduw	[s1_ptr + 0x00], tmp1
	lduw	[res_ptr + 0x00], tmp3
	mulx	tmp1, s2_limb, tmp64_1
	add	tmp3, tmp64_1, tmp64_1
	add	carry, tmp64_1, tmp64_1
	stw	tmp64_1, [res_ptr + 0x00]
	srlx	tmp64_1, 32, carry

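! Return the final carry: the restore in the delay slot adds CARRY + 0
! into the caller's %o0 while unwinding the register window.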
.Lfinish:
	jmpl	%i7 + 0x8, %g0
	 restore carry, 0, %o0
END(__mpn_addmul_1)