1 /* Copyright (C) 2018-2020 Free Software Foundation, Inc.
3 This file is free software; you can redistribute it and/or modify it
4 under the terms of the GNU General Public License as published by the
5 Free Software Foundation; either version 3, or (at your option) any
later version.
8 This file is distributed in the hope that it will be useful, but
9 WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 General Public License for more details.
13 Under Section 7 of GPL version 3, you are granted additional
14 permissions described in the GCC Runtime Library Exception, version
15 3.1, as published by the Free Software Foundation.
17 You should have received a copy of the GNU General Public License and
18 a copy of the GCC Runtime Library Exception along with this program;
19 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
20 <http://www.gnu.org/licenses/>. */
/* __mulsi3: 32-bit integer multiply by shift-and-add.
   In:  r3 = x, r4 = y.   Out: r11 = x * y.
   NOTE(review): this view is a fragment of the original file — the
   .global directive, the __mulsi3: label, the loop branches and their
   delay slots, and the non-cmov (#else/#endif) alternative are not
   visible here.  The #if below only looks unbalanced because its
   closing lines are missing.  */
26 .type __mulsi3, @function
28 l.movhi r11, 0 /* initial r */
30 /* Given R = X * Y ... */
31 1: l.sfeq r4, r0 /* while (y != 0) */
33 l.andi r5, r4, 1 /* if (y & 1) ... */
36 #if defined(__or1k_cmov__)
/* NOTE(review): r12 presumably holds r + x computed by an l.add that is
   not visible in this excerpt — TODO confirm against the full source.  */
37 l.cmov r11, r12, r11 /* ... r += x. */
38 l.srli r4, r4, 1 /* y >>= 1 */
/* NOTE(review): the duplicated shift below is likely the branch-based
   (no-cmov) path; the #else/#endif lines themselves are missing here.  */
41 l.srli r4, r4, 1 /* y >>= 1 */
46 l.add r3, r3, r3 /* x <<= 1 */
51 .size __mulsi3, . - __mulsi3
/* __udivmodsi3_internal: unsigned 32-bit divide with remainder,
   shared core for the signed/unsigned div and mod entry points.
   In:  r3 = x (dividend), r4 = y (divisor).
   Out: r11 = quotient, r12 = remainder.
   Does not clobber r13, so callers may stash their return address
   there instead of building a stack frame (see comment below).
   NOTE(review): fragment — the conditional branches and delay slots
   implied by the l.sf* compares, the non-cmov (#else) path, the
   .global/label lines for __udivsi3 and the closing #endif are not
   visible in this excerpt.  */
54 #if defined(L__udivsi3) || defined(L__umodsi3) \
55 || defined(L__divsi3) || defined(L__modsi3)
56 .global __udivmodsi3_internal
57 .hidden __udivmodsi3_internal
58 .type __udivmodsi3_internal, @function
/* NOTE(review): __udivsi3 presumably shares the entry point below —
   its .global directive and label are not visible here; confirm.  */
64 .type __udivsi3, @function
66 __udivmodsi3_internal:
67 /* Note that the other division routines assume that r13
68 is not clobbered by this routine, and use that as to
69 save a return address without creating a stack frame. */
71 l.sfeq r4, r0 /* division by zero; return 0. */
72 l.ori r11, r0, 0 /* initial quotient */
74 l.ori r12, r3, 0 /* initial remainder */
76 /* Given X/Y, shift Y left until Y >= X. */
77 l.ori r6, r0, 1 /* mask = 1 */
78 1: l.sflts r4, r0 /* y has msb set */
80 l.sfltu r4, r12 /* y < x */
81 l.add r4, r4, r4 /* y <<= 1 */
83 l.add r6, r6, r6 /* mask <<= 1 */
85 /* Shift Y back to the right again, subtracting from X. */
86 2: l.add r7, r11, r6 /* tmp1 = quot + mask */
87 3: l.srli r6, r6, 1 /* mask >>= 1 */
88 l.sub r8, r12, r4 /* tmp2 = x - y */
89 l.sfleu r4, r12 /* y <= x */
90 l.srli r4, r4, 1 /* y >>= 1 */
91 #if defined(__or1k_cmov__)
92 l.cmov r11, r7, r11 /* if (y <= x) quot = tmp1 */
93 l.cmov r12, r8, r12 /* if (y <= x) x = tmp2 */
/* NOTE(review): the #else (branch-based) equivalents and the #endif
   are missing from this view.  */
101 l.sfne r6, r0 /* loop until mask == 0 */
103 l.add r7, r11, r6 /* delay fill from loop start */
108 .size __udivsi3, . - __udivsi3
109 .size __udivmodsi3_internal, . - __udivmodsi3_internal
/* __umodsi3: unsigned 32-bit modulus, r11 = r3 % r4.
   Calls __udivmodsi3_internal (which leaves the remainder in r12 and
   preserves r13) and copies the remainder into the return register.
   NOTE(review): fragment — the .global directive, the __umodsi3:
   label, and the instruction that saves lr (r9) into r13 before the
   call are not visible in this excerpt.  */
115 .type __umodsi3, @function
118 /* Know that __udivmodsi3_internal does not clobber r13. */
121 l.jal __udivmodsi3_internal
123 l.jr r13 /* return to saved lr */
124 l.ori r11, r12, 0 /* move remainder to rv */
127 .size __umodsi3, . - __umodsi3
/* __divsi3: signed 32-bit division, r11 = r3 / r4, implemented by
   taking absolute values, dividing unsigned, then conditionally
   negating the result (sign decided by x XOR y).
   NOTE(review): fragment — the .global directive, the __divsi3:
   label, the abs-value l.sub/l.cmov (or branch) sequences inside the
   two #if arms, their #else/#endif lines, and the final negate after
   the non-tail call are not visible in this excerpt.  */
130 /* For signed division we do:
132 -x / y = x / -y = -(x / y)
136 which has the property that (x/y)*y + (x%y) = x. */
141 .type __divsi3, @function
144 l.xor r6, r3, r4 /* need result negate? */
146 l.sflts r3, r0 /* abs(x) */
147 #if defined(__or1k_cmov__)
156 l.sflts r4, r0 /* abs(y) */
157 #if defined(__or1k_cmov__)
167 /* If the result will not require sign flip, tail call. */
169 l.bnf __udivmodsi3_internal
170 l.ori r13, r9, 0 /* save lr */
172 /* Otherwise, know that __udivmodsi3_internal does not clobber r13.
173 Perform a normal call, then negate and return via saved lr. */
175 l.jal __udivmodsi3_internal
181 .size __divsi3, . - __divsi3
/* __modsi3: signed 32-bit modulus, r11 = r3 % r4.  The remainder's
   sign follows the dividend x (so that (x/y)*y + (x%y) == x): the
   divisor is replaced by abs(y), and when x is negative both x and
   the resulting remainder are negated.
   NOTE(review): fragment — the .global directive, the __modsi3:
   label, the abs(y) l.sub/l.cmov (or branch) sequence and its
   #else/#endif, the branch to 1: taken when x is negative, the delay
   slots of the l.jal calls (presumably negating x on the negative
   path), and the closing #endif are not visible in this excerpt.  */
187 .type __modsi3, @function
190 l.sflts r4, r0 /* abs(y) */
191 #if defined(__or1k_cmov__)
201 l.sflts r3, r0 /* x negative? */
203 l.ori r13, r9, 0 /* save lr */
205 /* Know that __udivmodsi3_internal does not clobber r13. */
208 /* X positive; no negate of the result required. */
209 l.jal __udivmodsi3_internal
211 l.jr r13 /* return to saved lr */
212 l.ori r11, r12, 0 /* move remainder to rv */
214 /* X negative; negate both X and the result. */
215 1: l.jal __udivmodsi3_internal
217 l.jr r13 /* return to saved lr */
218 l.sub r11, r0, r12 /* negate remainder to rv */
221 .size __modsi3, .- __modsi3