/* i80386 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
   the result from a second limb vector.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not,
   see <https://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include "asm-syntax.h"

/* mp_limb_t __mpn_submul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
                             mp_size_t size, mp_limb_t s2_limb)

   i386 ABI: all four arguments on the stack; the returned carry/borrow
   limb comes back in %eax.  For each of the SIZE limbs this computes
       res_ptr[i] -= s1_ptr[i] * s2_limb  (plus the carry propagated
   from the previous limb) and returns the final carry-out.
   Callee-saved %edi, %esi, %ebp, %ebx are preserved via push/pop;
   cfi_* annotations keep the unwind info accurate across them.  */

/* Stack offsets of the incoming arguments, measured from %esp AFTER the
   four register pushes below (hence the 4+16 bias: return address plus
   four saved registers).  */
#define PARMS	4+16		/* space for 4 saved regs */
#define RES	PARMS		/* res_ptr   */
#define S1	RES+4		/* s1_ptr    */
#define SIZE	S1+4		/* size      */
#define S2LIMB	SIZE+4		/* s2_limb   */

/* Symbolic register roles.  %eax/%edx are consumed by mull, so the
   multiplier lives in %ebx and the running carry in %ebp.  */
#define res_ptr edi
#define s1_ptr esi
#define sizeP ecx
#define s2_limb ebx

	.text
ENTRY (__mpn_submul_1)

	pushl	%edi
	cfi_adjust_cfa_offset (4)
	pushl	%esi
	cfi_adjust_cfa_offset (4)
	pushl	%ebp
	cfi_adjust_cfa_offset (4)
	pushl	%ebx
	cfi_adjust_cfa_offset (4)
	cfi_rel_offset (edi, 12)
	cfi_rel_offset (esi, 8)
	cfi_rel_offset (ebp, 4)
	cfi_rel_offset (ebx, 0)

	movl	RES(%esp), %res_ptr
	movl	S1(%esp), %s1_ptr
	movl	SIZE(%esp), %sizeP
	movl	S2LIMB(%esp), %s2_limb

	/* Point both vectors just past their ends and run the index from
	   -size up to 0, so a single incl/jnz terminates the loop.  */
	leal	(%res_ptr,%sizeP,4), %res_ptr
	leal	(%s1_ptr,%sizeP,4), %s1_ptr
	negl	%sizeP
	xorl	%ebp, %ebp		/* carry limb = 0 */
	ALIGN (3)

L(oop):
	movl	(%s1_ptr,%sizeP,4), %eax
	mull	%s2_limb		/* %edx:%eax = s1 limb * s2_limb */
	addl	%ebp, %eax		/* add carry from previous limb */
	adcl	$0, %edx		/* fold its carry-out into high half */
	subl	%eax, (%res_ptr,%sizeP,4)
	adcl	$0, %edx		/* fold the subtraction's borrow too */
	movl	%edx, %ebp		/* carry limb for the next iteration */

	incl	%sizeP
	jnz	L(oop)
	movl	%ebp, %eax		/* return the final carry limb */

	popl	%ebx
	cfi_adjust_cfa_offset (-4)
	cfi_restore (ebx)
	popl	%ebp
	cfi_adjust_cfa_offset (-4)
	cfi_restore (ebp)
	popl	%esi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (esi)
	popl	%edi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (edi)

	ret
END (__mpn_submul_1)