return block_clear_fn;
}
\f
-/* Write to one of the components of the complex value CPLX. Write VAL to
- the real part if IMAG_P is false, and the imaginary part if it's true. */
-
-void
-write_complex_part (rtx cplx, rtx val, bool imag_p)
-{
- enum machine_mode cmode;
- enum machine_mode imode;
- unsigned ibitsize;
-
- if (GET_CODE (cplx) == CONCAT)
- {
- emit_move_insn (XEXP (cplx, imag_p), val);
- return;
- }
-
- cmode = GET_MODE (cplx);
- imode = GET_MODE_INNER (cmode);
- ibitsize = GET_MODE_BITSIZE (imode);
-
- /* If the sub-object is at least word sized, then we know that subregging
- will work. This special case is important, since store_bit_field
- wants to operate on integer modes, and there's rarely an OImode to
- correspond to TCmode. */
- if (ibitsize >= BITS_PER_WORD)
- {
- rtx part = simplify_gen_subreg (imode, cplx, cmode,
- imag_p ? GET_MODE_SIZE (imode) : 0);
- emit_move_insn (part, val);
- }
- else
- store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, imode, val,
- GET_MODE_SIZE (cmode));
-}
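
Both the deleted write_complex_part (and read_complex_part below) and the gen_realpart/gen_imagpart calls they are reverted to assume the same storage layout: the real part first, then the imaginary part at byte offset GET_MODE_SIZE (imode). A minimal standalone C model of that layout (illustration only, not GCC code; the two-element array stands in for a complex mode):

    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      double z[2] = { 1.5, 2.5 };   /* z[0] = real part, z[1] = imaginary part */
      unsigned char *raw = (unsigned char *) z;
      double imag;

      /* The imaginary part sits at byte offset sizeof (double), the
         analogue of the GET_MODE_SIZE (imode) offset used above.  */
      memcpy (&imag, raw + sizeof (double), sizeof (double));
      printf ("imag = %g\n", imag);   /* prints 2.5 */
      return 0;
    }
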
+/* Generate code to copy Y into X.
+ Both Y and X must have the same mode, except that
+ Y can be a constant with VOIDmode.
+ This mode cannot be BLKmode; use emit_block_move for that.
-/* Extract one of the components of the complex value CPLX. Extract the
- real part if IMAG_P is false, and the imaginary part if it's true. */
+ Return the last instruction emitted. */
rtx
-read_complex_part (rtx cplx, bool imag_p)
+emit_move_insn (rtx x, rtx y)
{
- enum machine_mode cmode, imode;
- unsigned ibitsize;
-
- if (GET_CODE (cplx) == CONCAT)
- return XEXP (cplx, imag_p);
+ enum machine_mode mode = GET_MODE (x);
+ rtx y_cst = NULL_RTX;
+ rtx last_insn, set;
- cmode = GET_MODE (cplx);
- imode = GET_MODE_INNER (cmode);
- ibitsize = GET_MODE_BITSIZE (imode);
+ x = protect_from_queue (x, 1);
+ y = protect_from_queue (y, 0);
- /* Special case reads from complex constants that got spilled to memory. */
- if (GET_CODE (cplx) == MEM && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
- {
- tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
- if (decl && TREE_CODE (decl) == COMPLEX_CST)
- {
- tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
- if (TREE_CODE (part) == REAL_CST
- || TREE_CODE (part) == INTEGER_CST)
- return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
- }
- }
+ if (mode == BLKmode || (GET_MODE (y) != mode && GET_MODE (y) != VOIDmode))
+ abort ();
- /* If the sub-object is at least word sized, then we know that subregging
- will work. This special case is important, since extract_bit_field
- wants to operate on integer modes, and there's rarely an OImode to
- correspond to TCmode. */
- if (ibitsize >= BITS_PER_WORD)
+ /* Never force constant_p_rtx to memory. */
+ if (GET_CODE (y) == CONSTANT_P_RTX)
+ ;
+ else if (CONSTANT_P (y))
{
- rtx ret = simplify_gen_subreg (imode, cplx, cmode,
- imag_p ? GET_MODE_SIZE (imode) : 0);
- if (ret == NULL)
- abort ();
- return ret;
- }
-
- return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
- true, NULL_RTX, imode, imode,
- GET_MODE_SIZE (cmode));
-}
-\f
-/* A subroutine of emit_move_insn_1. Generate a move from Y into X using
- ALT_MODE instead of the operand's natural mode, MODE. CODE is the insn
- code for the move in ALT_MODE, and is known to be valid. Returns the
- instruction emitted. */
-
-static rtx
-emit_move_via_alt_mode (enum machine_mode alt_mode, enum machine_mode mode,
- enum insn_code code, rtx x, rtx y)
-{
- /* Get X and Y in ALT_MODE. We can't use gen_lowpart here because it
- may call change_address which is not appropriate if we were
- called when a reload was in progress. We don't have to worry
- about changing the address since the size in bytes is supposed to
- be the same. Copy the MEM to change the mode and move any
- substitutions from the old MEM to the new one. */
+ if (optimize
+ && SCALAR_FLOAT_MODE_P (GET_MODE (x))
+ && (last_insn = compress_float_constant (x, y)))
+ return last_insn;
- if (reload_in_progress)
- {
- rtx x1 = x, y1 = y;
+ y_cst = y;
- x = gen_lowpart_common (alt_mode, x1);
- if (x == 0 && GET_CODE (x1) == MEM)
+ if (!LEGITIMATE_CONSTANT_P (y))
{
- x = adjust_address_nv (x1, alt_mode, 0);
- copy_replacements (x1, x);
- }
+ y = force_const_mem (mode, y);
- y = gen_lowpart_common (alt_mode, y1);
- if (y == 0 && GET_CODE (y1) == MEM)
- {
- y = adjust_address_nv (y1, alt_mode, 0);
- copy_replacements (y1, y);
+ /* If the target's cannot_force_const_mem prevented the spill,
+ assume that the target's move expanders will also take care
+ of the non-legitimate constant. */
+ if (!y)
+ y = y_cst;
}
}
- else
- {
- x = simplify_gen_subreg (alt_mode, x, mode, 0);
- y = simplify_gen_subreg (alt_mode, y, mode, 0);
- }
- return emit_insn (GEN_FCN (code) (x, y));
-}
+ /* If X or Y are memory references, verify that their addresses are valid
+ for the machine. */
+ if (GET_CODE (x) == MEM
+ && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
+ && ! push_operand (x, GET_MODE (x)))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+ x = validize_mem (x);
-/* A subroutine of emit_move_insn_1. Generate a move from Y into X using
- an integer mode of the same size as MODE. Returns the instruction
- emitted, or NULL if such a move could not be generated. */
+ if (GET_CODE (y) == MEM
+ && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+ y = validize_mem (y);
-static rtx
-emit_move_via_integer (enum machine_mode mode, rtx x, rtx y)
-{
- enum machine_mode imode;
- enum insn_code code;
+ if (mode == BLKmode)
+ abort ();
- /* There must exist a mode of the exact size we require. */
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
- return NULL_RTX;
+ last_insn = emit_move_insn_1 (x, y);
- /* The target must support moves in this mode. */
- code = mov_optab->handlers[imode].insn_code;
- if (code == CODE_FOR_nothing)
- return NULL_RTX;
+ if (y_cst && GET_CODE (x) == REG
+ && (set = single_set (last_insn)) != NULL_RTX
+ && SET_DEST (set) == x
+ && ! rtx_equal_p (y_cst, SET_SRC (set)))
+ set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
- return emit_move_via_alt_mode (imode, mode, code, x, y);
+ return last_insn;
}
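
The deleted emit_move_via_integer helper and the inline int_mode_for_mode/simplify_gen_subreg path that replaces it below express the same idea: when a mode has no move pattern of its own, move its bits through an integer mode of identical size. A standalone sketch of that bit-level punning using memcpy (illustration only, not GCC code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int
    main (void)
    {
      float y = 3.25f, x;
      uint32_t bits;                      /* integer "mode" of the same size */

      memcpy (&bits, &y, sizeof bits);    /* view Y in the integer mode */
      memcpy (&x, &bits, sizeof x);       /* store it back as X */
      printf ("%g\n", x);                 /* prints 3.25 */
      return 0;
    }
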
-/* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
- Return an equivalent MEM that does not use an auto-increment. */
+/* Low level part of emit_move_insn.
+ Called just like emit_move_insn, but assumes X and Y
+ are basically valid. */
-static rtx
-emit_move_resolve_push (enum machine_mode mode, rtx x)
+rtx
+emit_move_insn_1 (rtx x, rtx y)
{
- enum rtx_code code = GET_CODE (XEXP (x, 0));
- HOST_WIDE_INT adjust;
- rtx temp;
+ enum machine_mode mode = GET_MODE (x);
+ enum machine_mode submode;
+ enum mode_class class = GET_MODE_CLASS (mode);
- adjust = GET_MODE_SIZE (mode);
-#ifdef PUSH_ROUNDING
- adjust = PUSH_ROUNDING (adjust);
-#endif
- if (code == PRE_DEC || code == POST_DEC)
- adjust = -adjust;
+ if ((unsigned int) mode >= (unsigned int) MAX_MACHINE_MODE)
+ abort ();
- /* Do not use anti_adjust_stack, since we don't want to update
- stack_pointer_delta. */
- temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
- GEN_INT (adjust), stack_pointer_rtx,
- 0, OPTAB_LIB_WIDEN);
- if (temp != stack_pointer_rtx)
- emit_move_insn (stack_pointer_rtx, temp);
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ return
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y));
- switch (code)
+ /* Expand complex moves by moving real part and imag part, if possible. */
+ else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
+ && BLKmode != (submode = GET_MODE_INNER (mode))
+ && (mov_optab->handlers[(int) submode].insn_code
+ != CODE_FOR_nothing))
{
- case PRE_INC:
- case PRE_DEC:
- temp = stack_pointer_rtx;
- break;
- case POST_INC:
- temp = plus_constant (stack_pointer_rtx, -GET_MODE_SIZE (mode));
- break;
- case POST_DEC:
- temp = plus_constant (stack_pointer_rtx, GET_MODE_SIZE (mode));
- break;
- default:
- abort ();
- }
-
- return replace_equiv_address (x, temp);
-}
-
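
The deleted emit_move_resolve_push and the inline PUSH_ROUNDING code restored below both adjust the stack pointer by the rounded push size, then address the parts at fixed offsets from the new stack pointer. A standalone model of that arithmetic, assuming a hypothetical 8-byte push granularity and a downward-growing stack (not GCC code):

    #include <stdio.h>

    /* Assumed target rule for this sketch: pushes round up to 8 bytes.  */
    #define PUSH_ROUNDING(BYTES) (((BYTES) + 7) & ~7)

    int
    main (void)
    {
      int submode_size = 2;                    /* one part of the complex value */
      int mode_size = 2 * submode_size;        /* the whole complex value */
      int adjust = PUSH_ROUNDING (mode_size);  /* actual stack adjustment */

      /* STACK_GROWS_DOWNWARD case: parts at fixed offsets from the new SP.  */
      int offset1 = 0;                         /* real part */
      int offset2 = submode_size;              /* imaginary part */

      printf ("sp -= %d; real at sp+%d, imag at sp+%d\n",
              adjust, offset1, offset2);
      return 0;
    }
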
-/* A subroutine of emit_move_complex. Generate a move from Y into X.
- X is known to satisfy push_operand, and MODE is known to be complex.
- Returns the last instruction emitted. */
-
-static rtx
-emit_move_complex_push (enum machine_mode mode, rtx x, rtx y)
-{
- enum machine_mode submode = GET_MODE_INNER (mode);
- bool imag_first;
+ /* Don't split destination if it is a stack push. */
+ int stack = push_operand (x, GET_MODE (x));
#ifdef PUSH_ROUNDING
- unsigned int submodesize = GET_MODE_SIZE (submode);
-
- /* In case we output to the stack, but the size is smaller than the
- machine can push exactly, we need to use move instructions. */
- if (PUSH_ROUNDING (submodesize) != submodesize)
- {
- x = emit_move_resolve_push (mode, x);
- return emit_move_insn (x, y);
- }
+ /* In case we output to the stack, but the size is smaller than the
+ machine can push exactly, we need to use move instructions. */
+ if (stack
+ && (PUSH_ROUNDING (GET_MODE_SIZE (submode))
+ != GET_MODE_SIZE (submode)))
+ {
+ rtx temp;
+ HOST_WIDE_INT offset1, offset2;
+
+ /* Do not use anti_adjust_stack, since we don't want to update
+ stack_pointer_delta. */
+ temp = expand_binop (Pmode,
+#ifdef STACK_GROWS_DOWNWARD
+ sub_optab,
+#else
+ add_optab,
#endif
+ stack_pointer_rtx,
+ GEN_INT
+ (PUSH_ROUNDING
+ (GET_MODE_SIZE (GET_MODE (x)))),
+ stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
- /* Note that the real part always precedes the imag part in memory
- regardless of machine's endianness. */
- switch (GET_CODE (XEXP (x, 0)))
- {
- case PRE_DEC:
- case POST_DEC:
- imag_first = true;
- break;
- case PRE_INC:
- case POST_INC:
- imag_first = false;
- break;
- default:
- abort ();
- }
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
- emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- read_complex_part (y, imag_first));
- return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
- read_complex_part (y, !imag_first));
-}
-
-/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
- MODE is known to be complex. Returns the last instruction emitted. */
-
-static rtx
-emit_move_complex (enum machine_mode mode, rtx x, rtx y)
-{
- bool try_int;
+#ifdef STACK_GROWS_DOWNWARD
+ offset1 = 0;
+ offset2 = GET_MODE_SIZE (submode);
+#else
+ offset1 = -PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)));
+ offset2 = (-PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)))
+ + GET_MODE_SIZE (submode));
+#endif
- /* Need to take special care for pushes, to maintain proper ordering
- of the data, and possibly extra padding. */
- if (push_operand (x, mode))
- return emit_move_complex_push (mode, x, y);
+ emit_move_insn (change_address (x, submode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset1))),
+ gen_realpart (submode, y));
+ emit_move_insn (change_address (x, submode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset2))),
+ gen_imagpart (submode, y));
+ }
+ else
+#endif
+ /* If this is a stack, push the highpart first, so it
+ will be in the argument order.
- /* For memory to memory moves, optimal behaviour can be had with the
- existing block move logic. */
- if (GET_CODE (x) == MEM && GET_CODE (y) == MEM)
- {
- x = adjust_address (x, BLKmode, 0);
- y = adjust_address (y, BLKmode, 0);
- emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)),
- BLOCK_OP_NO_LIBCALL);
- return get_last_insn ();
- }
+ In that case, change_address is used only to convert
+ the mode, not to change the address. */
+ if (stack)
+ {
+ /* Note that the real part always precedes the imag part in memory
+ regardless of machine's endianness. */
+#ifdef STACK_GROWS_DOWNWARD
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_imagpart (submode, y));
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_realpart (submode, y));
+#else
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_realpart (submode, y));
+ emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+ gen_imagpart (submode, y));
+#endif
+ }
+ else
+ {
+ rtx realpart_x, realpart_y;
+ rtx imagpart_x, imagpart_y;
+
+ /* If this is a complex value with each part being smaller than a
+ word, the usual calling sequence will likely pack the pieces into
+ a single register. Unfortunately, SUBREG of hard registers only
+ deals in terms of words, so we have a problem converting input
+ arguments to the CONCAT of two registers that is used elsewhere
+ for complex values. If this is before reload, we can copy it into
+ memory and reload. FIXME, we should see about using extract and
+ insert on integer registers, but complex short and complex char
+ variables should be rarely used. */
+ if (GET_MODE_BITSIZE (mode) < 2 * BITS_PER_WORD
+ && (reload_in_progress | reload_completed) == 0)
+ {
+ int packed_dest_p
+ = (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER);
+ int packed_src_p
+ = (REG_P (y) && REGNO (y) < FIRST_PSEUDO_REGISTER);
- /* See if we can coerce the target into moving both values at once. */
-
- /* Not possible if the values are inherently not adjacent. */
- if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
- try_int = false;
- /* Is possible if both are registers (or subregs of registers). */
- else if (register_operand (x, mode) && register_operand (y, mode))
- try_int = true;
- /* If one of the operands is a memory, and alignment constraints
- are friendly enough, we may be able to do combined memory operations.
- We do not attempt this if Y is a constant because that combination is
- usually better with the by-parts thing below. */
- else if ((GET_CODE (x) == MEM ? !CONSTANT_P (y) : GET_CODE (y) == MEM)
- && (!STRICT_ALIGNMENT
- || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
- try_int = true;
- else
- try_int = false;
+ if (packed_dest_p || packed_src_p)
+ {
+ enum mode_class reg_class = ((class == MODE_COMPLEX_FLOAT)
+ ? MODE_FLOAT : MODE_INT);
- if (try_int)
- {
- rtx ret = emit_move_via_integer (mode, x, y);
- if (ret)
- return ret;
- }
+ enum machine_mode reg_mode
+ = mode_for_size (GET_MODE_BITSIZE (mode), reg_class, 1);
- /* Show the output dies here. This is necessary for SUBREGs
- of pseudos since we cannot track their lifetimes correctly;
- hard regs shouldn't appear here except as return values. */
- if (!reload_completed && !reload_in_progress
- && GET_CODE (x) == REG && !reg_overlap_mentioned_p (x, y))
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+ if (reg_mode != BLKmode)
+ {
+ rtx mem = assign_stack_temp (reg_mode,
+ GET_MODE_SIZE (mode), 0);
+ rtx cmem = adjust_address (mem, mode, 0);
- write_complex_part (x, read_complex_part (y, false), false);
- write_complex_part (x, read_complex_part (y, true), true);
- return get_last_insn ();
-}
+ cfun->cannot_inline
+ = N_("function using short complex types cannot be inline");
-/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
- MODE is known to be MODE_CC. Returns the last instruction emitted. */
+ if (packed_dest_p)
+ {
+ rtx sreg = gen_rtx_SUBREG (reg_mode, x, 0);
-static rtx
-emit_move_ccmode (enum machine_mode mode, rtx x, rtx y)
-{
- rtx ret;
+ emit_move_insn_1 (cmem, y);
+ return emit_move_insn_1 (sreg, mem);
+ }
+ else
+ {
+ rtx sreg = gen_rtx_SUBREG (reg_mode, y, 0);
- /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
- if (mode != CCmode)
- {
- enum insn_code code = mov_optab->handlers[CCmode].insn_code;
- if (code != CODE_FOR_nothing)
- return emit_move_via_alt_mode (CCmode, mode, code, x, y);
- }
+ emit_move_insn_1 (mem, sreg);
+ return emit_move_insn_1 (x, cmem);
+ }
+ }
+ }
+ }
- /* Otherwise, find the MODE_INT mode of the same width. */
- ret = emit_move_via_integer (mode, x, y);
- if (ret == NULL)
- abort ();
- return ret;
-}
+ realpart_x = gen_realpart (submode, x);
+ realpart_y = gen_realpart (submode, y);
+ imagpart_x = gen_imagpart (submode, x);
+ imagpart_y = gen_imagpart (submode, y);
-/* A subroutine of emit_move_insn_1. Generate a move from Y into X.
- MODE is any multi-word or full-word mode that lacks a move_insn
- pattern. Note that you will get better code if you define such
- patterns, even if they must turn into multiple assembler instructions. */
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed)
+ && (GET_CODE (realpart_x) == SUBREG
+ || GET_CODE (imagpart_x) == SUBREG))
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
-static rtx
-emit_move_multi_word (enum machine_mode mode, rtx x, rtx y)
-{
- rtx last_insn = 0;
- rtx seq, inner;
- bool need_clobber;
- int i;
-
- if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
- abort ();
-
- /* If X is a push on the stack, do the push now and replace
- X with a reference to the stack pointer. */
- if (push_operand (x, mode))
- x = emit_move_resolve_push (mode, x);
-
- /* If we are in reload, see if either operand is a MEM whose address
- is scheduled for replacement. */
- if (reload_in_progress && GET_CODE (x) == MEM
- && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
- x = replace_equiv_address_nv (x, inner);
- if (reload_in_progress && GET_CODE (y) == MEM
- && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
- y = replace_equiv_address_nv (y, inner);
-
- start_sequence ();
-
- need_clobber = false;
- for (i = 0;
- i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
- i++)
- {
- rtx xpart = operand_subword (x, i, 1, mode);
- rtx ypart = operand_subword (y, i, 1, mode);
-
- /* If we can't get a part of Y, put Y into memory if it is a
- constant. Otherwise, force it into a register. If we still
- can't get a part of Y, abort. */
- if (ypart == 0 && CONSTANT_P (y))
- {
- y = force_const_mem (mode, y);
- ypart = operand_subword (y, i, 1, mode);
+ emit_move_insn (realpart_x, realpart_y);
+ emit_move_insn (imagpart_x, imagpart_y);
}
- else if (ypart == 0)
- ypart = operand_subword_force (y, i, mode);
-
- if (xpart == 0 || ypart == 0)
- abort ();
-
- need_clobber |= (GET_CODE (xpart) == SUBREG);
- last_insn = emit_move_insn (xpart, ypart);
+ return get_last_insn ();
}
- seq = get_insns ();
- end_sequence ();
-
- /* Show the output dies here. This is necessary for SUBREGs
- of pseudos since we cannot track their lifetimes correctly;
- hard regs shouldn't appear here except as return values.
- We never want to emit such a clobber after reload. */
- if (x != y
- && ! (reload_in_progress || reload_completed)
- && need_clobber != 0)
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
-
- emit_insn (seq);
-
- return last_insn;
-}
+ /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
+ find a mode to do it in. If we have a movcc, use it. Otherwise,
+ find the MODE_INT mode of the same width. */
+ else if (GET_MODE_CLASS (mode) == MODE_CC
+ && mov_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
+ {
+ enum insn_code insn_code;
+ enum machine_mode tmode = VOIDmode;
+ rtx x1 = x, y1 = y;
-/* Low level part of emit_move_insn.
- Called just like emit_move_insn, but assumes X and Y
- are basically valid. */
+ if (mode != CCmode
+ && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
+ tmode = CCmode;
+ else
+ for (tmode = QImode; tmode != VOIDmode;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
+ break;
-rtx
-emit_move_insn_1 (rtx x, rtx y)
-{
- enum machine_mode mode = GET_MODE (x);
- enum insn_code code;
+ if (tmode == VOIDmode)
+ abort ();
- if ((unsigned int) mode >= (unsigned int) MAX_MACHINE_MODE)
- abort ();
+ /* Get X and Y in TMODE. We can't use gen_lowpart here because it
+ may call change_address which is not appropriate if we were
+ called when a reload was in progress. We don't have to worry
+ about changing the address since the size in bytes is supposed to
+ be the same. Copy the MEM to change the mode and move any
+ substitutions from the old MEM to the new one. */
- code = mov_optab->handlers[mode].insn_code;
- if (code != CODE_FOR_nothing)
- return emit_insn (GEN_FCN (code) (x, y));
+ if (reload_in_progress)
+ {
+ x = gen_lowpart_common (tmode, x1);
+ if (x == 0 && GET_CODE (x1) == MEM)
+ {
+ x = adjust_address_nv (x1, tmode, 0);
+ copy_replacements (x1, x);
+ }
- /* Expand complex moves by moving real part and imag part. */
- if (COMPLEX_MODE_P (mode))
- return emit_move_complex (mode, x, y);
+ y = gen_lowpart_common (tmode, y1);
+ if (y == 0 && GET_CODE (y1) == MEM)
+ {
+ y = adjust_address_nv (y1, tmode, 0);
+ copy_replacements (y1, y);
+ }
+ }
+ else
+ {
+ x = gen_lowpart (tmode, x);
+ y = gen_lowpart (tmode, y);
+ }
- if (GET_MODE_CLASS (mode) == MODE_CC)
- return emit_move_ccmode (mode, x, y);
+ insn_code = mov_optab->handlers[(int) tmode].insn_code;
+ return emit_insn (GEN_FCN (insn_code) (x, y));
+ }
/* Try using a move pattern for the corresponding integer mode. This is
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
- if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- {
- rtx ret = emit_move_via_integer (mode, x, y);
- if (ret)
- return ret;
- }
-
- return emit_move_multi_word (mode, x, y);
-}
-
-/* Generate code to copy Y into X.
- Both Y and X must have the same mode, except that
- Y can be a constant with VOIDmode.
- This mode cannot be BLKmode; use emit_block_move for that.
+ else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (submode = int_mode_for_mode (mode)) != BLKmode
+ && mov_optab->handlers[submode].insn_code != CODE_FOR_nothing)
+ return emit_insn (GEN_FCN (mov_optab->handlers[submode].insn_code)
+ (simplify_gen_subreg (submode, x, mode, 0),
+ simplify_gen_subreg (submode, y, mode, 0)));
+
+ /* This will handle any multi-word or full-word mode that lacks a move_insn
+ pattern. However, you will get better code if you define such patterns,
+ even if they must turn into multiple assembler instructions. */
+ else if (GET_MODE_SIZE (mode) >= UNITS_PER_WORD)
+ {
+ rtx last_insn = 0;
+ rtx seq, inner;
+ int need_clobber;
+ int i;
- Return the last instruction emitted. */
+#ifdef PUSH_ROUNDING
-rtx
-emit_move_insn (rtx x, rtx y)
-{
- enum machine_mode mode = GET_MODE (x);
- rtx y_cst = NULL_RTX;
- rtx last_insn, set;
+ /* If X is a push on the stack, do the push now and replace
+ X with a reference to the stack pointer. */
+ if (push_operand (x, GET_MODE (x)))
+ {
+ rtx temp;
+ enum rtx_code code;
- x = protect_from_queue (x, 1);
- y = protect_from_queue (y, 0);
+ /* Do not use anti_adjust_stack, since we don't want to update
+ stack_pointer_delta. */
+ temp = expand_binop (Pmode,
+#ifdef STACK_GROWS_DOWNWARD
+ sub_optab,
+#else
+ add_optab,
+#endif
+ stack_pointer_rtx,
+ GEN_INT
+ (PUSH_ROUNDING
+ (GET_MODE_SIZE (GET_MODE (x)))),
+ stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
+
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
+
+ code = GET_CODE (XEXP (x, 0));
+
+ /* Just hope that small offsets off SP are OK. */
+ if (code == POST_INC)
+ temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (-((HOST_WIDE_INT)
+ GET_MODE_SIZE (GET_MODE (x)))));
+ else if (code == POST_DEC)
+ temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (GET_MODE_SIZE (GET_MODE (x))));
+ else
+ temp = stack_pointer_rtx;
- if (mode == BLKmode || (GET_MODE (y) != mode && GET_MODE (y) != VOIDmode))
- abort ();
+ x = change_address (x, VOIDmode, temp);
+ }
+#endif
- /* Never force constant_p_rtx to memory. */
- if (GET_CODE (y) == CONSTANT_P_RTX)
- ;
- else if (CONSTANT_P (y))
- {
- if (optimize
- && SCALAR_FLOAT_MODE_P (GET_MODE (x))
- && (last_insn = compress_float_constant (x, y)))
- return last_insn;
+ /* If we are in reload, see if either operand is a MEM whose address
+ is scheduled for replacement. */
+ if (reload_in_progress && GET_CODE (x) == MEM
+ && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
+ x = replace_equiv_address_nv (x, inner);
+ if (reload_in_progress && GET_CODE (y) == MEM
+ && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
+ y = replace_equiv_address_nv (y, inner);
- y_cst = y;
+ start_sequence ();
- if (!LEGITIMATE_CONSTANT_P (y))
+ need_clobber = 0;
+ for (i = 0;
+ i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ i++)
{
- y = force_const_mem (mode, y);
+ rtx xpart = operand_subword (x, i, 1, mode);
+ rtx ypart = operand_subword (y, i, 1, mode);
- /* If the target's cannot_force_const_mem prevented the spill,
- assume that the target's move expanders will also take care
- of the non-legitimate constant. */
- if (!y)
- y = y_cst;
- }
- }
+ /* If we can't get a part of Y, put Y into memory if it is a
+ constant. Otherwise, force it into a register. If we still
+ can't get a part of Y, abort. */
+ if (ypart == 0 && CONSTANT_P (y))
+ {
+ y = force_const_mem (mode, y);
+ ypart = operand_subword (y, i, 1, mode);
+ }
+ else if (ypart == 0)
+ ypart = operand_subword_force (y, i, mode);
- /* If X or Y are memory references, verify that their addresses are valid
- for the machine. */
- if (GET_CODE (x) == MEM
- && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
- && ! push_operand (x, GET_MODE (x)))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
- x = validize_mem (x);
+ if (xpart == 0 || ypart == 0)
+ abort ();
- if (GET_CODE (y) == MEM
- && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
- || (flag_force_addr
- && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
- y = validize_mem (y);
+ need_clobber |= (GET_CODE (xpart) == SUBREG);
- if (mode == BLKmode)
- abort ();
+ last_insn = emit_move_insn (xpart, ypart);
+ }
- last_insn = emit_move_insn_1 (x, y);
+ seq = get_insns ();
+ end_sequence ();
- if (y_cst && GET_CODE (x) == REG
- && (set = single_set (last_insn)) != NULL_RTX
- && SET_DEST (set) == x
- && ! rtx_equal_p (y_cst, SET_SRC (set)))
- set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed)
+ && need_clobber != 0)
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
- return last_insn;
+ emit_insn (seq);
+
+ return last_insn;
+ }
+ else
+ abort ();
}
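
The operand_subword loop above copies a multi-word value one word-sized piece at a time when no direct move pattern exists for the mode. A standalone model of the same word-by-word copy (not GCC code; UNITS_PER_WORD here is an assumed host word size):

    #include <stdio.h>
    #include <string.h>

    #define UNITS_PER_WORD sizeof (long)   /* assumed word size */

    static void
    move_multi_word (void *x, const void *y, size_t size)
    {
      size_t nwords = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
      size_t i;

      for (i = 0; i < nwords; i++)
        {
          long word = 0;
          size_t n = size - i * UNITS_PER_WORD;

          if (n > UNITS_PER_WORD)
            n = UNITS_PER_WORD;
          /* Move one word-sized piece, as each emit_move_insn (xpart, ypart)
             does in the loop above.  */
          memcpy (&word, (const char *) y + i * UNITS_PER_WORD, n);
          memcpy ((char *) x + i * UNITS_PER_WORD, &word, n);
        }
    }

    int
    main (void)
    {
      char src[16] = "hello, world", dst[16];

      move_multi_word (dst, src, sizeof src);
      printf ("%s\n", dst);   /* prints "hello, world" */
      return 0;
    }
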
/* If Y is representable exactly in a narrower mode, and the target can
an array element in an unaligned packed structure field, has the same
problem. */
- if (handled_component_p (to)
+ if (TREE_CODE (to) == COMPONENT_REF || TREE_CODE (to) == BIT_FIELD_REF
+ || TREE_CODE (to) == ARRAY_REF || TREE_CODE (to) == ARRAY_RANGE_REF
|| TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
{
enum machine_mode mode1;
offset));
}
- if (GET_CODE (to_rtx) == CONCAT)
+ if (GET_CODE (to_rtx) == MEM)
{
- if (TREE_CODE (TREE_TYPE (from)) == COMPLEX_TYPE)
- result = store_expr (from, to_rtx, false);
- else
- {
- if (bitpos != 0 && bitpos != GET_MODE_BITSIZE (mode1))
- abort ();
- result = store_expr (from, XEXP (to_rtx, bitpos != 0), false);
- }
- }
- else
- {
- if (GET_CODE (to_rtx) == MEM)
- {
- /* If the field is at offset zero, we could have been given
- the DECL_RTX of the parent struct. Don't munge it. */
- to_rtx = shallow_copy_rtx (to_rtx);
-
- set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
+ /* If the field is at offset zero, we could have been given the
+ DECL_RTX of the parent struct. Don't munge it. */
+ to_rtx = shallow_copy_rtx (to_rtx);
- /* Deal with volatile and readonly fields. The former is only
- done for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
- if (volatilep)
- {
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- MEM_VOLATILE_P (to_rtx) = 1;
- }
- }
+ set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
+ }
- if (GET_CODE (to_rtx) == MEM && !can_address_p (to))
- {
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
- }
+ /* Deal with volatile and readonly fields. The former is only done
+ for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
+ if (volatilep && GET_CODE (to_rtx) == MEM)
+ {
+ if (to_rtx == orig_to_rtx)
+ to_rtx = copy_rtx (to_rtx);
+ MEM_VOLATILE_P (to_rtx) = 1;
+ }
- /* We can't assert that a MEM won't be set more than once if the
- component is not addressable because another non-addressable
- component may be referenced by the same MEM. */
- else if (TREE_CODE (to) == COMPONENT_REF
- && TREE_READONLY (TREE_OPERAND (to, 1)))
- {
- if (to_rtx == orig_to_rtx)
- to_rtx = copy_rtx (to_rtx);
- RTX_UNCHANGING_P (to_rtx) = 1;
- }
+ if (TREE_CODE (to) == COMPONENT_REF
+ && TREE_READONLY (TREE_OPERAND (to, 1))
+ /* We can't assert that a MEM won't be set more than once
+ if the component is not addressable because another
+ non-addressable component may be referenced by the same MEM. */
+ && ! (GET_CODE (to_rtx) == MEM && ! can_address_p (to)))
+ {
+ if (to_rtx == orig_to_rtx)
+ to_rtx = copy_rtx (to_rtx);
+ RTX_UNCHANGING_P (to_rtx) = 1;
+ }
- result = store_field (to_rtx, bitsize, bitpos, mode1, from,
- (want_value
- /* Spurious cast for HPUX compiler. */
- ? ((enum machine_mode)
- TYPE_MODE (TREE_TYPE (to)))
- : VOIDmode),
- unsignedp, TREE_TYPE (tem),
- get_alias_set (to));
+ if (GET_CODE (to_rtx) == MEM && ! can_address_p (to))
+ {
+ if (to_rtx == orig_to_rtx)
+ to_rtx = copy_rtx (to_rtx);
+ MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
+ result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ (want_value
+ /* Spurious cast for HPUX compiler. */
+ ? ((enum machine_mode)
+ TYPE_MODE (TREE_TYPE (to)))
+ : VOIDmode),
+ unsignedp, TREE_TYPE (tem), get_alias_set (to));
+
preserve_temp_slots (result);
free_temp_slots ();
pop_temp_slots ();
and find the ultimate containing object. */
while (1)
{
- switch (TREE_CODE (exp))
+ if (TREE_CODE (exp) == BIT_FIELD_REF)
+ bit_offset = size_binop (PLUS_EXPR, bit_offset, TREE_OPERAND (exp, 2));
+ else if (TREE_CODE (exp) == COMPONENT_REF)
{
- case BIT_FIELD_REF:
- bit_offset = size_binop (PLUS_EXPR, bit_offset,
- TREE_OPERAND (exp, 2));
- break;
+ tree field = TREE_OPERAND (exp, 1);
+ tree this_offset = DECL_FIELD_OFFSET (field);
- case COMPONENT_REF:
- {
- tree field = TREE_OPERAND (exp, 1);
- tree this_offset = DECL_FIELD_OFFSET (field);
-
- /* If this field hasn't been filled in yet, don't go
- past it. This should only happen when folding expressions
- made during type construction. */
- if (this_offset == 0)
- goto done;
- if (CONTAINS_PLACEHOLDER_P (this_offset))
- this_offset = build (WITH_RECORD_EXPR, sizetype,
- this_offset, exp);
-
- offset = size_binop (PLUS_EXPR, offset, this_offset);
- bit_offset = size_binop (PLUS_EXPR, bit_offset,
- DECL_FIELD_BIT_OFFSET (field));
-
- /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
- }
- break;
+ /* If this field hasn't been filled in yet, don't go
+ past it. This should only happen when folding expressions
+ made during type construction. */
+ if (this_offset == 0)
+ break;
+ else if (CONTAINS_PLACEHOLDER_P (this_offset))
+ this_offset = build (WITH_RECORD_EXPR, sizetype, this_offset, exp);
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- {
- tree index = TREE_OPERAND (exp, 1);
- tree array = TREE_OPERAND (exp, 0);
- tree domain = TYPE_DOMAIN (TREE_TYPE (array));
- tree low_bound = (domain ? TYPE_MIN_VALUE (domain) : 0);
- tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (array)));
-
- /* We assume all arrays have sizes that are a multiple of a byte.
- First subtract the lower bound, if any, in the type of the
- index, then convert to sizetype and multiply by the size of the
- array element. */
- if (low_bound != 0 && ! integer_zerop (low_bound))
- index = fold (build (MINUS_EXPR, TREE_TYPE (index),
- index, low_bound));
-
- /* If the index has a self-referential type, pass it to a
- WITH_RECORD_EXPR; if the component size is, pass our
- component to one. */
- if (CONTAINS_PLACEHOLDER_P (index))
- index = build (WITH_RECORD_EXPR, TREE_TYPE (index), index, exp);
- if (CONTAINS_PLACEHOLDER_P (unit_size))
- unit_size = build (WITH_RECORD_EXPR, sizetype, unit_size, array);
-
- offset = size_binop (PLUS_EXPR, offset,
- size_binop (MULT_EXPR,
- convert (sizetype, index),
- unit_size));
- }
- break;
+ offset = size_binop (PLUS_EXPR, offset, this_offset);
+ bit_offset = size_binop (PLUS_EXPR, bit_offset,
+ DECL_FIELD_BIT_OFFSET (field));
- case PLACEHOLDER_EXPR:
- {
- tree new = find_placeholder (exp, &placeholder_ptr);
+ /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
+ }
- /* If we couldn't find the replacement, return the PLACEHOLDER_EXPR.
- We might have been called from tree optimization where we
- haven't set up an object yet. */
- if (new == 0)
- goto done;
+ else if (TREE_CODE (exp) == ARRAY_REF
+ || TREE_CODE (exp) == ARRAY_RANGE_REF)
+ {
+ tree index = TREE_OPERAND (exp, 1);
+ tree array = TREE_OPERAND (exp, 0);
+ tree domain = TYPE_DOMAIN (TREE_TYPE (array));
+ tree low_bound = (domain ? TYPE_MIN_VALUE (domain) : 0);
+ tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (array)));
- exp = new;
- }
- continue;
+ /* We assume all arrays have sizes that are a multiple of a byte.
+ First subtract the lower bound, if any, in the type of the
+ index, then convert to sizetype and multiply by the size of the
+ array element. */
+ if (low_bound != 0 && ! integer_zerop (low_bound))
+ index = fold (build (MINUS_EXPR, TREE_TYPE (index),
+ index, low_bound));
- case REALPART_EXPR:
- break;
-
- case IMAGPART_EXPR:
- bit_offset = size_binop (PLUS_EXPR, bit_offset,
- bitsize_int (*pbitsize));
- break;
+ /* If the index has a self-referential type, pass it to a
+ WITH_RECORD_EXPR; if the component size is, pass our
+ component to one. */
+ if (CONTAINS_PLACEHOLDER_P (index))
+ index = build (WITH_RECORD_EXPR, TREE_TYPE (index), index, exp);
+ if (CONTAINS_PLACEHOLDER_P (unit_size))
+ unit_size = build (WITH_RECORD_EXPR, sizetype, unit_size, array);
- /* We can go inside most conversions: all NON_VALUE_EXPRs, all normal
- conversions that don't change the mode, and all view conversions
- except those that need to "step up" the alignment. */
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (MULT_EXPR,
+ convert (sizetype, index),
+ unit_size));
+ }
- case NON_LVALUE_EXPR:
- break;
+ else if (TREE_CODE (exp) == PLACEHOLDER_EXPR)
+ {
+ tree new = find_placeholder (exp, &placeholder_ptr);
- case NOP_EXPR:
- case CONVERT_EXPR:
- if (TYPE_MODE (TREE_TYPE (exp))
- != TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
- goto done;
- break;
-
- case VIEW_CONVERT_EXPR:
- if ((TYPE_ALIGN (TREE_TYPE (exp))
- > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
- && STRICT_ALIGNMENT
- && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
- < BIGGEST_ALIGNMENT)
- && (TYPE_ALIGN_OK (TREE_TYPE (exp))
- || TYPE_ALIGN_OK (TREE_TYPE (TREE_OPERAND (exp, 0)))))
- goto done;
- break;
-
- default:
- goto done;
+ /* If we couldn't find the replacement, return the PLACEHOLDER_EXPR.
+ We might have been called from tree optimization where we
+ haven't set up an object yet. */
+ if (new == 0)
+ break;
+ else
+ exp = new;
+
+ continue;
}
+ /* We can go inside most conversions: all NON_VALUE_EXPRs, all normal
+ conversions that don't change the mode, and all view conversions
+ except those that need to "step up" the alignment. */
+ else if (TREE_CODE (exp) != NON_LVALUE_EXPR
+ && ! (TREE_CODE (exp) == VIEW_CONVERT_EXPR
+ && ! ((TYPE_ALIGN (TREE_TYPE (exp))
+ > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && STRICT_ALIGNMENT
+ && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ < BIGGEST_ALIGNMENT)
+ && (TYPE_ALIGN_OK (TREE_TYPE (exp))
+ || TYPE_ALIGN_OK (TREE_TYPE
+ (TREE_OPERAND (exp, 0))))))
+ && ! ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))))
+ break;
+
/* If any reference in the chain is volatile, the effect is volatile. */
if (TREE_THIS_VOLATILE (exp))
*pvolatilep = 1;
exp = TREE_OPERAND (exp, 0);
}
- done:
/* If OFFSET is constant, see if we can return the whole thing as a
constant bit position. Otherwise, split it up. */
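
The ARRAY_REF/ARRAY_RANGE_REF arm above computes a byte offset as (index - low_bound) * unit_size via the size_binop/MULT_EXPR sequence. A worked standalone example of that arithmetic (not GCC code; the values are made up for illustration):

    #include <stdio.h>

    int
    main (void)
    {
      long index = 7, low_bound = 2;   /* e.g. an array with lower bound 2 */
      long unit_size = 8;              /* element size in bytes */
      long offset = (index - low_bound) * unit_size;

      printf ("byte offset = %ld\n", offset);   /* prints 40 */
      return 0;
    }
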
case ARRAY_RANGE_REF:
case NON_LVALUE_EXPR:
case VIEW_CONVERT_EXPR:
- case REALPART_EXPR:
- case IMAGPART_EXPR:
return 1;
/* ??? Sure they are handled, but get_inner_reference may return
case ENTRY_VALUE_EXPR:
abort ();
+ /* COMPLEX type for Extended Pascal & Fortran */
case COMPLEX_EXPR:
- /* Get the rtx code of the operands. */
- op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
+ rtx insns;
- if (!target)
- target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+ /* Get the rtx code of the operands. */
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
- /* Move the real (op0) and imaginary (op1) parts to their location. */
- write_complex_part (target, op0, false);
- write_complex_part (target, op1, true);
+ if (! target)
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
- return target;
+ start_sequence ();
+
+ /* Move the real (op0) and imaginary (op1) parts to their location. */
+ emit_move_insn (gen_realpart (mode, target), op0);
+ emit_move_insn (gen_imagpart (mode, target), op1);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ /* Complex construction should appear as a single unit. */
+ /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS,
+ each with a separate pseudo as destination.
+ It's not correct for flow to treat them as a unit. */
+ if (GET_CODE (target) != CONCAT)
+ emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
+ else
+ emit_insn (insns);
+
+ return target;
+ }
case REALPART_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- return read_complex_part (op0, false);
+ return gen_realpart (mode, op0);
case IMAGPART_EXPR:
op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
- return read_complex_part (op0, true);
+ return gen_imagpart (mode, op0);
case CONJ_EXPR:
{
enum machine_mode partmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
+ rtx imag_t;
rtx insns;
op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
start_sequence ();
/* Store the realpart and the negated imagpart to target. */
- write_complex_part (target, read_complex_part (op0, false), false);
+ emit_move_insn (gen_realpart (partmode, target),
+ gen_realpart (partmode, op0));
+ imag_t = gen_imagpart (partmode, target);
temp = expand_unop (partmode,
! unsignedp && flag_trapv
&& (GET_MODE_CLASS(partmode) == MODE_INT)
? negv_optab : neg_optab,
- read_complex_part (op0, true), NULL_RTX, 0);
- write_complex_part (target, temp, true);
+ gen_imagpart (partmode, op0), imag_t, 0);
+ if (temp != imag_t)
+ emit_move_insn (imag_t, temp);
insns = get_insns ();
end_sequence ();
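
The CONJ_EXPR expansion above stores the real part unchanged and the negated imaginary part, i.e. conj(a+ib) = a-ib. A one-line standalone check of that identity (not GCC code):

    #include <stdio.h>

    int
    main (void)
    {
      double re = 3.0, im = 4.0;
      /* conj(a+ib) = a - ib: keep the real part, negate the imaginary part.  */
      printf ("conj(%g%+gi) = %g%+gi\n", re, im, re, -im);
      return 0;
    }
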
{
rtx real0 = 0, imag0 = 0;
rtx real1 = 0, imag1 = 0;
- rtx realr, imagr;
- rtx seq;
+ rtx realr, imagr, res;
+ rtx seq, result;
int ok = 0;
/* Find the correct mode for the real and imaginary parts. */
if (GET_MODE (op0) == mode)
{
- real0 = read_complex_part (op0, false);
- imag0 = read_complex_part (op0, true);
+ real0 = gen_realpart (submode, op0);
+ imag0 = gen_imagpart (submode, op0);
}
else
real0 = op0;
if (GET_MODE (op1) == mode)
{
- real1 = read_complex_part (op1, false);
- imag1 = read_complex_part (op1, true);
+ real1 = gen_realpart (submode, op1);
+ imag1 = gen_imagpart (submode, op1);
}
else
real1 = op1;
if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
abort ();
+ result = gen_reg_rtx (mode);
+ realr = gen_realpart (submode, result);
+ imagr = gen_imagpart (submode, result);
+
switch (binoptab->code)
{
case PLUS:
/* (a+ib) + (c+id) = (a+c) + i(b+d) */
case MINUS:
/* (a+ib) - (c+id) = (a-c) + i(b-d) */
- realr = expand_binop (submode, binoptab, real0, real1,
- NULL_RTX, unsignedp, methods);
- if (realr == 0)
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+
+ if (res == 0)
break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
if (imag0 != 0 && imag1 != 0)
- imagr = expand_binop (submode, binoptab, imag0, imag1,
- NULL_RTX, unsignedp, methods);
+ res = expand_binop (submode, binoptab, imag0, imag1,
+ imagr, unsignedp, methods);
else if (imag0 != 0)
- imagr = force_reg (submode, imag0);
+ res = imag0;
else if (binoptab->code == MINUS)
- imagr = expand_unop (submode,
- (binoptab == subv_optab
- ? negv_optab : neg_optab),
- imag1, NULL_RTX, unsignedp);
+ res = expand_unop (submode,
+ binoptab == subv_optab ? negv_optab : neg_optab,
+ imag1, imagr, unsignedp);
else
- imagr = force_reg (submode, imag1);
- if (imagr == 0)
+ res = imag1;
+
+ if (res == 0)
break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
ok = 1;
break;
case MULT:
/* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
+
if (imag0 != 0 && imag1 != 0)
{
rtx temp1, temp2;
if (temp1 == 0 || temp2 == 0)
break;
- realr = (expand_binop
- (submode,
- binoptab == smulv_optab ? subv_optab : sub_optab,
- temp1, temp2, NULL_RTX, unsignedp, methods));
- if (realr == 0)
+ res = (expand_binop
+ (submode,
+ binoptab == smulv_optab ? subv_optab : sub_optab,
+ temp1, temp2, realr, unsignedp, methods));
+
+ if (res == 0)
break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
temp1 = expand_binop (submode, binoptab, real0, imag1,
NULL_RTX, unsignedp, methods);
if (temp1 == 0 || temp2 == 0)
break;
- imagr = (expand_binop
- (submode,
- binoptab == smulv_optab ? addv_optab : add_optab,
- temp1, temp2, NULL_RTX, unsignedp, methods));
- if (imagr == 0)
+ res = (expand_binop
+ (submode,
+ binoptab == smulv_optab ? addv_optab : add_optab,
+ temp1, temp2, imagr, unsignedp, methods));
+
+ if (res == 0)
break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
ok = 1;
}
real0 = force_reg (submode, real0);
real1 = force_reg (submode, real1);
- realr = expand_binop (submode, binoptab, real0, real1,
- NULL_RTX, unsignedp, methods);
- if (realr == 0)
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+ if (res == 0)
break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
if (imag0 != 0)
- imagr = expand_binop (submode, binoptab, real1, imag0,
- NULL_RTX, unsignedp, methods);
+ res = expand_binop (submode, binoptab,
+ real1, imag0, imagr, unsignedp, methods);
else
- imagr = expand_binop (submode, binoptab, real0, imag1,
- NULL_RTX, unsignedp, methods);
- if (imagr == 0)
+ res = expand_binop (submode, binoptab,
+ real0, imag1, imagr, unsignedp, methods);
+
+ if (res == 0)
break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
ok = 1;
}
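
The MULT case above implements the identity stated in its comment, (a+ib)(c+id) = (ac-bd) + i(ad+cb), as two expand_binop products per part followed by a subtraction and an addition. A standalone numeric check (not GCC code):

    #include <stdio.h>

    int
    main (void)
    {
      double a = 1.0, b = 2.0, c = 3.0, d = 4.0;
      double re = a * c - b * d;    /* ac - bd = -5 */
      double im = a * d + c * b;    /* ad + cb = 10 */

      printf ("(%g%+gi)(%g%+gi) = %g%+gi\n", a, b, c, d, re, im);
      return 0;
    }
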
/* Simply divide the real and imaginary parts by `c' */
if (class == MODE_COMPLEX_FLOAT)
- realr = expand_binop (submode, binoptab, real0, real1,
- NULL_RTX, unsignedp, methods);
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
else
- realr = expand_divmod (0, TRUNC_DIV_EXPR, submode,
- real0, real1, NULL_RTX, unsignedp);
- if (realr == 0)
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ real0, real1, realr, unsignedp);
+
+ if (res == 0)
break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
if (class == MODE_COMPLEX_FLOAT)
- imagr = expand_binop (submode, binoptab, imag0, real1,
- NULL_RTX, unsignedp, methods);
+ res = expand_binop (submode, binoptab, imag0, real1,
+ imagr, unsignedp, methods);
else
- imagr = expand_divmod (0, TRUNC_DIV_EXPR, submode,
- imag0, real1, NULL_RTX, unsignedp);
- if (imagr == 0)
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ imag0, real1, imagr, unsignedp);
+
+ if (res == 0)
break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
ok = 1;
}
else
{
- realr = gen_reg_rtx (submode);
- imagr = gen_reg_rtx (submode);
switch (flag_complex_divide_method)
{
case 0:
{
rtx equiv = gen_rtx_fmt_ee (binoptab->code, mode,
copy_rtx (op0), copy_rtx (op1));
- rtx result = gen_rtx_CONCAT (mode, realr, imagr);
emit_no_conflict_block (seq, result, op0, op1, equiv);
return result;
}
else if (unoptab->code == NEG
&& (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
{
- rtx seq, x;
+ rtx target_piece;
+ rtx x;
+ rtx seq;
/* Find the correct mode for the real and imaginary parts. */
enum machine_mode submode = GET_MODE_INNER (mode);
start_sequence ();
+ target_piece = gen_imagpart (submode, target);
x = expand_unop (submode, unoptab,
- read_complex_part (op0, true),
- NULL_RTX, unsignedp);
- write_complex_part (target, x, true);
+ gen_imagpart (submode, op0),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+ target_piece = gen_realpart (submode, target);
x = expand_unop (submode, unoptab,
- read_complex_part (op0, false),
- NULL_RTX, unsignedp);
- write_complex_part (target, x, false);
+ gen_realpart (submode, op0),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
seq = get_insns ();
end_sequence ();
{
rtx real, imag, total;
- real = read_complex_part (op0, false);
- imag = read_complex_part (op0, true);
+ real = gen_realpart (submode, op0);
+ imag = gen_imagpart (submode, op0);
/* Square both parts. */
real = expand_mult (submode, real, real, NULL_RTX, 0);