1 ;; Machine description of the Renesas M32R cpu for GNU C compiler
2 ;; Copyright (C) 1996, 1997, 1998, 1999, 2001, 2003, 2004
;; Free Software Foundation, Inc.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published
9 ;; by the Free Software Foundation; either version 2, or (at your
10 ;; option) any later version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT
13 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 ;; License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING. If not, write to
19 ;; the Free Software Foundation, 59 Temple Place - Suite 330,
20 ;; Boston, MA 02111-1307, USA.
;; See file "rtl.def" for documentation on define_insn, match_*, et al.
24 ;; UNSPEC_VOLATILE usage
27 (UNSPECV_FLUSH_ICACHE 1)])
31 [(UNSPEC_LOAD_SDA_BASE 2)
33 (UNSPEC_PIC_LOAD_ADDR 4)
38 ;; Insn type. Used to default other attribute values.
40 "int2,int4,load2,load4,load8,store2,store4,store8,shift2,shift4,mul2,div4,uncond_branch,branch,call,multi,misc"
41 (const_string "misc"))
44 (define_attr "length" ""
45 (cond [(eq_attr "type" "int2,load2,store2,shift2,mul2")
48 (eq_attr "type" "int4,load4,store4,shift4,div4")
51 (eq_attr "type" "multi")
54 (eq_attr "type" "uncond_branch,branch,call")
59 ;; The length here is the length of a single asm. Unfortunately it might be
60 ;; 2 or 4 so we must allow for 4. That's ok though.
61 (define_asm_attributes
62 [(set_attr "length" "4")
63 (set_attr "type" "multi")])
65 ;; Whether an instruction is short (16-bit) or long (32-bit).
66 (define_attr "insn_size" "short,long"
67 (if_then_else (eq_attr "type" "int2,load2,store2,shift2,mul2")
68 (const_string "short")
69 (const_string "long")))
71 ;; The target CPU we're compiling for.
72 (define_attr "cpu" "m32r,m32r2,m32rx"
73 (cond [(ne (symbol_ref "TARGET_M32RX") (const_int 0))
74 (const_string "m32rx")
75 (ne (symbol_ref "TARGET_M32R2") (const_int 0))
76 (const_string "m32r2")]
77 (const_string "m32r")))
79 ;; Defines the pipeline where an instruction can be executed on.
80 ;; For the M32R, a short instruction can execute one of the two pipes.
81 ;; For the M32Rx, the restrictions are modelled in the second
82 ;; condition of this attribute definition.
83 (define_attr "m32r_pipeline" "either,s,o,long"
84 (cond [(and (eq_attr "cpu" "m32r")
85 (eq_attr "insn_size" "short"))
86 (const_string "either")
87 (eq_attr "insn_size" "!short")
88 (const_string "long")]
89 (cond [(eq_attr "type" "int2")
90 (const_string "either")
91 (eq_attr "type" "load2,store2,shift2,uncond_branch,branch,call")
93 (eq_attr "type" "mul2")
95 (const_string "long"))))
97 ;; ::::::::::::::::::::
99 ;; :: Pipeline description
101 ;; ::::::::::::::::::::
103 ;; This model is based on Chapter 2, Appendix 3 and Appendix 4 of the
104 ;; "M32R-FPU Software Manual", Revision 1.01, plus additional information
105 ;; obtained by our best friend and mine, Google.
107 ;; The pipeline is modelled as a fetch unit, and a core with a memory unit,
108 ;; two execution units, where "fetch" models IF and D, "memory" for MEM1
109 ;; and MEM2, and "EXEC" for E, E1, E2, EM, and EA. Writeback and
110 ;; bypasses are not modelled.
111 (define_automaton "m32r")
113 ;; We pretend there are two short (16 bits) instruction fetchers. The
114 ;; "s" short fetcher cannot be reserved until the "o" short fetcher is
115 ;; reserved. Some instructions reserve both the left and right fetchers.
116 ;; These fetch units are a hack to get GCC to better pack the instructions
117 ;; for the M32Rx processor, which has two execution pipes.
119 ;; In reality there is only one decoder, which can decode either two 16 bits
120 ;; instructions, or a single 32 bits instruction.
122 ;; Note, "fetch" models both the IF and the D pipeline stages.
124 ;; The m32rx core has two execution pipes. We name them o_E and s_E.
125 ;; In addition, there's a memory unit.
127 (define_cpu_unit "o_IF,s_IF,o_E,s_E,memory" "m32r")
129 ;; Prevent the s pipe from being reserved before the o pipe.
130 (absence_set "s_IF" "o_IF")
131 (absence_set "s_E" "o_E")
133 ;; On the M32Rx, long instructions execute on both pipes, so reserve
134 ;; both fetch slots and both pipes.
135 (define_reservation "long_IF" "o_IF+s_IF")
136 (define_reservation "long_E" "o_E+s_E")
138 ;; ::::::::::::::::::::
140 ;; Simple instructions do 4 stages: IF D E WB. WB is not modelled.
141 ;; Hence, ready latency is 1.
142 (define_insn_reservation "short_left" 1
143 (and (eq_attr "m32r_pipeline" "o")
144 (and (eq_attr "insn_size" "short")
145 (eq_attr "type" "!load2")))
148 (define_insn_reservation "short_right" 1
149 (and (eq_attr "m32r_pipeline" "s")
150 (and (eq_attr "insn_size" "short")
151 (eq_attr "type" "!load2")))
154 (define_insn_reservation "short_either" 1
155 (and (eq_attr "m32r_pipeline" "either")
156 (and (eq_attr "insn_size" "short")
157 (eq_attr "type" "!load2")))
160 (define_insn_reservation "long_m32r" 1
161 (and (eq_attr "cpu" "m32r")
162 (and (eq_attr "insn_size" "long")
163 (eq_attr "type" "!load4,load8")))
166 (define_insn_reservation "long_m32rx" 2
167 (and (eq_attr "m32r_pipeline" "long")
168 (and (eq_attr "insn_size" "long")
169 (eq_attr "type" "!load4,load8")))
172 ;; Load/store instructions do 6 stages: IF D E MEM1 MEM2 WB.
173 ;; MEM1 may require more than one cycle depending on locality. We
174 ;; optimistically assume all memory is nearby, i.e. MEM1 takes only
175 ;; one cycle. Hence, ready latency is 3.
177 ;; The M32Rx can do short load/store only on the left pipe.
178 (define_insn_reservation "short_load_left" 3
179 (and (eq_attr "m32r_pipeline" "o")
180 (and (eq_attr "insn_size" "short")
181 (eq_attr "type" "load2")))
184 (define_insn_reservation "short_load" 3
185 (and (eq_attr "m32r_pipeline" "either")
186 (and (eq_attr "insn_size" "short")
187 (eq_attr "type" "load2")))
188 "s_IF|o_IF,s_E|o_E,memory*2")
190 (define_insn_reservation "long_load" 3
191 (and (eq_attr "cpu" "m32r")
192 (and (eq_attr "insn_size" "long")
193 (eq_attr "type" "load4,load8")))
194 "long_IF,long_E,memory*2")
196 (define_insn_reservation "long_load_m32rx" 3
197 (and (eq_attr "m32r_pipeline" "long")
198 (eq_attr "type" "load4,load8"))
199 "long_IF,long_E,memory*2")
202 ;; Expand prologue as RTL
203 (define_expand "prologue"
208 m32r_expand_prologue ();
213 ;; Move instructions.
215 ;; For QI and HI moves, the register must contain the full properly
216 ;; sign-extended value. nonzero_bits assumes this [otherwise
217 ;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
218 ;; says it's a kludge and the .md files should be fixed instead].
220 (define_expand "movqi"
221 [(set (match_operand:QI 0 "general_operand" "")
222 (match_operand:QI 1 "general_operand" ""))]
226 /* Fixup PIC cases. */
229 if (symbolic_operand (operands[1], QImode))
231 if (reload_in_progress || reload_completed)
232 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
234 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
238 /* Everything except mem = const or mem = mem can be done easily.
239 Objects in the small data area are handled too. */
241 if (GET_CODE (operands[0]) == MEM)
242 operands[1] = force_reg (QImode, operands[1]);
245 (define_insn "*movqi_insn"
246 [(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,r,T,m")
247 (match_operand:QI 1 "move_src_operand" "r,I,JQR,T,m,r,r"))]
248 "register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
257 [(set_attr "type" "int2,int2,int4,load2,load4,store2,store4")
258 (set_attr "length" "2,2,4,2,4,2,4")])
260 (define_expand "movhi"
261 [(set (match_operand:HI 0 "general_operand" "")
262 (match_operand:HI 1 "general_operand" ""))]
266 /* Fixup PIC cases. */
269 if (symbolic_operand (operands[1], HImode))
271 if (reload_in_progress || reload_completed)
272 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
274 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
278 /* Everything except mem = const or mem = mem can be done easily. */
280 if (GET_CODE (operands[0]) == MEM)
281 operands[1] = force_reg (HImode, operands[1]);
284 (define_insn "*movhi_insn"
285 [(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,r,T,m")
286 (match_operand:HI 1 "move_src_operand" "r,I,JQR,K,T,m,r,r"))]
287 "register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
297 [(set_attr "type" "int2,int2,int4,int4,load2,load4,store2,store4")
298 (set_attr "length" "2,2,4,4,2,4,2,4")])
300 (define_expand "movsi_push"
301 [(set (mem:SI (pre_dec:SI (match_operand:SI 0 "register_operand" "")))
302 (match_operand:SI 1 "register_operand" ""))]
306 (define_expand "movsi_pop"
307 [(set (match_operand:SI 0 "register_operand" "")
308 (mem:SI (post_inc:SI (match_operand:SI 1 "register_operand" ""))))]
312 (define_expand "movsi"
313 [(set (match_operand:SI 0 "general_operand" "")
314 (match_operand:SI 1 "general_operand" ""))]
318 /* Fixup PIC cases. */
321 if (symbolic_operand (operands[1], SImode))
323 if (reload_in_progress || reload_completed)
324 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
326 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
330 /* Everything except mem = const or mem = mem can be done easily. */
332 if (GET_CODE (operands[0]) == MEM)
333 operands[1] = force_reg (SImode, operands[1]);
335 /* Small Data Area reference? */
336 if (small_data_operand (operands[1], SImode))
338 emit_insn (gen_movsi_sda (operands[0], operands[1]));
342 /* If medium or large code model, symbols have to be loaded with
344 if (addr32_operand (operands[1], SImode))
346 emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
351 ;; ??? Do we need a const_double constraint here for large unsigned values?
352 (define_insn "*movsi_insn"
353 [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
354 (match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
355 "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
358 if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == SUBREG)
360 switch (GET_CODE (operands[1]))
372 if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
373 && XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
379 value = INTVAL (operands[1]);
381 return \"ldi %0,%#%1\\t; %X1\";
383 if (UINT24_P (value))
384 return \"ld24 %0,%#%1\\t; %X1\";
386 if (UPPER16_P (value))
387 return \"seth %0,%#%T1\\t; %X1\";
395 return \"ld24 %0,%#%1\";
401 else if (GET_CODE (operands[0]) == MEM
402 && (GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG))
404 if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
405 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
413 [(set_attr "type" "int2,int2,int4,int4,int4,multi,load2,load2,load4,store2,store2,store4")
414 (set_attr "length" "2,2,4,4,4,8,2,2,4,2,2,4")])
416 ; Try to use a four byte / two byte pair for constants not loadable with
420 [(set (match_operand:SI 0 "register_operand" "")
421 (match_operand:SI 1 "two_insn_const_operand" ""))]
423 [(set (match_dup 0) (match_dup 2))
424 (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
427 unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
428 unsigned HOST_WIDE_INT tmp;
431 /* In all cases we will emit two instructions. However we try to
432 use 2 byte instructions wherever possible. We can assume the
433 constant isn't loadable with any of ldi, ld24, or seth. */
435 /* See if we can load a 24 bit unsigned value and invert it. */
436 if (UINT24_P (~ val))
438 emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
439 emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
443 /* See if we can load a 24 bit unsigned value and shift it into place.
444 0x01fffffe is just beyond ld24's range. */
445 for (shift = 1, tmp = 0x01fffffe;
449 if ((val & ~tmp) == 0)
451 emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
452 emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
457 /* Can't use any two byte insn, fall back to seth/or3. Use ~0xffff instead
458 of 0xffff0000, since the later fails on a 64-bit host. */
459 operands[2] = GEN_INT ((val) & ~0xffff);
460 operands[3] = GEN_INT ((val) & 0xffff);
464 [(set (match_operand:SI 0 "register_operand" "")
465 (match_operand:SI 1 "seth_add3_operand" ""))]
468 (high:SI (match_dup 1)))
470 (lo_sum:SI (match_dup 0)
474 ;; Small data area support.
475 ;; The address of _SDA_BASE_ is loaded into a register and all objects in
476 ;; the small data area are indexed off that. This is done for each reference
477 ;; but cse will clean things up for us. We let the compiler choose the
478 ;; register to use so we needn't allocate (and maybe even fix) a special
479 ;; register to use. Since the load and store insns have a 16 bit offset the
480 ;; total size of the data area can be 64K. However, if the data area lives
481 ;; above 16M (24 bits), _SDA_BASE_ will have to be loaded with seth/add3 which
482 ;; would then yield 3 instructions to reference an object [though there would
483 ;; be no net loss if two or more objects were referenced]. The 3 insns can be
484 ;; reduced back to 2 if the size of the small data area were reduced to 32K
485 ;; [then seth + ld/st would work for any object in the area]. Doing this
486 ;; would require special handling of _SDA_BASE_ (its value would be
487 ;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
488 ;; [I think]. What to do about this is deferred until later and for now we
489 ;; require .sdata to be in the first 16M.
491 (define_expand "movsi_sda"
493 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))
494 (set (match_operand:SI 0 "register_operand" "")
495 (lo_sum:SI (match_dup 2)
496 (match_operand:SI 1 "small_data_operand" "")))]
500 if (reload_in_progress || reload_completed)
501 operands[2] = operands[0];
503 operands[2] = gen_reg_rtx (SImode);
506 (define_insn "*load_sda_base_32"
507 [(set (match_operand:SI 0 "register_operand" "=r")
508 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
510 "seth %0,%#shigh(_SDA_BASE_)\;add3 %0,%0,%#low(_SDA_BASE_)"
511 [(set_attr "type" "multi")
512 (set_attr "length" "8")])
514 (define_insn "*load_sda_base"
515 [(set (match_operand:SI 0 "register_operand" "=r")
516 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
518 "ld24 %0,#_SDA_BASE_"
519 [(set_attr "type" "int4")
520 (set_attr "length" "4")])
522 ;; 32 bit address support.
524 (define_expand "movsi_addr32"
526 ; addr32_operand isn't used because it's too restrictive,
527 ; seth_add3_operand is more general and thus safer.
528 (high:SI (match_operand:SI 1 "seth_add3_operand" "")))
529 (set (match_operand:SI 0 "register_operand" "")
530 (lo_sum:SI (match_dup 2) (match_dup 1)))]
534 if (reload_in_progress || reload_completed)
535 operands[2] = operands[0];
537 operands[2] = gen_reg_rtx (SImode);
540 (define_insn "set_hi_si"
541 [(set (match_operand:SI 0 "register_operand" "=r")
542 (high:SI (match_operand 1 "symbolic_operand" "")))]
544 "seth %0,%#shigh(%1)"
545 [(set_attr "type" "int4")
546 (set_attr "length" "4")])
548 (define_insn "lo_sum_si"
549 [(set (match_operand:SI 0 "register_operand" "=r")
550 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
551 (match_operand:SI 2 "immediate_operand" "in")))]
554 [(set_attr "type" "int4")
555 (set_attr "length" "4")])
557 (define_expand "movdi"
558 [(set (match_operand:DI 0 "general_operand" "")
559 (match_operand:DI 1 "general_operand" ""))]
563 /* Fixup PIC cases. */
566 if (symbolic_operand (operands[1], DImode))
568 if (reload_in_progress || reload_completed)
569 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
571 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
575 /* Everything except mem = const or mem = mem can be done easily. */
577 if (GET_CODE (operands[0]) == MEM)
578 operands[1] = force_reg (DImode, operands[1]);
581 (define_insn "*movdi_insn"
582 [(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,r,m")
583 (match_operand:DI 1 "move_double_src_operand" "r,nG,F,m,r"))]
584 "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
586 [(set_attr "type" "multi,multi,multi,load8,store8")
587 (set_attr "length" "4,4,16,6,6")])
590 [(set (match_operand:DI 0 "move_dest_operand" "")
591 (match_operand:DI 1 "move_double_src_operand" ""))]
594 "operands[2] = gen_split_move_double (operands);")
596 ;; Floating point move insns.
598 (define_expand "movsf"
599 [(set (match_operand:SF 0 "general_operand" "")
600 (match_operand:SF 1 "general_operand" ""))]
604 /* Fixup PIC cases. */
607 if (symbolic_operand (operands[1], SFmode))
609 if (reload_in_progress || reload_completed)
610 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
612 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
616 /* Everything except mem = const or mem = mem can be done easily. */
618 if (GET_CODE (operands[0]) == MEM)
619 operands[1] = force_reg (SFmode, operands[1]);
622 (define_insn "*movsf_insn"
623 [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
624 (match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
625 "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
635 ;; ??? Length of alternative 1 is either 2, 4 or 8.
636 [(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
637 (set_attr "length" "2,8,2,2,4,2,2,4")])
640 [(set (match_operand:SF 0 "register_operand" "")
641 (match_operand:SF 1 "const_double_operand" ""))]
643 [(set (match_dup 2) (match_dup 3))]
646 operands[2] = operand_subword (operands[0], 0, 0, SFmode);
647 operands[3] = operand_subword (operands[1], 0, 0, SFmode);
650 (define_expand "movdf"
651 [(set (match_operand:DF 0 "general_operand" "")
652 (match_operand:DF 1 "general_operand" ""))]
656 /* Fixup PIC cases. */
659 if (symbolic_operand (operands[1], DFmode))
661 if (reload_in_progress || reload_completed)
662 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
664 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
668 /* Everything except mem = const or mem = mem can be done easily. */
670 if (GET_CODE (operands[0]) == MEM)
671 operands[1] = force_reg (DFmode, operands[1]);
674 (define_insn "*movdf_insn"
675 [(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
676 (match_operand:DF 1 "move_double_src_operand" "r,F,m,r"))]
677 "register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
679 [(set_attr "type" "multi,multi,load8,store8")
680 (set_attr "length" "4,16,6,6")])
683 [(set (match_operand:DF 0 "move_dest_operand" "")
684 (match_operand:DF 1 "move_double_src_operand" ""))]
687 "operands[2] = gen_split_move_double (operands);")
689 ;; Zero extension instructions.
691 (define_insn "zero_extendqihi2"
692 [(set (match_operand:HI 0 "register_operand" "=r,r,r")
693 (zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
699 [(set_attr "type" "int4,load2,load4")
700 (set_attr "length" "4,2,4")])
702 (define_insn "zero_extendqisi2"
703 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
704 (zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
710 [(set_attr "type" "int4,load2,load4")
711 (set_attr "length" "4,2,4")])
713 (define_insn "zero_extendhisi2"
714 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
715 (zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
721 [(set_attr "type" "int4,load2,load4")
722 (set_attr "length" "4,2,4")])
724 ;; Signed conversions from a smaller integer to a larger integer
725 (define_insn "extendqihi2"
726 [(set (match_operand:HI 0 "register_operand" "=r,r,r")
727 (sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
733 [(set_attr "type" "multi,load2,load4")
734 (set_attr "length" "2,2,4")])
737 [(set (match_operand:HI 0 "register_operand" "")
738 (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
744 rtx op0 = gen_lowpart (SImode, operands[0]);
745 rtx shift = GEN_INT (24);
747 operands[2] = gen_ashlsi3 (op0, op0, shift);
748 operands[3] = gen_ashrsi3 (op0, op0, shift);
751 (define_insn "extendqisi2"
752 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
753 (sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
759 [(set_attr "type" "multi,load2,load4")
760 (set_attr "length" "4,2,4")])
763 [(set (match_operand:SI 0 "register_operand" "")
764 (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
770 rtx shift = GEN_INT (24);
772 operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
773 operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
776 (define_insn "extendhisi2"
777 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
778 (sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
784 [(set_attr "type" "multi,load2,load4")
785 (set_attr "length" "4,2,4")])
788 [(set (match_operand:SI 0 "register_operand" "")
789 (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
795 rtx shift = GEN_INT (16);
797 operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
798 operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
801 ;; Arithmetic instructions.
803 ; ??? Adding an alternative to split add3 of small constants into two
804 ; insns yields better instruction packing but slower code. Adds of small
805 ; values is done a lot.
807 (define_insn "addsi3"
808 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
809 (plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
810 (match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
816 [(set_attr "type" "int2,int2,int4")
817 (set_attr "length" "2,2,4")])
820 ; [(set (match_operand:SI 0 "register_operand" "")
821 ; (plus:SI (match_operand:SI 1 "register_operand" "")
822 ; (match_operand:SI 2 "int8_operand" "")))]
824 ; && REGNO (operands[0]) != REGNO (operands[1])
825 ; && INT8_P (INTVAL (operands[2]))
826 ; && INTVAL (operands[2]) != 0"
827 ; [(set (match_dup 0) (match_dup 1))
828 ; (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
831 (define_insn "adddi3"
832 [(set (match_operand:DI 0 "register_operand" "=r")
833 (plus:DI (match_operand:DI 1 "register_operand" "%0")
834 (match_operand:DI 2 "register_operand" "r")))
835 (clobber (reg:CC 17))]
838 [(set_attr "type" "multi")
839 (set_attr "length" "6")])
841 ;; ??? The cmp clears the condition bit. Can we speed up somehow?
843 [(set (match_operand:DI 0 "register_operand" "")
844 (plus:DI (match_operand:DI 1 "register_operand" "")
845 (match_operand:DI 2 "register_operand" "")))
846 (clobber (reg:CC 17))]
848 [(parallel [(set (reg:CC 17)
850 (use (match_dup 4))])
851 (parallel [(set (match_dup 4)
852 (plus:SI (match_dup 4)
853 (plus:SI (match_dup 5)
854 (ne:SI (reg:CC 17) (const_int 0)))))
856 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
857 (parallel [(set (match_dup 6)
858 (plus:SI (match_dup 6)
859 (plus:SI (match_dup 7)
860 (ne:SI (reg:CC 17) (const_int 0)))))
862 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
865 operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
866 operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
867 operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
868 operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
871 (define_insn "*clear_c"
874 (use (match_operand:SI 0 "register_operand" "r"))]
877 [(set_attr "type" "int2")
878 (set_attr "length" "2")])
880 (define_insn "*add_carry"
881 [(set (match_operand:SI 0 "register_operand" "=r")
882 (plus:SI (match_operand:SI 1 "register_operand" "%0")
883 (plus:SI (match_operand:SI 2 "register_operand" "r")
884 (ne:SI (reg:CC 17) (const_int 0)))))
886 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
889 [(set_attr "type" "int2")
890 (set_attr "length" "2")])
892 (define_insn "subsi3"
893 [(set (match_operand:SI 0 "register_operand" "=r")
894 (minus:SI (match_operand:SI 1 "register_operand" "0")
895 (match_operand:SI 2 "register_operand" "r")))]
898 [(set_attr "type" "int2")
899 (set_attr "length" "2")])
901 (define_insn "subdi3"
902 [(set (match_operand:DI 0 "register_operand" "=r")
903 (minus:DI (match_operand:DI 1 "register_operand" "0")
904 (match_operand:DI 2 "register_operand" "r")))
905 (clobber (reg:CC 17))]
908 [(set_attr "type" "multi")
909 (set_attr "length" "6")])
911 ;; ??? The cmp clears the condition bit. Can we speed up somehow?
913 [(set (match_operand:DI 0 "register_operand" "")
914 (minus:DI (match_operand:DI 1 "register_operand" "")
915 (match_operand:DI 2 "register_operand" "")))
916 (clobber (reg:CC 17))]
918 [(parallel [(set (reg:CC 17)
920 (use (match_dup 4))])
921 (parallel [(set (match_dup 4)
922 (minus:SI (match_dup 4)
923 (minus:SI (match_dup 5)
924 (ne:SI (reg:CC 17) (const_int 0)))))
926 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
927 (parallel [(set (match_dup 6)
928 (minus:SI (match_dup 6)
929 (minus:SI (match_dup 7)
930 (ne:SI (reg:CC 17) (const_int 0)))))
932 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
935 operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
936 operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
937 operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
938 operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
941 (define_insn "*sub_carry"
942 [(set (match_operand:SI 0 "register_operand" "=r")
943 (minus:SI (match_operand:SI 1 "register_operand" "%0")
944 (minus:SI (match_operand:SI 2 "register_operand" "r")
945 (ne:SI (reg:CC 17) (const_int 0)))))
947 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
950 [(set_attr "type" "int2")
951 (set_attr "length" "2")])
953 ; Multiply/Divide instructions.
955 (define_insn "mulhisi3"
956 [(set (match_operand:SI 0 "register_operand" "=r")
957 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
958 (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
960 "mullo %1,%2\;mvfacmi %0"
961 [(set_attr "type" "multi")
962 (set_attr "length" "4")])
964 (define_insn "mulsi3"
965 [(set (match_operand:SI 0 "register_operand" "=r")
966 (mult:SI (match_operand:SI 1 "register_operand" "%0")
967 (match_operand:SI 2 "register_operand" "r")))]
970 [(set_attr "type" "mul2")
971 (set_attr "length" "2")])
973 (define_insn "divsi3"
974 [(set (match_operand:SI 0 "register_operand" "=r")
975 (div:SI (match_operand:SI 1 "register_operand" "0")
976 (match_operand:SI 2 "register_operand" "r")))]
979 [(set_attr "type" "div4")
980 (set_attr "length" "4")])
982 (define_insn "udivsi3"
983 [(set (match_operand:SI 0 "register_operand" "=r")
984 (udiv:SI (match_operand:SI 1 "register_operand" "0")
985 (match_operand:SI 2 "register_operand" "r")))]
988 [(set_attr "type" "div4")
989 (set_attr "length" "4")])
991 (define_insn "modsi3"
992 [(set (match_operand:SI 0 "register_operand" "=r")
993 (mod:SI (match_operand:SI 1 "register_operand" "0")
994 (match_operand:SI 2 "register_operand" "r")))]
997 [(set_attr "type" "div4")
998 (set_attr "length" "4")])
1000 (define_insn "umodsi3"
1001 [(set (match_operand:SI 0 "register_operand" "=r")
1002 (umod:SI (match_operand:SI 1 "register_operand" "0")
1003 (match_operand:SI 2 "register_operand" "r")))]
1006 [(set_attr "type" "div4")
1007 (set_attr "length" "4")])
1009 ;; Boolean instructions.
1011 ;; We don't define the DImode versions as expand_binop does a good enough job.
1012 ;; And if it doesn't it should be fixed.
1014 (define_insn "andsi3"
1015 [(set (match_operand:SI 0 "register_operand" "=r,r")
1016 (and:SI (match_operand:SI 1 "register_operand" "%0,r")
1017 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1021 /* If we are worried about space, see if we can break this up into two
1022 short instructions, which might eliminate a NOP being inserted. */
1024 && m32r_not_same_reg (operands[0], operands[1])
1025 && GET_CODE (operands[2]) == CONST_INT
1026 && INT8_P (INTVAL (operands[2])))
1029 else if (GET_CODE (operands[2]) == CONST_INT)
1030 return \"and3 %0,%1,%#%X2\";
1032 return \"and %0,%2\";
1034 [(set_attr "type" "int2,int4")
1035 (set_attr "length" "2,4")])
1038 [(set (match_operand:SI 0 "register_operand" "")
1039 (and:SI (match_operand:SI 1 "register_operand" "")
1040 (match_operand:SI 2 "int8_operand" "")))]
1041 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1042 [(set (match_dup 0) (match_dup 2))
1043 (set (match_dup 0) (and:SI (match_dup 0) (match_dup 1)))]
1046 (define_insn "iorsi3"
1047 [(set (match_operand:SI 0 "register_operand" "=r,r")
1048 (ior:SI (match_operand:SI 1 "register_operand" "%0,r")
1049 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1053 /* If we are worried about space, see if we can break this up into two
1054 short instructions, which might eliminate a NOP being inserted. */
1056 && m32r_not_same_reg (operands[0], operands[1])
1057 && GET_CODE (operands[2]) == CONST_INT
1058 && INT8_P (INTVAL (operands[2])))
1061 else if (GET_CODE (operands[2]) == CONST_INT)
1062 return \"or3 %0,%1,%#%X2\";
1064 return \"or %0,%2\";
1066 [(set_attr "type" "int2,int4")
1067 (set_attr "length" "2,4")])
1070 [(set (match_operand:SI 0 "register_operand" "")
1071 (ior:SI (match_operand:SI 1 "register_operand" "")
1072 (match_operand:SI 2 "int8_operand" "")))]
1073 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1074 [(set (match_dup 0) (match_dup 2))
1075 (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 1)))]
1078 (define_insn "xorsi3"
1079 [(set (match_operand:SI 0 "register_operand" "=r,r")
1080 (xor:SI (match_operand:SI 1 "register_operand" "%0,r")
1081 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1085 /* If we are worried about space, see if we can break this up into two
1086 short instructions, which might eliminate a NOP being inserted. */
1088 && m32r_not_same_reg (operands[0], operands[1])
1089 && GET_CODE (operands[2]) == CONST_INT
1090 && INT8_P (INTVAL (operands[2])))
1093 else if (GET_CODE (operands[2]) == CONST_INT)
1094 return \"xor3 %0,%1,%#%X2\";
1096 return \"xor %0,%2\";
1098 [(set_attr "type" "int2,int4")
1099 (set_attr "length" "2,4")])
1102 [(set (match_operand:SI 0 "register_operand" "")
1103 (xor:SI (match_operand:SI 1 "register_operand" "")
1104 (match_operand:SI 2 "int8_operand" "")))]
1105 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1106 [(set (match_dup 0) (match_dup 2))
1107 (set (match_dup 0) (xor:SI (match_dup 0) (match_dup 1)))]
1110 (define_insn "negsi2"
1111 [(set (match_operand:SI 0 "register_operand" "=r")
1112 (neg:SI (match_operand:SI 1 "register_operand" "r")))]
1115 [(set_attr "type" "int2")
1116 (set_attr "length" "2")])
1118 (define_insn "one_cmplsi2"
1119 [(set (match_operand:SI 0 "register_operand" "=r")
1120 (not:SI (match_operand:SI 1 "register_operand" "r")))]
1123 [(set_attr "type" "int2")
1124 (set_attr "length" "2")])
1126 ;; Shift instructions.
1128 (define_insn "ashlsi3"
1129 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1130 (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
1131 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1137 [(set_attr "type" "shift2,shift2,shift4")
1138 (set_attr "length" "2,2,4")])
1140 (define_insn "ashrsi3"
1141 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1142 (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
1143 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1149 [(set_attr "type" "shift2,shift2,shift4")
1150 (set_attr "length" "2,2,4")])
1152 (define_insn "lshrsi3"
1153 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1154 (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
1155 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1161 [(set_attr "type" "shift2,shift2,shift4")
1162 (set_attr "length" "2,2,4")])
;; NOTE(review): partial extract with baked-in line numbers; condition and
;; output-template lines are missing between the numbered lines below.
1164 ;; Compare instructions.
1165 ;; This controls RTL generation and register allocation.
1167 ;; We generate RTL for comparisons and branches by having the cmpxx
1168 ;; patterns store away the operands.  Then the bcc patterns
1169 ;; emit RTL for both the compare and the branch.
1171 ;; On the m32r it is more efficient to use the bxxz instructions and
1172 ;; thus merge the compare and branch into one instruction, so they are
;; cmpsi does not emit a compare itself: it only records the two operands in
;; the global variables m32r_compare_op0/op1 for the later bcc/scc expanders.
1175 (define_expand "cmpsi"
1177 	(compare:CC (match_operand:SI 0 "register_operand" "")
1178 		    (match_operand:SI 1 "reg_or_cmp_int16_operand" "")))]
1182   m32r_compare_op0 = operands[0];
1183   m32r_compare_op1 = operands[1];
;; Direct compare-for-equality against reg or zero (M32RX/M32R2 only), 4 bytes.
1187 (define_insn "cmp_eqsi_zero_insn"
1189 	(eq:CC (match_operand:SI 0 "register_operand" "r,r")
1190 	       (match_operand:SI 1 "reg_or_zero_operand" "r,P")))]
1191   "TARGET_M32RX || TARGET_M32R2"
1195   [(set_attr "type" "int4")
1196    (set_attr "length" "4")])
1198 ;; The cmp_xxx_insn patterns set the condition bit to the result of the
1199 ;; comparison.  There isn't a "compare equal" instruction so cmp_eqsi_insn
1200 ;; is quite inefficient.  However, it is rarely used.
;; Synthesizes "eq" via subtract-then-unsigned-compare-with-1, using a
;; scratch register; 8 bytes in both alternatives.
1202 (define_insn "cmp_eqsi_insn"
1204 	(eq:CC (match_operand:SI 0 "register_operand" "r,r")
1205 	       (match_operand:SI 1 "reg_or_cmp_int16_operand" "r,P")))
1206    (clobber (match_scratch:SI 2 "=&r,&r"))]
1210   if (which_alternative == 0)
1212 	return \"mv %2,%0\;sub %2,%1\;cmpui %2,#1\";
1216       if (INTVAL (operands [1]) == 0)
1217 	return \"cmpui %0, #1\";
1218       else if (REGNO (operands [2]) == REGNO (operands [0]))
1219 	return \"addi %0,%#%N1\;cmpui %2,#1\";
1221 	return \"add3 %2,%0,%#%N1\;cmpui %2,#1\";
1224   [(set_attr "type" "multi,multi")
1225    (set_attr "length" "8,8")])
;; Signed less-than compare: 2 bytes reg/reg, 4 bytes with 16-bit immediate.
1227 (define_insn "cmp_ltsi_insn"
1229 	(lt:CC (match_operand:SI 0 "register_operand" "r,r")
1230 	       (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
1235   [(set_attr "type" "int2,int4")
1236    (set_attr "length" "2,4")])
;; Unsigned less-than compare, same alternatives as above.
1238 (define_insn "cmp_ltusi_insn"
1240 	(ltu:CC (match_operand:SI 0 "register_operand" "r,r")
1241 		(match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
1246   [(set_attr "type" "int2,int4")
1247    (set_attr "length" "2,4")])
;; NOTE(review): partial extract; lines are missing between the numbered
;; lines below (expander preludes/conditions).  Each b<cc> expander builds
;; the actual comparison from the operands stashed by "cmpsi" via
;; gen_compare (code, m32r_compare_op0, m32r_compare_op1, FALSE).
1249 ;; These control RTL generation for conditional jump insns.
1251 (define_expand "beq"
1253 		(if_then_else (match_dup 1)
1254 			      (label_ref (match_operand 0 "" ""))
1259   operands[1] = gen_compare (EQ, m32r_compare_op0, m32r_compare_op1, FALSE);
1262 (define_expand "bne"
1264 		(if_then_else (match_dup 1)
1265 			      (label_ref (match_operand 0 "" ""))
1270   operands[1] = gen_compare (NE, m32r_compare_op0, m32r_compare_op1, FALSE);
1273 (define_expand "bgt"
1275 		(if_then_else (match_dup 1)
1276 			      (label_ref (match_operand 0 "" ""))
1281   operands[1] = gen_compare (GT, m32r_compare_op0, m32r_compare_op1, FALSE);
1284 (define_expand "ble"
1286 		(if_then_else (match_dup 1)
1287 			      (label_ref (match_operand 0 "" ""))
1292   operands[1] = gen_compare (LE, m32r_compare_op0, m32r_compare_op1, FALSE);
1295 (define_expand "bge"
1297 		(if_then_else (match_dup 1)
1298 			      (label_ref (match_operand 0 "" ""))
1303   operands[1] = gen_compare (GE, m32r_compare_op0, m32r_compare_op1, FALSE);
1306 (define_expand "blt"
1308 		(if_then_else (match_dup 1)
1309 			      (label_ref (match_operand 0 "" ""))
1314   operands[1] = gen_compare (LT, m32r_compare_op0, m32r_compare_op1, FALSE);
1317 (define_expand "bgtu"
1319 		(if_then_else (match_dup 1)
1320 			      (label_ref (match_operand 0 "" ""))
1325   operands[1] = gen_compare (GTU, m32r_compare_op0, m32r_compare_op1, FALSE);
1328 (define_expand "bleu"
1330 		(if_then_else (match_dup 1)
1331 			      (label_ref (match_operand 0 "" ""))
1336   operands[1] = gen_compare (LEU, m32r_compare_op0, m32r_compare_op1, FALSE);
1339 (define_expand "bgeu"
1341 		(if_then_else (match_dup 1)
1342 			      (label_ref (match_operand 0 "" ""))
1347   operands[1] = gen_compare (GEU, m32r_compare_op0, m32r_compare_op1, FALSE);
1350 (define_expand "bltu"
1352 		(if_then_else (match_dup 1)
1353 			      (label_ref (match_operand 0 "" ""))
1358   operands[1] = gen_compare (LTU, m32r_compare_op0, m32r_compare_op1, FALSE);
;; NOTE(review): partial extract; lines missing between the numbered lines.
1361 ;; Now match both normal and inverted jump.
;; Branch on the condition bit (reg 17).  Emits "bc"/"bnc", with the ".s"
;; (short, 2-byte) suffix when the target is close enough.
1363 (define_insn "*branch_insn"
1365 	(if_then_else (match_operator 1 "eqne_comparison_operator"
1366 				      [(reg 17) (const_int 0)])
1367 		      (label_ref (match_operand 0 "" ""))
1372   static char instruction[40];
1373   sprintf (instruction, \"%s%s %%l0\",
1374 	   (GET_CODE (operands[1]) == NE) ? \"bc\" : \"bnc\",
1375 	   (get_attr_length (insn) == 2) ? \".s\" : \"\");
1378   [(set_attr "type" "branch")
1379    ; We use 400/800 instead of 512,1024 to account for inaccurate insn
1380    ; lengths and insn alignments that are complex to track.
1381    ; It's not important that we be hyper-precise here.  It may be more
1382    ; important blah blah blah when the chip supports parallel execution
1383    ; blah blah blah but until then blah blah blah this is simple and
1385    (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Inverted form: branch target and fall-through swapped, so the opcode
;; choice is reversed (EQ -> "bc").
1391 (define_insn "*rev_branch_insn"
1393 	(if_then_else (match_operator 1 "eqne_comparison_operator"
1394 				      [(reg 17) (const_int 0)])
1396 		      (label_ref (match_operand 0 "" ""))))]
1397   ;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
1401   static char instruction[40];
1402   sprintf (instruction, \"%s%s %%l0\",
1403 	   (GET_CODE (operands[1]) == EQ) ? \"bc\" : \"bnc\",
1404 	   (get_attr_length (insn) == 2) ? \".s\" : \"\");
1407   [(set_attr "type" "branch")
1408    ; We use 400/800 instead of 512,1024 to account for inaccurate insn
1409    ; lengths and insn alignments that are complex to track.
1410    ; It's not important that we be hyper-precise here.  It may be more
1411    ; important blah blah blah when the chip supports parallel execution
1412    ; blah blah blah but until then blah blah blah this is simple and
1414    (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1420 ; reg/reg compare and branch insns
;; Merged compare-and-branch: beq/bne reach ~16 bits; otherwise invert the
;; test and branch around an unconditional "bra" to the real target.
1422 (define_insn "*reg_branch_insn"
1424 	(if_then_else (match_operator 1 "eqne_comparison_operator"
1425 				      [(match_operand:SI 2 "register_operand" "r")
1426 				       (match_operand:SI 3 "register_operand" "r")])
1427 		      (label_ref (match_operand 0 "" ""))
1432   /* Is branch target reachable with beq/bne?  */
1433   if (get_attr_length (insn) == 4)
1435       if (GET_CODE (operands[1]) == EQ)
1436 	return \"beq %2,%3,%l0\";
1438 	return \"bne %2,%3,%l0\";
1442       if (GET_CODE (operands[1]) == EQ)
1443 	return \"bne %2,%3,1f\;bra %l0\;1:\";
1445 	return \"beq %2,%3,1f\;bra %l0\;1:\";
1448   [(set_attr "type" "branch")
1449   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1450   ; which is complex to track and inaccurate length specs.
1451    (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Inverted-arm variant of the above (condition code selection swapped).
1457 (define_insn "*rev_reg_branch_insn"
1459 	(if_then_else (match_operator 1 "eqne_comparison_operator"
1460 				      [(match_operand:SI 2 "register_operand" "r")
1461 				       (match_operand:SI 3 "register_operand" "r")])
1463 		      (label_ref (match_operand 0 "" ""))))]
1467   /* Is branch target reachable with beq/bne?  */
1468   if (get_attr_length (insn) == 4)
1470       if (GET_CODE (operands[1]) == NE)
1471 	return \"beq %2,%3,%l0\";
1473 	return \"bne %2,%3,%l0\";
1477       if (GET_CODE (operands[1]) == NE)
1478 	return \"bne %2,%3,1f\;bra %l0\;1:\";
1480 	return \"beq %2,%3,1f\;bra %l0\;1:\";
1483   [(set_attr "type" "branch")
1484   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1485   ; which is complex to track and inaccurate length specs.
1486    (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; NOTE(review): partial extract; lines missing between the numbered lines.
1492 ; reg/zero compare and branch insns
;; Compare-register-against-zero and branch, using the bxxz family
;; (beqz/bnez/blez/bgtz/bltz/bgez).  If the target is out of bxxz range,
;; invert the condition and branch around an unconditional "bra".
1494 (define_insn "*zero_branch_insn"
1496 	(if_then_else (match_operator 1 "signed_comparison_operator"
1497 		       [(match_operand:SI 2 "register_operand" "r")
1499 		      (label_ref (match_operand 0 "" ""))
1504   const char *br,*invbr;
1507   switch (GET_CODE (operands[1]))
1509     case EQ : br = \"eq\"; invbr = \"ne\"; break;
1510     case NE : br = \"ne\"; invbr = \"eq\"; break;
1511     case LE : br = \"le\"; invbr = \"gt\"; break;
1512     case GT : br = \"gt\"; invbr = \"le\"; break;
1513     case LT : br = \"lt\"; invbr = \"ge\"; break;
1514     case GE : br = \"ge\"; invbr = \"lt\"; break;
1519   /* Is branch target reachable with bxxz?  */
1520   if (get_attr_length (insn) == 4)
1522       sprintf (asmtext, \"b%sz %%2,%%l0\", br);
1523       output_asm_insn (asmtext, operands);
1527       sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", invbr);
1528       output_asm_insn (asmtext, operands);
1532   [(set_attr "type" "branch")
1533   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1534   ; which is complex to track and inaccurate length specs.
1535    (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Inverted-arm variant.  NOTE(review): the predicate here is
;; "eqne_comparison_operator" while the switch below also handles
;; LE/GT/LT/GE (as in the non-reversed pattern, which uses
;; "signed_comparison_operator") -- the extra cases look unreachable under
;; this predicate; confirm against the full file whether this is intended.
1541 (define_insn "*rev_zero_branch_insn"
1543 	(if_then_else (match_operator 1 "eqne_comparison_operator"
1544 		       [(match_operand:SI 2 "register_operand" "r")
1547 		      (label_ref (match_operand 0 "" ""))))]
1551   const char *br,*invbr;
1554   switch (GET_CODE (operands[1]))
1556     case EQ : br = \"eq\"; invbr = \"ne\"; break;
1557     case NE : br = \"ne\"; invbr = \"eq\"; break;
1558     case LE : br = \"le\"; invbr = \"gt\"; break;
1559     case GT : br = \"gt\"; invbr = \"le\"; break;
1560     case LT : br = \"lt\"; invbr = \"ge\"; break;
1561     case GE : br = \"ge\"; invbr = \"lt\"; break;
1566   /* Is branch target reachable with bxxz?  */
1567   if (get_attr_length (insn) == 4)
1569       sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
1570       output_asm_insn (asmtext, operands);
1574       sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", br);
1575       output_asm_insn (asmtext, operands);
1579   [(set_attr "type" "branch")
1580   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1581   ; which is complex to track and inaccurate length specs.
1582    (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; NOTE(review): partial extract; expander preludes, insn conditions/templates
;; and "(define_split" openers are missing between the numbered lines below.
1588 ;; S<cc> operations to set a register to 1/0 based on a comparison
;; seq: set op0 = (op0 == op1).  Dispatches to the M32RX/M32R2 direct-compare
;; form, the compare-against-zero form, or the generic seq_insn.
1590 (define_expand "seq"
1591   [(match_operand:SI 0 "register_operand" "")]
1595   rtx op0 = operands[0];
1596   rtx op1 = m32r_compare_op0;
1597   rtx op2 = m32r_compare_op1;
1598   enum machine_mode mode = GET_MODE (op0);
1603   if (! register_operand (op1, mode))
1604     op1 = force_reg (mode, op1);
1606   if (TARGET_M32RX || TARGET_M32R2)
1608       if (! reg_or_zero_operand (op2, mode))
1609 	op2 = force_reg (mode, op2);
1611       emit_insn (gen_seq_insn_m32rx (op0, op1, op2));
1614   if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
1616       emit_insn (gen_seq_zero_insn (op0, op1));
1620   if (! reg_or_eq_int16_operand (op2, mode))
1621     op2 = force_reg (mode, op2);
1623   emit_insn (gen_seq_insn (op0, op1, op2));
;; M32RX/M32R2: direct eq-compare plus condition-bit move, 6 bytes.
1627 (define_insn "seq_insn_m32rx"
1628   [(set (match_operand:SI 0 "register_operand" "=r")
1629 	(eq:SI (match_operand:SI 1 "register_operand" "%r")
1630 	       (match_operand:SI 2 "reg_or_zero_operand" "rP")))
1631    (clobber (reg:CC 17))]
1632   "TARGET_M32RX || TARGET_M32R2"
1634   [(set_attr "type" "multi")
1635    (set_attr "length" "6")])
;; Split the m32rx form into an explicit compare and a condition-bit read.
1638   [(set (match_operand:SI 0 "register_operand" "")
1639 	(eq:SI (match_operand:SI 1 "register_operand" "")
1640 	       (match_operand:SI 2 "reg_or_zero_operand" "")))
1641    (clobber (reg:CC 17))]
1642   "TARGET_M32RX || TARGET_M32R2"
1644 		(eq:CC (match_dup 1)
1647 	(ne:SI (reg:CC 17) (const_int 0)))]
;; op0 = (op1 == 0), via cmpui op1,#1 then condition-bit move; 6 bytes.
1650 (define_insn "seq_zero_insn"
1651   [(set (match_operand:SI 0 "register_operand" "=r")
1652 	(eq:SI (match_operand:SI 1 "register_operand" "r")
1654    (clobber (reg:CC 17))]
1657   [(set_attr "type" "multi")
1658    (set_attr "length" "6")])
;; Split of seq_zero_insn: emits cmp_ltusi (op1 < 1) then movcc.
1661   [(set (match_operand:SI 0 "register_operand" "")
1662 	(eq:SI (match_operand:SI 1 "register_operand" "")
1664    (clobber (reg:CC 17))]
1669   rtx op0 = operands[0];
1670   rtx op1 = operands[1];
1673   emit_insn (gen_cmp_ltusi_insn (op1, const1_rtx));
1674   emit_insn (gen_movcc_insn (op0));
1675   operands[3] = get_insns ();
;; Generic seq: subtract/xor into a scratch, then (scratch < 1) + movcc.
;; Four alternatives trade scratch placement; 8 or 10 bytes.
1679 (define_insn "seq_insn"
1680   [(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
1681 	(eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
1682 	       (match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
1683    (clobber (reg:CC 17))
1684    (clobber (match_scratch:SI 3 "=1,2,&r,r"))]
1687   [(set_attr "type" "multi")
1688    (set_attr "length" "8,8,10,10")])
;; Split of seq_insn after reload: pick the cheapest way to get
;; (op1 - op2) or (op1 ^ op2) into the scratch, then compare with 1.
1691   [(set (match_operand:SI 0 "register_operand" "")
1692 	(eq:SI (match_operand:SI 1 "register_operand" "")
1693 	       (match_operand:SI 2 "reg_or_eq_int16_operand" "")))
1694    (clobber (reg:CC 17))
1695    (clobber (match_scratch:SI 3 ""))]
1696   "TARGET_M32R && reload_completed"
1700   rtx op0 = operands[0];
1701   rtx op1 = operands[1];
1702   rtx op2 = operands[2];
1703   rtx op3 = operands[3];
1704   HOST_WIDE_INT value;
1706   if (GET_CODE (op2) == REG && GET_CODE (op3) == REG
1707       && REGNO (op2) == REGNO (op3))
1714   if (GET_CODE (op1) == REG && GET_CODE (op3) == REG
1715       && REGNO (op1) != REGNO (op3))
1717       emit_move_insn (op3, op1);
1721   if (GET_CODE (op2) == CONST_INT && (value = INTVAL (op2)) != 0
1722       && CMP_INT16_P (value))
1723     emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
1725     emit_insn (gen_xorsi3 (op3, op1, op2));
1727   emit_insn (gen_cmp_ltusi_insn (op3, const1_rtx));
1728   emit_insn (gen_movcc_insn (op0));
1729   operands[4] = get_insns ();
1733 (define_expand "sne"
1734 [(match_operand:SI 0 "register_operand" "")]
1738 rtx op0 = operands[0];
1739 rtx op1 = m32r_compare_op0;
1740 rtx op2 = m32r_compare_op1;
1741 enum machine_mode mode = GET_MODE (op0);
1746 if (GET_CODE (op2) != CONST_INT
1747 || (INTVAL (op2) != 0 && UINT16_P (INTVAL (op2))))
1751 if (reload_completed || reload_in_progress)
1754 reg = gen_reg_rtx (SImode);
1755 emit_insn (gen_xorsi3 (reg, op1, op2));
1758 if (! register_operand (op1, mode))
1759 op1 = force_reg (mode, op1);
1761 emit_insn (gen_sne_zero_insn (op0, op1));
1768 (define_insn "sne_zero_insn"
1769 [(set (match_operand:SI 0 "register_operand" "=r")
1770 (ne:SI (match_operand:SI 1 "register_operand" "r")
1772 (clobber (reg:CC 17))
1773 (clobber (match_scratch:SI 2 "=&r"))]
1776 [(set_attr "type" "multi")
1777 (set_attr "length" "6")])
1780 [(set (match_operand:SI 0 "register_operand" "")
1781 (ne:SI (match_operand:SI 1 "register_operand" "")
1783 (clobber (reg:CC 17))
1784 (clobber (match_scratch:SI 2 ""))]
1789 (ltu:CC (match_dup 2)
1792 (ne:SI (reg:CC 17) (const_int 0)))]
1795 (define_expand "slt"
1796 [(match_operand:SI 0 "register_operand" "")]
1800 rtx op0 = operands[0];
1801 rtx op1 = m32r_compare_op0;
1802 rtx op2 = m32r_compare_op1;
1803 enum machine_mode mode = GET_MODE (op0);
1808 if (! register_operand (op1, mode))
1809 op1 = force_reg (mode, op1);
1811 if (! reg_or_int16_operand (op2, mode))
1812 op2 = force_reg (mode, op2);
1814 emit_insn (gen_slt_insn (op0, op1, op2));
1818 (define_insn "slt_insn"
1819 [(set (match_operand:SI 0 "register_operand" "=r,r")
1820 (lt:SI (match_operand:SI 1 "register_operand" "r,r")
1821 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
1822 (clobber (reg:CC 17))]
1825 [(set_attr "type" "multi")
1826 (set_attr "length" "4,6")])
1829 [(set (match_operand:SI 0 "register_operand" "")
1830 (lt:SI (match_operand:SI 1 "register_operand" "")
1831 (match_operand:SI 2 "reg_or_int16_operand" "")))
1832 (clobber (reg:CC 17))]
1835 (lt:CC (match_dup 1)
1838 (ne:SI (reg:CC 17) (const_int 0)))]
1841 (define_expand "sle"
1842 [(match_operand:SI 0 "register_operand" "")]
1846 rtx op0 = operands[0];
1847 rtx op1 = m32r_compare_op0;
1848 rtx op2 = m32r_compare_op1;
1849 enum machine_mode mode = GET_MODE (op0);
1854 if (! register_operand (op1, mode))
1855 op1 = force_reg (mode, op1);
1857 if (GET_CODE (op2) == CONST_INT)
1859 HOST_WIDE_INT value = INTVAL (op2);
1860 if (value >= 2147483647)
1862 emit_move_insn (op0, const1_rtx);
1866 op2 = GEN_INT (value+1);
1867 if (value < -32768 || value >= 32767)
1868 op2 = force_reg (mode, op2);
1870 emit_insn (gen_slt_insn (op0, op1, op2));
1874 if (! register_operand (op2, mode))
1875 op2 = force_reg (mode, op2);
1877 emit_insn (gen_sle_insn (op0, op1, op2));
1881 (define_insn "sle_insn"
1882 [(set (match_operand:SI 0 "register_operand" "=r")
1883 (le:SI (match_operand:SI 1 "register_operand" "r")
1884 (match_operand:SI 2 "register_operand" "r")))
1885 (clobber (reg:CC 17))]
1888 [(set_attr "type" "multi")
1889 (set_attr "length" "8")])
1892 [(set (match_operand:SI 0 "register_operand" "")
1893 (le:SI (match_operand:SI 1 "register_operand" "")
1894 (match_operand:SI 2 "register_operand" "")))
1895 (clobber (reg:CC 17))]
1898 (lt:CC (match_dup 2)
1901 (ne:SI (reg:CC 17) (const_int 0)))
1903 (xor:SI (match_dup 0)
1907 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
1908 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
1910 [(set (match_operand:SI 0 "register_operand" "")
1911 (le:SI (match_operand:SI 1 "register_operand" "")
1912 (match_operand:SI 2 "register_operand" "")))
1913 (clobber (reg:CC 17))]
1916 (lt:CC (match_dup 2)
1919 (ne:SI (reg:CC 17) (const_int 0)))
1921 (plus:SI (match_dup 0)
1924 (neg:SI (match_dup 0)))]
1927 (define_expand "sgt"
1928 [(match_operand:SI 0 "register_operand" "")]
1932 rtx op0 = operands[0];
1933 rtx op1 = m32r_compare_op0;
1934 rtx op2 = m32r_compare_op1;
1935 enum machine_mode mode = GET_MODE (op0);
1940 if (! register_operand (op1, mode))
1941 op1 = force_reg (mode, op1);
1943 if (! register_operand (op2, mode))
1944 op2 = force_reg (mode, op2);
1946 emit_insn (gen_slt_insn (op0, op2, op1));
1950 (define_expand "sge"
1951 [(match_operand:SI 0 "register_operand" "")]
1955 rtx op0 = operands[0];
1956 rtx op1 = m32r_compare_op0;
1957 rtx op2 = m32r_compare_op1;
1958 enum machine_mode mode = GET_MODE (op0);
1963 if (! register_operand (op1, mode))
1964 op1 = force_reg (mode, op1);
1966 if (! reg_or_int16_operand (op2, mode))
1967 op2 = force_reg (mode, op2);
1969 emit_insn (gen_sge_insn (op0, op1, op2));
1973 (define_insn "sge_insn"
1974 [(set (match_operand:SI 0 "register_operand" "=r,r")
1975 (ge:SI (match_operand:SI 1 "register_operand" "r,r")
1976 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
1977 (clobber (reg:CC 17))]
1980 [(set_attr "type" "multi")
1981 (set_attr "length" "8,10")])
1984 [(set (match_operand:SI 0 "register_operand" "")
1985 (ge:SI (match_operand:SI 1 "register_operand" "")
1986 (match_operand:SI 2 "reg_or_int16_operand" "")))
1987 (clobber (reg:CC 17))]
1990 (lt:CC (match_dup 1)
1993 (ne:SI (reg:CC 17) (const_int 0)))
1995 (xor:SI (match_dup 0)
1999 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
2000 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
2002 [(set (match_operand:SI 0 "register_operand" "")
2003 (ge:SI (match_operand:SI 1 "register_operand" "")
2004 (match_operand:SI 2 "reg_or_int16_operand" "")))
2005 (clobber (reg:CC 17))]
2008 (lt:CC (match_dup 1)
2011 (ne:SI (reg:CC 17) (const_int 0)))
2013 (plus:SI (match_dup 0)
2016 (neg:SI (match_dup 0)))]
2019 (define_expand "sltu"
2020 [(match_operand:SI 0 "register_operand" "")]
2024 rtx op0 = operands[0];
2025 rtx op1 = m32r_compare_op0;
2026 rtx op2 = m32r_compare_op1;
2027 enum machine_mode mode = GET_MODE (op0);
2032 if (! register_operand (op1, mode))
2033 op1 = force_reg (mode, op1);
2035 if (! reg_or_int16_operand (op2, mode))
2036 op2 = force_reg (mode, op2);
2038 emit_insn (gen_sltu_insn (op0, op1, op2));
2042 (define_insn "sltu_insn"
2043 [(set (match_operand:SI 0 "register_operand" "=r,r")
2044 (ltu:SI (match_operand:SI 1 "register_operand" "r,r")
2045 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
2046 (clobber (reg:CC 17))]
2049 [(set_attr "type" "multi")
2050 (set_attr "length" "6,8")])
2053 [(set (match_operand:SI 0 "register_operand" "")
2054 (ltu:SI (match_operand:SI 1 "register_operand" "")
2055 (match_operand:SI 2 "reg_or_int16_operand" "")))
2056 (clobber (reg:CC 17))]
2059 (ltu:CC (match_dup 1)
2062 (ne:SI (reg:CC 17) (const_int 0)))]
2065 (define_expand "sleu"
2066 [(match_operand:SI 0 "register_operand" "")]
2070 rtx op0 = operands[0];
2071 rtx op1 = m32r_compare_op0;
2072 rtx op2 = m32r_compare_op1;
2073 enum machine_mode mode = GET_MODE (op0);
2078 if (GET_CODE (op2) == CONST_INT)
2080 HOST_WIDE_INT value = INTVAL (op2);
2081 if (value >= 2147483647)
2083 emit_move_insn (op0, const1_rtx);
2087 op2 = GEN_INT (value+1);
2088 if (value < 0 || value >= 32767)
2089 op2 = force_reg (mode, op2);
2091 emit_insn (gen_sltu_insn (op0, op1, op2));
2095 if (! register_operand (op2, mode))
2096 op2 = force_reg (mode, op2);
2098 emit_insn (gen_sleu_insn (op0, op1, op2));
2102 (define_insn "sleu_insn"
2103 [(set (match_operand:SI 0 "register_operand" "=r")
2104 (leu:SI (match_operand:SI 1 "register_operand" "r")
2105 (match_operand:SI 2 "register_operand" "r")))
2106 (clobber (reg:CC 17))]
2109 [(set_attr "type" "multi")
2110 (set_attr "length" "8")])
2113 [(set (match_operand:SI 0 "register_operand" "")
2114 (leu:SI (match_operand:SI 1 "register_operand" "")
2115 (match_operand:SI 2 "register_operand" "")))
2116 (clobber (reg:CC 17))]
2119 (ltu:CC (match_dup 2)
2122 (ne:SI (reg:CC 17) (const_int 0)))
2124 (xor:SI (match_dup 0)
2128 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
2129 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
2131 [(set (match_operand:SI 0 "register_operand" "")
2132 (leu:SI (match_operand:SI 1 "register_operand" "")
2133 (match_operand:SI 2 "register_operand" "")))
2134 (clobber (reg:CC 17))]
2137 (ltu:CC (match_dup 2)
2140 (ne:SI (reg:CC 17) (const_int 0)))
2142 (plus:SI (match_dup 0)
2145 (neg:SI (match_dup 0)))]
2148 (define_expand "sgtu"
2149 [(match_operand:SI 0 "register_operand" "")]
2153 rtx op0 = operands[0];
2154 rtx op1 = m32r_compare_op0;
2155 rtx op2 = m32r_compare_op1;
2156 enum machine_mode mode = GET_MODE (op0);
2161 if (! register_operand (op1, mode))
2162 op1 = force_reg (mode, op1);
2164 if (! register_operand (op2, mode))
2165 op2 = force_reg (mode, op2);
2167 emit_insn (gen_sltu_insn (op0, op2, op1));
2171 (define_expand "sgeu"
2172 [(match_operand:SI 0 "register_operand" "")]
2176 rtx op0 = operands[0];
2177 rtx op1 = m32r_compare_op0;
2178 rtx op2 = m32r_compare_op1;
2179 enum machine_mode mode = GET_MODE (op0);
2184 if (! register_operand (op1, mode))
2185 op1 = force_reg (mode, op1);
2187 if (! reg_or_int16_operand (op2, mode))
2188 op2 = force_reg (mode, op2);
2190 emit_insn (gen_sgeu_insn (op0, op1, op2));
2194 (define_insn "sgeu_insn"
2195 [(set (match_operand:SI 0 "register_operand" "=r,r")
2196 (geu:SI (match_operand:SI 1 "register_operand" "r,r")
2197 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
2198 (clobber (reg:CC 17))]
2201 [(set_attr "type" "multi")
2202 (set_attr "length" "8,10")])
2205 [(set (match_operand:SI 0 "register_operand" "")
2206 (geu:SI (match_operand:SI 1 "register_operand" "")
2207 (match_operand:SI 2 "reg_or_int16_operand" "")))
2208 (clobber (reg:CC 17))]
2211 (ltu:CC (match_dup 1)
2214 (ne:SI (reg:CC 17) (const_int 0)))
2216 (xor:SI (match_dup 0)
2220 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
2221 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
2223 [(set (match_operand:SI 0 "register_operand" "")
2224 (geu:SI (match_operand:SI 1 "register_operand" "")
2225 (match_operand:SI 2 "reg_or_int16_operand" "")))
2226 (clobber (reg:CC 17))]
2229 (ltu:CC (match_dup 1)
2232 (ne:SI (reg:CC 17) (const_int 0)))
2234 (plus:SI (match_dup 0)
2237 (neg:SI (match_dup 0)))]
2240 (define_insn "movcc_insn"
2241 [(set (match_operand:SI 0 "register_operand" "=r")
2242 (ne:SI (reg:CC 17) (const_int 0)))]
2245 [(set_attr "type" "misc")
2246 (set_attr "length" "2")])
2249 ;; Unconditional and other jump instructions.
2252 [(set (pc) (label_ref (match_operand 0 "" "")))]
2255 [(set_attr "type" "uncond_branch")
2256 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
2262 (define_insn "indirect_jump"
2263 [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
2266 [(set_attr "type" "uncond_branch")
2267 (set_attr "length" "2")])
2269 (define_insn "return"
2273 [(set_attr "type" "uncond_branch")
2274 (set_attr "length" "2")])
2276 (define_expand "tablejump"
2277 [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
2278 (use (label_ref (match_operand 1 "" "")))])]
2282 /* In pic mode, our address differences are against the base of the
2283 table. Add that base value back in; CSE ought to be able to combine
2284 the two address loads. */
2289 tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
2291 tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
2292 operands[0] = memory_address (Pmode, tmp);
2296 (define_insn "*tablejump_insn"
2297 [(set (pc) (match_operand:SI 0 "address_operand" "p"))
2298 (use (label_ref (match_operand 1 "" "")))]
2301 [(set_attr "type" "uncond_branch")
2302 (set_attr "length" "2")])
2304 (define_expand "call"
2305 ;; operands[1] is stack_size_rtx
2306 ;; operands[2] is next_arg_register
2307 [(parallel [(call (match_operand:SI 0 "call_operand" "")
2308 (match_operand 1 "" ""))
2309 (clobber (reg:SI 14))])]
2314 current_function_uses_pic_offset_table = 1;
2317 (define_insn "*call_via_reg"
2318 [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
2319 (match_operand 1 "" ""))
2320 (clobber (reg:SI 14))]
2323 [(set_attr "type" "call")
2324 (set_attr "length" "2")])
2326 (define_insn "*call_via_label"
2327 [(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
2328 (match_operand 1 "" ""))
2329 (clobber (reg:SI 14))]
2333 int call26_p = call26_operand (operands[0], FUNCTION_MODE);
2337 /* We may not be able to reach with a `bl' insn so punt and leave it to
2339 We do this here, rather than doing a force_reg in the define_expand
2340 so these insns won't be separated, say by scheduling, thus simplifying
2342 return \"seth r14,%T0\;add3 r14,r14,%B0\;jl r14\";
2347 [(set_attr "type" "call")
2348 (set (attr "length")
2349 (if_then_else (eq (symbol_ref "call26_operand (operands[0], FUNCTION_MODE)")
2351 (const_int 12) ; 10 + 2 for nop filler
2352 ; The return address must be on a 4 byte boundary so
2353 ; there's no point in using a value of 2 here. A 2 byte
2354 ; insn may go in the left slot but we currently can't
2355 ; use such knowledge.
2358 (define_expand "call_value"
2359 ;; operand 2 is stack_size_rtx
2360 ;; operand 3 is next_arg_register
2361 [(parallel [(set (match_operand 0 "register_operand" "=r")
2362 (call (match_operand:SI 1 "call_operand" "")
2363 (match_operand 2 "" "")))
2364 (clobber (reg:SI 14))])]
2369 current_function_uses_pic_offset_table = 1;
2372 (define_insn "*call_value_via_reg"
2373 [(set (match_operand 0 "register_operand" "=r")
2374 (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
2375 (match_operand 2 "" "")))
2376 (clobber (reg:SI 14))]
2379 [(set_attr "type" "call")
2380 (set_attr "length" "2")])
2382 (define_insn "*call_value_via_label"
2383 [(set (match_operand 0 "register_operand" "=r")
2384 (call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
2385 (match_operand 2 "" "")))
2386 (clobber (reg:SI 14))]
2390 int call26_p = call26_operand (operands[1], FUNCTION_MODE);
2393 current_function_uses_pic_offset_table = 1;
2397 /* We may not be able to reach with a `bl' insn so punt and leave it to
2399 We do this here, rather than doing a force_reg in the define_expand
2400 so these insns won't be separated, say by scheduling, thus simplifying
2402 return \"seth r14,%T1\;add3 r14,r14,%B1\;jl r14\";
2407 [(set_attr "type" "call")
2408 (set (attr "length")
2409 (if_then_else (eq (symbol_ref "call26_operand (operands[1], FUNCTION_MODE)")
2411 (const_int 12) ; 10 + 2 for nop filler
2412 ; The return address must be on a 4 byte boundary so
2413 ; there's no point in using a value of 2 here. A 2 byte
2414 ; insn may go in the left slot but we currently can't
2415 ; use such knowledge.
2422 [(set_attr "type" "int2")
2423 (set_attr "length" "2")])
2425 ;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
2426 ;; all of memory. This blocks insns from being moved across this point.
2428 (define_insn "blockage"
2429 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
2433 ;; Special pattern to flush the icache.
2435 (define_insn "flush_icache"
2436 [(unspec_volatile [(match_operand 0 "memory_operand" "m")]
2437 UNSPECV_FLUSH_ICACHE)
2438 (match_operand 1 "" "")
2439 (clobber (reg:SI 17))]
2441 "* return \"trap %#%1 ; flush-icache\";"
2442 [(set_attr "type" "int4")
2443 (set_attr "length" "4")])
2445 ;; Speed up fabs and provide correct sign handling for -0
2447 (define_insn "absdf2"
2448 [(set (match_operand:DF 0 "register_operand" "=r")
2449 (abs:DF (match_operand:DF 1 "register_operand" "0")))]
2452 [(set_attr "type" "multi")
2453 (set_attr "length" "4")])
2456 [(set (match_operand:DF 0 "register_operand" "")
2457 (abs:DF (match_operand:DF 1 "register_operand" "")))]
2460 (ashift:SI (match_dup 2)
2463 (lshiftrt:SI (match_dup 2)
2465 "operands[2] = gen_highpart (SImode, operands[0]);")
2467 (define_insn "abssf2"
2468 [(set (match_operand:SF 0 "register_operand" "=r")
2469 (abs:SF (match_operand:SF 1 "register_operand" "0")))]
2472 [(set_attr "type" "multi")
2473 (set_attr "length" "4")])
2476 [(set (match_operand:SF 0 "register_operand" "")
2477 (abs:SF (match_operand:SF 1 "register_operand" "")))]
2480 (ashift:SI (match_dup 2)
2483 (lshiftrt:SI (match_dup 2)
2485 "operands[2] = gen_highpart (SImode, operands[0]);")
;; NOTE(review): partial extract; lines missing between the numbered lines.
2487 ;; Conditional move instructions
2488 ;; Based on those done for the d10v
;; movsicc: only handles the 0/1 arm case (zero_and_one); generates the
;; carry-setting comparison and a blockage so the compare/use pair is not
;; separated.
2490 (define_expand "movsicc"
2492    (set (match_operand:SI 0 "register_operand" "r")
2493 	(if_then_else:SI (match_operand 1 "" "")
2494 			 (match_operand:SI 2 "conditional_move_operand" "O")
2495 			 (match_operand:SI 3 "conditional_move_operand" "O")
2502   if (! zero_and_one (operands [2], operands [3]))
2505   /* Generate the comparison that will set the carry flag.  */
2506   operands[1] = gen_compare (GET_CODE (operands[1]), m32r_compare_op0,
2507 			     m32r_compare_op1, TRUE);
2509   /* See other movsicc pattern below for reason why.  */
2510   emit_insn (gen_blockage ());
2513 ;; Generate the conditional instructions based on how the carry flag is examined.
2514 (define_insn "*movsicc_internal"
2515   [(set (match_operand:SI 0 "register_operand" "=r")
2516 	(if_then_else:SI (match_operand 1 "carry_compare_operand" "")
2517 			 (match_operand:SI 2 "conditional_move_operand" "O")
2518 			 (match_operand:SI 3 "conditional_move_operand" "O")
2521   "zero_and_one (operands [2], operands[3])"
2522   "* return emit_cond_move (operands, insn);"
2523   [(set_attr "type" "multi")
2524    (set_attr "length" "8")
2529 ;; Block moves, see m32r.c for more details.
2530 ;; Argument 0 is the destination
2531 ;; Argument 1 is the source
2532 ;; Argument 2 is the length
2533 ;; Argument 3 is the alignment
;; movmemsi: expander that hands the whole move to m32r_expand_block_move.
2535 (define_expand "movmemsi"
2536   [(parallel [(set (match_operand:BLK 0 "general_operand" "")
2537 		   (match_operand:BLK 1 "general_operand" ""))
2538 	      (use (match_operand:SI 2 "immediate_operand" ""))
2539 	      (use (match_operand:SI 3 "immediate_operand" ""))])]
2543   if (operands[0])		/* avoid unused code messages */
2545       m32r_expand_block_move (operands);
2550 ;; Insn generated by block moves
;; The insn advances both pointers (operands 3/4 tied to 0/1) and uses two
;; scratch registers; output is produced by m32r_output_block_move.
2552 (define_insn "movmemsi_internal"
2553   [(set (mem:BLK (match_operand:SI 0 "register_operand" "r"))	;; destination
2554 	(mem:BLK (match_operand:SI 1 "register_operand" "r")))	;; source
2555    (use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
2556    (set (match_operand:SI 3 "register_operand" "=0")
2557 	(plus:SI (match_dup 0)
2558 		 (minus (match_dup 2) (const_int 4))))
2559    (set (match_operand:SI 4 "register_operand" "=1")
2560 	(plus:SI (match_dup 1)
2562    (clobber (match_scratch:SI 5 "=&r"))	;; temp1
2563    (clobber (match_scratch:SI 6 "=&r"))]	;; temp2
2565   "* m32r_output_block_move (insn, operands); return \"\"; "
2566   [(set_attr "type" "store8")
2567    (set_attr "length" "72")]) ;; Maximum
;; NOTE(review): partial extract; lines missing between the numbered lines.
2571 /* When generating pic, we need to load the symbol offset into a register.
2572    So that the optimizer does not confuse this with a normal symbol load
2573    we use an unspec.  The offset will be loaded from a constant pool entry,
2574    since that is the only type of relocation we can use.  */
;; Load a PIC address (wrapped in an unspec so it is not CSE'd as a plain
;; symbol load); template line missing from this extract.
2576 (define_insn "pic_load_addr"
2577   [(set (match_operand:SI 0 "register_operand" "=r")
2578 	(unspec:SI [(match_operand 1 "" "")] UNSPEC_PIC_LOAD_ADDR))]
2581   [(set_attr "type" "int4")])
;; Load a GOT-offset address: seth/add3 pair with @GOTOFF relocations.
;; NOTE(review): the "low(" in the template lacks the "%#" prefix that
;; "shigh(" has -- confirm against the full file whether that is intended.
2583 (define_insn "gotoff_load_addr"
2584   [(set (match_operand:SI 0 "register_operand" "=r")
2585 	(unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFF))]
2587   "seth %0, %#shigh(%1@GOTOFF)\;add3 %0, %0, low(%1@GOTOFF)"
2588   [(set_attr "type" "int4")
2589    (set_attr "length" "8")])
2591 ;; Load program counter insns.
;; get_pc: "bl .+4" captures the PC in lr, then adds the symbol offset;
;; short (ld24) or long (seth/add3) form chosen by operand 2.
2593 (define_insn "get_pc"
2594   [(clobber (reg:SI 14))
2595    (set (match_operand 0 "register_operand" "=r")
2596 	(unspec [(match_operand 1 "" "")] UNSPEC_GET_PC))
2597    (use (match_operand:SI 2 "immediate_operand" ""))]
2601   if (INTVAL(operands[2]))
2602     return \"bl.s .+4\;ld24 %0,%#%1\;add %0,lr\";
2604     return \"bl.s .+4\;seth %0,%#shigh(%1)\;add3 %0,%0,%#low(%1+4)\;add %0,lr\";}"
2605   [(set (attr "length") (if_then_else (ne (match_dup 2) (const_int 0))
2609 (define_expand "builtin_setjmp_receiver"
2610 [(label_ref (match_operand 0 "" ""))]
2614 m32r_load_pic_register ();