1 ;; Machine description of the Renesas M32R cpu for GNU C compiler
2 ;; Copyright (C) 1996, 1997, 1998, 1999, 2001, 2003, 2004
3 ; Free Software Foundation, Inc.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published
9 ;; by the Free Software Foundation; either version 2, or (at your
10 ;; option) any later version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT
13 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 ;; License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING. If not, write to
19 ;; the Free Software Foundation, 59 Temple Place - Suite 330,
20 ;; Boston, MA 02111-1307, USA.
22 ;; See file "rtl.def" for documentation on define_insn, match_*, et al.
24 ;; UNSPEC_VOLATILE usage
27 (UNSPECV_FLUSH_ICACHE 1)])
31 [(UNSPEC_LOAD_SDA_BASE 2)
33 (UNSPEC_PIC_LOAD_ADDR 4)
36 ;; Insn type. Used to default other attribute values.
38 "int2,int4,load2,load4,load8,store2,store4,store8,shift2,shift4,mul2,div4,uncond_branch,branch,call,multi,misc"
39 (const_string "misc"))
42 (define_attr "length" ""
43 (cond [(eq_attr "type" "int2,load2,store2,shift2,mul2")
46 (eq_attr "type" "int4,load4,store4,shift4,div4")
49 (eq_attr "type" "multi")
52 (eq_attr "type" "uncond_branch,branch,call")
57 ;; The length here is the length of a single asm. Unfortunately it might be
58 ;; 2 or 4 so we must allow for 4. That's ok though.
;; Default attribute values for inline asm statements: assume the
;; worst-case 4-byte length and classify them as "multi" so the
;; scheduler treats them conservatively.
59 (define_asm_attributes
60 [(set_attr "length" "4")
61 (set_attr "type" "multi")])
63 ;; Whether an instruction is short (16-bit) or long (32-bit).
;; Derived from the "type" attribute: every *2 type (int2, load2,
;; store2, shift2, mul2) is a 16-bit encoding; everything else is
;; treated as a 32-bit encoding.
64 (define_attr "insn_size" "short,long"
65 (if_then_else (eq_attr "type" "int2,load2,store2,shift2,mul2")
66 (const_string "short")
67 (const_string "long")))
69 ;; The target CPU we're compiling for.
;; Resolved at compile time from the TARGET_M32RX / TARGET_M32R2
;; command-line flags; the plain m32r is the fall-through default.
70 (define_attr "cpu" "m32r,m32r2,m32rx"
71 (cond [(ne (symbol_ref "TARGET_M32RX") (const_int 0))
72 (const_string "m32rx")
73 (ne (symbol_ref "TARGET_M32R2") (const_int 0))
74 (const_string "m32r2")]
75 (const_string "m32r")))
77 ;; Defines the pipeline where an instruction can be executed on.
78 ;; For the M32R, a short instruction can execute one of the two pipes.
79 ;; For the M32Rx, the restrictions are modelled in the second
80 ;; condition of this attribute definition.
81 (define_attr "m32r_pipeline" "either,s,o,long"
82 (cond [(and (eq_attr "cpu" "m32r")
83 (eq_attr "insn_size" "short"))
84 (const_string "either")
85 (eq_attr "insn_size" "!short")
86 (const_string "long")]
87 (cond [(eq_attr "type" "int2")
88 (const_string "either")
89 (eq_attr "type" "load2,store2,shift2,uncond_branch,branch,call")
91 (eq_attr "type" "mul2")
93 (const_string "long"))))
95 ;; ::::::::::::::::::::
97 ;; :: Pipeline description
99 ;; ::::::::::::::::::::
101 ;; This model is based on Chapter 2, Appendix 3 and Appendix 4 of the
102 ;; "M32R-FPU Software Manual", Revision 1.01, plus additional information
103 ;; obtained by our best friend and mine, Google.
105 ;; The pipeline is modelled as a fetch unit, and a core with a memory unit,
106 ;; two execution units, where "fetch" models IF and D, "memory" for MEM1
107 ;; and MEM2, and "EXEC" for E, E1, E2, EM, and EA. Writeback and
108 ;; bypasses are not modelled.
;; A single DFA scheduling automaton covers all M32R variants; the
;; per-CPU differences are expressed through the attributes above.
109 (define_automaton "m32r")
111 ;; We pretend there are two short (16 bits) instruction fetchers. The
112 ;; "s" short fetcher cannot be reserved until the "o" short fetcher is
113 ;; reserved. Some instructions reserve both the left and right fetchers.
114 ;; These fetch units are a hack to get GCC to better pack the instructions
115 ;; for the M32Rx processor, which has two execution pipes.
117 ;; In reality there is only one decoder, which can decode either two 16-bit
118 ;; instructions, or a single 32-bit instruction.
120 ;; Note, "fetch" models both the IF and the D pipeline stages.
122 ;; The m32rx core has two execution pipes. We name them o_E and s_E.
123 ;; In addition, there's a memory unit.
;; o_IF/s_IF: the two 16-bit instruction fetch slots described above;
;; o_E/s_E:   the two execution pipes (M32Rx); memory: the MEM1/MEM2
;; memory unit shared by load/store reservations below.
125 (define_cpu_unit "o_IF,s_IF,o_E,s_E,memory" "m32r")
127 ;; Prevent the s pipe from being reserved before the o pipe.
;; NOTE(review): the intent (per the fetcher comment above) is to force
;; the "o" fetch slot / pipe to be allocated first so short insns pack
;; correctly -- confirm against the GCC internals absence_set semantics.
128 (absence_set "s_IF" "o_IF")
129 (absence_set "s_E" "o_E")
131 ;; On the M32Rx, long instructions execute on both pipes, so reserve
132 ;; both fetch slots and both pipes.
;; Shorthand reservations used by the 32-bit insn reservations below.
133 (define_reservation "long_IF" "o_IF+s_IF")
134 (define_reservation "long_E" "o_E+s_E")
136 ;; ::::::::::::::::::::
138 ;; Simple instructions do 4 stages: IF D E WB. WB is not modelled.
139 ;; Hence, ready latency is 1.
140 (define_insn_reservation "short_left" 1
141 (and (eq_attr "m32r_pipeline" "o")
142 (and (eq_attr "insn_size" "short")
143 (eq_attr "type" "!load2")))
146 (define_insn_reservation "short_right" 1
147 (and (eq_attr "m32r_pipeline" "s")
148 (and (eq_attr "insn_size" "short")
149 (eq_attr "type" "!load2")))
152 (define_insn_reservation "short_either" 1
153 (and (eq_attr "m32r_pipeline" "either")
154 (and (eq_attr "insn_size" "short")
155 (eq_attr "type" "!load2")))
158 (define_insn_reservation "long_m32r" 1
159 (and (eq_attr "cpu" "m32r")
160 (and (eq_attr "insn_size" "long")
161 (eq_attr "type" "!load4,load8")))
164 (define_insn_reservation "long_m32rx" 2
165 (and (eq_attr "m32r_pipeline" "long")
166 (and (eq_attr "insn_size" "long")
167 (eq_attr "type" "!load4,load8")))
170 ;; Load/store instructions do 6 stages: IF D E MEM1 MEM2 WB.
171 ;; MEM1 may require more than one cycle depending on locality. We
172 ;; optimistically assume all memory is nearby, i.e. MEM1 takes only
173 ;; one cycle. Hence, ready latency is 3.
175 ;; The M32Rx can do short load/store only on the left pipe.
176 (define_insn_reservation "short_load_left" 3
177 (and (eq_attr "m32r_pipeline" "o")
178 (and (eq_attr "insn_size" "short")
179 (eq_attr "type" "load2")))
;; 16-bit load issued on either pipe.  Ready latency 3 per the
;; load/store comment above (E + MEM1 + MEM2, WB not modelled);
;; "memory*2" keeps the memory unit busy for the MEM1/MEM2 cycles.
182 (define_insn_reservation "short_load" 3
183 (and (eq_attr "m32r_pipeline" "either")
184 (and (eq_attr "insn_size" "short")
185 (eq_attr "type" "load2")))
186 "s_IF|o_IF,s_E|o_E,memory*2")
;; 32-bit load on the plain M32R.  A long insn occupies both fetch
;; slots and both pipes (long_IF/long_E); the memory unit is busy for
;; the two MEM stages, giving the ready latency of 3.
188 (define_insn_reservation "long_load" 3
189 (and (eq_attr "cpu" "m32r")
190 (and (eq_attr "insn_size" "long")
191 (eq_attr "type" "load4,load8")))
192 "long_IF,long_E,memory*2")
;; 32-bit load on the M32Rx: matched via the "long" pipeline attribute
;; rather than the cpu attribute, but uses the same both-pipes plus
;; two-cycle memory reservation and latency 3 as long_load above.
194 (define_insn_reservation "long_load_m32rx" 3
195 (and (eq_attr "m32r_pipeline" "long")
196 (eq_attr "type" "load4,load8"))
197 "long_IF,long_E,memory*2")
200 ;; Expand prologue as RTL
201 (define_expand "prologue"
206 m32r_expand_prologue ();
211 ;; Move instructions.
213 ;; For QI and HI moves, the register must contain the full properly
214 ;; sign-extended value. nonzero_bits assumes this [otherwise
215 ;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
216 ;; says it's a kludge and the .md files should be fixed instead].
218 (define_expand "movqi"
219 [(set (match_operand:QI 0 "general_operand" "")
220 (match_operand:QI 1 "general_operand" ""))]
224 /* Fixup PIC cases. */
227 if (symbolic_operand (operands[1], QImode))
229 if (reload_in_progress || reload_completed)
230 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
232 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
236 /* Everything except mem = const or mem = mem can be done easily.
237 Objects in the small data area are handled too. */
239 if (GET_CODE (operands[0]) == MEM)
240 operands[1] = force_reg (QImode, operands[1]);
243 (define_insn "*movqi_insn"
244 [(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,r,T,m")
245 (match_operand:QI 1 "move_src_operand" "r,I,JQR,T,m,r,r"))]
246 "register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
255 [(set_attr "type" "int2,int2,int4,load2,load4,store2,store4")
256 (set_attr "length" "2,2,4,2,4,2,4")])
258 (define_expand "movhi"
259 [(set (match_operand:HI 0 "general_operand" "")
260 (match_operand:HI 1 "general_operand" ""))]
264 /* Fixup PIC cases. */
267 if (symbolic_operand (operands[1], HImode))
269 if (reload_in_progress || reload_completed)
270 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
272 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
276 /* Everything except mem = const or mem = mem can be done easily. */
278 if (GET_CODE (operands[0]) == MEM)
279 operands[1] = force_reg (HImode, operands[1]);
282 (define_insn "*movhi_insn"
283 [(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,r,T,m")
284 (match_operand:HI 1 "move_src_operand" "r,I,JQR,K,T,m,r,r"))]
285 "register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
295 [(set_attr "type" "int2,int2,int4,int4,load2,load4,store2,store4")
296 (set_attr "length" "2,2,4,4,2,4,2,4")])
298 (define_expand "movsi_push"
299 [(set (mem:SI (pre_dec:SI (match_operand:SI 0 "register_operand" "")))
300 (match_operand:SI 1 "register_operand" ""))]
304 (define_expand "movsi_pop"
305 [(set (match_operand:SI 0 "register_operand" "")
306 (mem:SI (post_inc:SI (match_operand:SI 1 "register_operand" ""))))]
310 (define_expand "movsi"
311 [(set (match_operand:SI 0 "general_operand" "")
312 (match_operand:SI 1 "general_operand" ""))]
316 /* Fixup PIC cases. */
319 if (symbolic_operand (operands[1], SImode))
321 if (reload_in_progress || reload_completed)
322 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
324 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
328 /* Everything except mem = const or mem = mem can be done easily. */
330 if (GET_CODE (operands[0]) == MEM)
331 operands[1] = force_reg (SImode, operands[1]);
333 /* Small Data Area reference? */
334 if (small_data_operand (operands[1], SImode))
336 emit_insn (gen_movsi_sda (operands[0], operands[1]));
340 /* If medium or large code model, symbols have to be loaded with
342 if (addr32_operand (operands[1], SImode))
344 emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
349 ;; ??? Do we need a const_double constraint here for large unsigned values?
350 (define_insn "*movsi_insn"
351 [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
352 (match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
353 "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
356 if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == SUBREG)
358 switch (GET_CODE (operands[1]))
370 if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
371 && XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
377 value = INTVAL (operands[1]);
379 return \"ldi %0,%#%1\\t; %X1\";
381 if (UINT24_P (value))
382 return \"ld24 %0,%#%1\\t; %X1\";
384 if (UPPER16_P (value))
385 return \"seth %0,%#%T1\\t; %X1\";
393 return \"ld24 %0,%#%1\";
399 else if (GET_CODE (operands[0]) == MEM
400 && (GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG))
402 if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
403 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
411 [(set_attr "type" "int2,int2,int4,int4,int4,multi,load2,load2,load4,store2,store2,store4")
412 (set_attr "length" "2,2,4,4,4,8,2,2,4,2,2,4")])
414 ; Try to use a four byte / two byte pair for constants not loadable with
418 [(set (match_operand:SI 0 "register_operand" "")
419 (match_operand:SI 1 "two_insn_const_operand" ""))]
421 [(set (match_dup 0) (match_dup 2))
422 (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
425 unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
426 unsigned HOST_WIDE_INT tmp;
429 /* In all cases we will emit two instructions. However we try to
430 use 2 byte instructions wherever possible. We can assume the
431 constant isn't loadable with any of ldi, ld24, or seth. */
433 /* See if we can load a 24 bit unsigned value and invert it. */
434 if (UINT24_P (~ val))
436 emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
437 emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
441 /* See if we can load a 24 bit unsigned value and shift it into place.
442 0x01fffffe is just beyond ld24's range. */
443 for (shift = 1, tmp = 0x01fffffe;
447 if ((val & ~tmp) == 0)
449 emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
450 emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
455 /* Can't use any two byte insn, fall back to seth/or3. Use ~0xffff instead
456 of 0xffff0000, since the later fails on a 64-bit host. */
457 operands[2] = GEN_INT ((val) & ~0xffff);
458 operands[3] = GEN_INT ((val) & 0xffff);
462 [(set (match_operand:SI 0 "register_operand" "")
463 (match_operand:SI 1 "seth_add3_operand" ""))]
466 (high:SI (match_dup 1)))
468 (lo_sum:SI (match_dup 0)
472 ;; Small data area support.
473 ;; The address of _SDA_BASE_ is loaded into a register and all objects in
474 ;; the small data area are indexed off that. This is done for each reference
475 ;; but cse will clean things up for us. We let the compiler choose the
476 ;; register to use so we needn't allocate (and maybe even fix) a special
477 ;; register to use. Since the load and store insns have a 16 bit offset the
478 ;; total size of the data area can be 64K. However, if the data area lives
479 ;; above 16M (24 bits), _SDA_BASE_ will have to be loaded with seth/add3 which
480 ;; would then yield 3 instructions to reference an object [though there would
481 ;; be no net loss if two or more objects were referenced]. The 3 insns can be
482 ;; reduced back to 2 if the size of the small data area were reduced to 32K
483 ;; [then seth + ld/st would work for any object in the area]. Doing this
484 ;; would require special handling of _SDA_BASE_ (its value would be
485 ;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
486 ;; [I think]. What to do about this is deferred until later and for now we
487 ;; require .sdata to be in the first 16M.
489 (define_expand "movsi_sda"
491 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))
492 (set (match_operand:SI 0 "register_operand" "")
493 (lo_sum:SI (match_dup 2)
494 (match_operand:SI 1 "small_data_operand" "")))]
498 if (reload_in_progress || reload_completed)
499 operands[2] = operands[0];
501 operands[2] = gen_reg_rtx (SImode);
504 (define_insn "*load_sda_base"
505 [(set (match_operand:SI 0 "register_operand" "=r")
506 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
508 "ld24 %0,#_SDA_BASE_"
509 [(set_attr "type" "int4")
510 (set_attr "length" "4")])
512 ;; 32 bit address support.
514 (define_expand "movsi_addr32"
516 ; addr32_operand isn't used because it's too restrictive,
517 ; seth_add3_operand is more general and thus safer.
518 (high:SI (match_operand:SI 1 "seth_add3_operand" "")))
519 (set (match_operand:SI 0 "register_operand" "")
520 (lo_sum:SI (match_dup 2) (match_dup 1)))]
524 if (reload_in_progress || reload_completed)
525 operands[2] = operands[0];
527 operands[2] = gen_reg_rtx (SImode);
530 (define_insn "set_hi_si"
531 [(set (match_operand:SI 0 "register_operand" "=r")
532 (high:SI (match_operand 1 "symbolic_operand" "")))]
534 "seth %0,%#shigh(%1)"
535 [(set_attr "type" "int4")
536 (set_attr "length" "4")])
538 (define_insn "lo_sum_si"
539 [(set (match_operand:SI 0 "register_operand" "=r")
540 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
541 (match_operand:SI 2 "immediate_operand" "in")))]
544 [(set_attr "type" "int4")
545 (set_attr "length" "4")])
547 (define_expand "movdi"
548 [(set (match_operand:DI 0 "general_operand" "")
549 (match_operand:DI 1 "general_operand" ""))]
553 /* Fixup PIC cases. */
556 if (symbolic_operand (operands[1], DImode))
558 if (reload_in_progress || reload_completed)
559 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
561 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
565 /* Everything except mem = const or mem = mem can be done easily. */
567 if (GET_CODE (operands[0]) == MEM)
568 operands[1] = force_reg (DImode, operands[1]);
571 (define_insn "*movdi_insn"
572 [(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,r,m")
573 (match_operand:DI 1 "move_double_src_operand" "r,nG,F,m,r"))]
574 "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
576 [(set_attr "type" "multi,multi,multi,load8,store8")
577 (set_attr "length" "4,4,16,6,6")])
580 [(set (match_operand:DI 0 "move_dest_operand" "")
581 (match_operand:DI 1 "move_double_src_operand" ""))]
584 "operands[2] = gen_split_move_double (operands);")
586 ;; Floating point move insns.
588 (define_expand "movsf"
589 [(set (match_operand:SF 0 "general_operand" "")
590 (match_operand:SF 1 "general_operand" ""))]
594 /* Fixup PIC cases. */
597 if (symbolic_operand (operands[1], SFmode))
599 if (reload_in_progress || reload_completed)
600 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
602 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
606 /* Everything except mem = const or mem = mem can be done easily. */
608 if (GET_CODE (operands[0]) == MEM)
609 operands[1] = force_reg (SFmode, operands[1]);
612 (define_insn "*movsf_insn"
613 [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
614 (match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
615 "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
625 ;; ??? Length of alternative 1 is either 2, 4 or 8.
626 [(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
627 (set_attr "length" "2,8,2,2,4,2,2,4")])
630 [(set (match_operand:SF 0 "register_operand" "")
631 (match_operand:SF 1 "const_double_operand" ""))]
633 [(set (match_dup 2) (match_dup 3))]
636 operands[2] = operand_subword (operands[0], 0, 0, SFmode);
637 operands[3] = operand_subword (operands[1], 0, 0, SFmode);
640 (define_expand "movdf"
641 [(set (match_operand:DF 0 "general_operand" "")
642 (match_operand:DF 1 "general_operand" ""))]
646 /* Fixup PIC cases. */
649 if (symbolic_operand (operands[1], DFmode))
651 if (reload_in_progress || reload_completed)
652 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
654 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
658 /* Everything except mem = const or mem = mem can be done easily. */
660 if (GET_CODE (operands[0]) == MEM)
661 operands[1] = force_reg (DFmode, operands[1]);
664 (define_insn "*movdf_insn"
665 [(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
666 (match_operand:DF 1 "move_double_src_operand" "r,F,m,r"))]
667 "register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
669 [(set_attr "type" "multi,multi,load8,store8")
670 (set_attr "length" "4,16,6,6")])
673 [(set (match_operand:DF 0 "move_dest_operand" "")
674 (match_operand:DF 1 "move_double_src_operand" ""))]
677 "operands[2] = gen_split_move_double (operands);")
679 ;; Zero extension instructions.
681 (define_insn "zero_extendqihi2"
682 [(set (match_operand:HI 0 "register_operand" "=r,r,r")
683 (zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
689 [(set_attr "type" "int4,load2,load4")
690 (set_attr "length" "4,2,4")])
692 (define_insn "zero_extendqisi2"
693 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
694 (zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
700 [(set_attr "type" "int4,load2,load4")
701 (set_attr "length" "4,2,4")])
703 (define_insn "zero_extendhisi2"
704 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
705 (zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
711 [(set_attr "type" "int4,load2,load4")
712 (set_attr "length" "4,2,4")])
714 ;; Signed conversions from a smaller integer to a larger integer
715 (define_insn "extendqihi2"
716 [(set (match_operand:HI 0 "register_operand" "=r,r,r")
717 (sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
723 [(set_attr "type" "multi,load2,load4")
724 (set_attr "length" "2,2,4")])
727 [(set (match_operand:HI 0 "register_operand" "")
728 (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
734 rtx op0 = gen_lowpart (SImode, operands[0]);
735 rtx shift = GEN_INT (24);
737 operands[2] = gen_ashlsi3 (op0, op0, shift);
738 operands[3] = gen_ashrsi3 (op0, op0, shift);
741 (define_insn "extendqisi2"
742 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
743 (sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
749 [(set_attr "type" "multi,load2,load4")
750 (set_attr "length" "4,2,4")])
753 [(set (match_operand:SI 0 "register_operand" "")
754 (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
760 rtx shift = GEN_INT (24);
762 operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
763 operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
766 (define_insn "extendhisi2"
767 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
768 (sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
774 [(set_attr "type" "multi,load2,load4")
775 (set_attr "length" "4,2,4")])
778 [(set (match_operand:SI 0 "register_operand" "")
779 (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
785 rtx shift = GEN_INT (16);
787 operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
788 operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
791 ;; Arithmetic instructions.
793 ; ??? Adding an alternative to split add3 of small constants into two
794 ; insns yields better instruction packing but slower code. Adds of small
795 ; values is done a lot.
797 (define_insn "addsi3"
798 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
799 (plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
800 (match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
806 [(set_attr "type" "int2,int2,int4")
807 (set_attr "length" "2,2,4")])
810 ; [(set (match_operand:SI 0 "register_operand" "")
811 ; (plus:SI (match_operand:SI 1 "register_operand" "")
812 ; (match_operand:SI 2 "int8_operand" "")))]
814 ; && REGNO (operands[0]) != REGNO (operands[1])
815 ; && INT8_P (INTVAL (operands[2]))
816 ; && INTVAL (operands[2]) != 0"
817 ; [(set (match_dup 0) (match_dup 1))
818 ; (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
821 (define_insn "adddi3"
822 [(set (match_operand:DI 0 "register_operand" "=r")
823 (plus:DI (match_operand:DI 1 "register_operand" "%0")
824 (match_operand:DI 2 "register_operand" "r")))
825 (clobber (reg:CC 17))]
828 [(set_attr "type" "multi")
829 (set_attr "length" "6")])
831 ;; ??? The cmp clears the condition bit. Can we speed up somehow?
833 [(set (match_operand:DI 0 "register_operand" "")
834 (plus:DI (match_operand:DI 1 "register_operand" "")
835 (match_operand:DI 2 "register_operand" "")))
836 (clobber (reg:CC 17))]
838 [(parallel [(set (reg:CC 17)
840 (use (match_dup 4))])
841 (parallel [(set (match_dup 4)
842 (plus:SI (match_dup 4)
843 (plus:SI (match_dup 5)
844 (ne:SI (reg:CC 17) (const_int 0)))))
846 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
847 (parallel [(set (match_dup 6)
848 (plus:SI (match_dup 6)
849 (plus:SI (match_dup 7)
850 (ne:SI (reg:CC 17) (const_int 0)))))
852 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
855 operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
856 operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
857 operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
858 operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
861 (define_insn "*clear_c"
864 (use (match_operand:SI 0 "register_operand" "r"))]
867 [(set_attr "type" "int2")
868 (set_attr "length" "2")])
870 (define_insn "*add_carry"
871 [(set (match_operand:SI 0 "register_operand" "=r")
872 (plus:SI (match_operand:SI 1 "register_operand" "%0")
873 (plus:SI (match_operand:SI 2 "register_operand" "r")
874 (ne:SI (reg:CC 17) (const_int 0)))))
876 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
879 [(set_attr "type" "int2")
880 (set_attr "length" "2")])
882 (define_insn "subsi3"
883 [(set (match_operand:SI 0 "register_operand" "=r")
884 (minus:SI (match_operand:SI 1 "register_operand" "0")
885 (match_operand:SI 2 "register_operand" "r")))]
888 [(set_attr "type" "int2")
889 (set_attr "length" "2")])
891 (define_insn "subdi3"
892 [(set (match_operand:DI 0 "register_operand" "=r")
893 (minus:DI (match_operand:DI 1 "register_operand" "0")
894 (match_operand:DI 2 "register_operand" "r")))
895 (clobber (reg:CC 17))]
898 [(set_attr "type" "multi")
899 (set_attr "length" "6")])
901 ;; ??? The cmp clears the condition bit. Can we speed up somehow?
903 [(set (match_operand:DI 0 "register_operand" "")
904 (minus:DI (match_operand:DI 1 "register_operand" "")
905 (match_operand:DI 2 "register_operand" "")))
906 (clobber (reg:CC 17))]
908 [(parallel [(set (reg:CC 17)
910 (use (match_dup 4))])
911 (parallel [(set (match_dup 4)
912 (minus:SI (match_dup 4)
913 (minus:SI (match_dup 5)
914 (ne:SI (reg:CC 17) (const_int 0)))))
916 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
917 (parallel [(set (match_dup 6)
918 (minus:SI (match_dup 6)
919 (minus:SI (match_dup 7)
920 (ne:SI (reg:CC 17) (const_int 0)))))
922 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
925 operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
926 operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
927 operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
928 operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
931 (define_insn "*sub_carry"
932 [(set (match_operand:SI 0 "register_operand" "=r")
933 (minus:SI (match_operand:SI 1 "register_operand" "%0")
934 (minus:SI (match_operand:SI 2 "register_operand" "r")
935 (ne:SI (reg:CC 17) (const_int 0)))))
937 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
940 [(set_attr "type" "int2")
941 (set_attr "length" "2")])
943 ; Multiply/Divide instructions.
945 (define_insn "mulhisi3"
946 [(set (match_operand:SI 0 "register_operand" "=r")
947 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
948 (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
950 "mullo %1,%2\;mvfacmi %0"
951 [(set_attr "type" "multi")
952 (set_attr "length" "4")])
954 (define_insn "mulsi3"
955 [(set (match_operand:SI 0 "register_operand" "=r")
956 (mult:SI (match_operand:SI 1 "register_operand" "%0")
957 (match_operand:SI 2 "register_operand" "r")))]
960 [(set_attr "type" "mul2")
961 (set_attr "length" "2")])
963 (define_insn "divsi3"
964 [(set (match_operand:SI 0 "register_operand" "=r")
965 (div:SI (match_operand:SI 1 "register_operand" "0")
966 (match_operand:SI 2 "register_operand" "r")))]
969 [(set_attr "type" "div4")
970 (set_attr "length" "4")])
972 (define_insn "udivsi3"
973 [(set (match_operand:SI 0 "register_operand" "=r")
974 (udiv:SI (match_operand:SI 1 "register_operand" "0")
975 (match_operand:SI 2 "register_operand" "r")))]
978 [(set_attr "type" "div4")
979 (set_attr "length" "4")])
981 (define_insn "modsi3"
982 [(set (match_operand:SI 0 "register_operand" "=r")
983 (mod:SI (match_operand:SI 1 "register_operand" "0")
984 (match_operand:SI 2 "register_operand" "r")))]
987 [(set_attr "type" "div4")
988 (set_attr "length" "4")])
990 (define_insn "umodsi3"
991 [(set (match_operand:SI 0 "register_operand" "=r")
992 (umod:SI (match_operand:SI 1 "register_operand" "0")
993 (match_operand:SI 2 "register_operand" "r")))]
996 [(set_attr "type" "div4")
997 (set_attr "length" "4")])
999 ;; Boolean instructions.
1001 ;; We don't define the DImode versions as expand_binop does a good enough job.
1002 ;; And if it doesn't it should be fixed.
1004 (define_insn "andsi3"
1005 [(set (match_operand:SI 0 "register_operand" "=r,r")
1006 (and:SI (match_operand:SI 1 "register_operand" "%0,r")
1007 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1011 /* If we are worried about space, see if we can break this up into two
1012 short instructions, which might eliminate a NOP being inserted. */
1014 && m32r_not_same_reg (operands[0], operands[1])
1015 && GET_CODE (operands[2]) == CONST_INT
1016 && INT8_P (INTVAL (operands[2])))
1019 else if (GET_CODE (operands[2]) == CONST_INT)
1020 return \"and3 %0,%1,%#%X2\";
1022 return \"and %0,%2\";
1024 [(set_attr "type" "int2,int4")
1025 (set_attr "length" "2,4")])
1028 [(set (match_operand:SI 0 "register_operand" "")
1029 (and:SI (match_operand:SI 1 "register_operand" "")
1030 (match_operand:SI 2 "int8_operand" "")))]
1031 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1032 [(set (match_dup 0) (match_dup 2))
1033 (set (match_dup 0) (and:SI (match_dup 0) (match_dup 1)))]
1036 (define_insn "iorsi3"
1037 [(set (match_operand:SI 0 "register_operand" "=r,r")
1038 (ior:SI (match_operand:SI 1 "register_operand" "%0,r")
1039 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1043 /* If we are worried about space, see if we can break this up into two
1044 short instructions, which might eliminate a NOP being inserted. */
1046 && m32r_not_same_reg (operands[0], operands[1])
1047 && GET_CODE (operands[2]) == CONST_INT
1048 && INT8_P (INTVAL (operands[2])))
1051 else if (GET_CODE (operands[2]) == CONST_INT)
1052 return \"or3 %0,%1,%#%X2\";
1054 return \"or %0,%2\";
1056 [(set_attr "type" "int2,int4")
1057 (set_attr "length" "2,4")])
1060 [(set (match_operand:SI 0 "register_operand" "")
1061 (ior:SI (match_operand:SI 1 "register_operand" "")
1062 (match_operand:SI 2 "int8_operand" "")))]
1063 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1064 [(set (match_dup 0) (match_dup 2))
1065 (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 1)))]
1068 (define_insn "xorsi3"
1069 [(set (match_operand:SI 0 "register_operand" "=r,r")
1070 (xor:SI (match_operand:SI 1 "register_operand" "%0,r")
1071 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1075 /* If we are worried about space, see if we can break this up into two
1076 short instructions, which might eliminate a NOP being inserted. */
1078 && m32r_not_same_reg (operands[0], operands[1])
1079 && GET_CODE (operands[2]) == CONST_INT
1080 && INT8_P (INTVAL (operands[2])))
1083 else if (GET_CODE (operands[2]) == CONST_INT)
1084 return \"xor3 %0,%1,%#%X2\";
1086 return \"xor %0,%2\";
1088 [(set_attr "type" "int2,int4")
1089 (set_attr "length" "2,4")])
1092 [(set (match_operand:SI 0 "register_operand" "")
1093 (xor:SI (match_operand:SI 1 "register_operand" "")
1094 (match_operand:SI 2 "int8_operand" "")))]
1095 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1096 [(set (match_dup 0) (match_dup 2))
1097 (set (match_dup 0) (xor:SI (match_dup 0) (match_dup 1)))]
1100 (define_insn "negsi2"
1101 [(set (match_operand:SI 0 "register_operand" "=r")
1102 (neg:SI (match_operand:SI 1 "register_operand" "r")))]
1105 [(set_attr "type" "int2")
1106 (set_attr "length" "2")])
1108 (define_insn "one_cmplsi2"
1109 [(set (match_operand:SI 0 "register_operand" "=r")
1110 (not:SI (match_operand:SI 1 "register_operand" "r")))]
1113 [(set_attr "type" "int2")
1114 (set_attr "length" "2")])
1116 ;; Shift instructions.
1118 (define_insn "ashlsi3"
1119 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1120 (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
1121 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1127 [(set_attr "type" "shift2,shift2,shift4")
1128 (set_attr "length" "2,2,4")])
1130 (define_insn "ashrsi3"
1131 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1132 (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
1133 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1139 [(set_attr "type" "shift2,shift2,shift4")
1140 (set_attr "length" "2,2,4")])
1142 (define_insn "lshrsi3"
1143 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1144 (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
1145 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1151 [(set_attr "type" "shift2,shift2,shift4")
1152 (set_attr "length" "2,2,4")])
1154 ;; Compare instructions.
1155 ;; This controls RTL generation and register allocation.
1157 ;; We generate RTL for comparisons and branches by having the cmpxx
1158 ;; patterns store away the operands. Then the bcc patterns
1159 ;; emit RTL for both the compare and the branch.
1161 ;; On the m32r it is more efficient to use the bxxz instructions and
1162 ;; thus merge the compare and branch into one instruction, so they are
;; cmpsi does not emit a compare itself; it only records its operands
;; in m32r_compare_op0/m32r_compare_op1 for the later bcc/scc expanders.
1165 (define_expand "cmpsi"
1167 (compare:CC (match_operand:SI 0 "register_operand" "")
1168 (match_operand:SI 1 "reg_or_cmp_int16_operand" "")))]
1172 m32r_compare_op0 = operands[0];
1173 m32r_compare_op1 = operands[1];
;; Equality-with-zero compare; only available on M32RX/M32R2.
1177 (define_insn "cmp_eqsi_zero_insn"
1179 (eq:CC (match_operand:SI 0 "register_operand" "r,r")
1180 (match_operand:SI 1 "reg_or_zero_operand" "r,P")))]
1181 "TARGET_M32RX || TARGET_M32R2"
1185 [(set_attr "type" "int4")
1186 (set_attr "length" "4")])
1188 ;; The cmp_xxx_insn patterns set the condition bit to the result of the
1189 ;; comparison. There isn't a "compare equal" instruction so cmp_eqsi_insn
1190 ;; is quite inefficient. However, it is rarely used.
;; Synthesized equality: subtract (or add the negated immediate) into a
;; scratch, then "cmpui scratch,#1" leaves the condition bit = (diff < 1),
;; i.e. diff == 0 for the values produced here.  Multi-insn, 8 bytes.
1192 (define_insn "cmp_eqsi_insn"
1194 (eq:CC (match_operand:SI 0 "register_operand" "r,r")
1195 (match_operand:SI 1 "reg_or_cmp_int16_operand" "r,P")))
1196 (clobber (match_scratch:SI 2 "=&r,&r"))]
1200 if (which_alternative == 0)
1202 return \"mv %2,%0\;sub %2,%1\;cmpui %2,#1\";
1206 if (INTVAL (operands [1]) == 0)
1207 return \"cmpui %0, #1\";
1208 else if (REGNO (operands [2]) == REGNO (operands [0]))
1209 return \"addi %0,%#%N1\;cmpui %2,#1\";
1211 return \"add3 %2,%0,%#%N1\;cmpui %2,#1\";
1214 [(set_attr "type" "multi,multi")
1215 (set_attr "length" "8,8")])
;; Signed less-than compare; register form is short, immediate form long.
1217 (define_insn "cmp_ltsi_insn"
1219 (lt:CC (match_operand:SI 0 "register_operand" "r,r")
1220 (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
1225 [(set_attr "type" "int2,int4")
1226 (set_attr "length" "2,4")])
;; Unsigned less-than compare.
1228 (define_insn "cmp_ltusi_insn"
1230 (ltu:CC (match_operand:SI 0 "register_operand" "r,r")
1231 (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
1236 [(set_attr "type" "int2,int4")
1237 (set_attr "length" "2,4")])
1239 ;; These control RTL generation for conditional jump insns.
;; Each bxx expander builds the actual comparison RTL via gen_compare
;; from the operands previously saved by cmpsi in m32r_compare_op0/op1,
;; producing a combined compare-and-branch (FALSE = not inverted).
1241 (define_expand "beq"
1243 (if_then_else (match_dup 1)
1244 (label_ref (match_operand 0 "" ""))
1249 operands[1] = gen_compare (EQ, m32r_compare_op0, m32r_compare_op1, FALSE);
1252 (define_expand "bne"
1254 (if_then_else (match_dup 1)
1255 (label_ref (match_operand 0 "" ""))
1260 operands[1] = gen_compare (NE, m32r_compare_op0, m32r_compare_op1, FALSE);
1263 (define_expand "bgt"
1265 (if_then_else (match_dup 1)
1266 (label_ref (match_operand 0 "" ""))
1271 operands[1] = gen_compare (GT, m32r_compare_op0, m32r_compare_op1, FALSE);
1274 (define_expand "ble"
1276 (if_then_else (match_dup 1)
1277 (label_ref (match_operand 0 "" ""))
1282 operands[1] = gen_compare (LE, m32r_compare_op0, m32r_compare_op1, FALSE);
1285 (define_expand "bge"
1287 (if_then_else (match_dup 1)
1288 (label_ref (match_operand 0 "" ""))
1293 operands[1] = gen_compare (GE, m32r_compare_op0, m32r_compare_op1, FALSE);
1296 (define_expand "blt"
1298 (if_then_else (match_dup 1)
1299 (label_ref (match_operand 0 "" ""))
1304 operands[1] = gen_compare (LT, m32r_compare_op0, m32r_compare_op1, FALSE);
1307 (define_expand "bgtu"
1309 (if_then_else (match_dup 1)
1310 (label_ref (match_operand 0 "" ""))
1315 operands[1] = gen_compare (GTU, m32r_compare_op0, m32r_compare_op1, FALSE);
1318 (define_expand "bleu"
1320 (if_then_else (match_dup 1)
1321 (label_ref (match_operand 0 "" ""))
1326 operands[1] = gen_compare (LEU, m32r_compare_op0, m32r_compare_op1, FALSE);
1329 (define_expand "bgeu"
1331 (if_then_else (match_dup 1)
1332 (label_ref (match_operand 0 "" ""))
1337 operands[1] = gen_compare (GEU, m32r_compare_op0, m32r_compare_op1, FALSE);
1340 (define_expand "bltu"
1342 (if_then_else (match_dup 1)
1343 (label_ref (match_operand 0 "" ""))
1348 operands[1] = gen_compare (LTU, m32r_compare_op0, m32r_compare_op1, FALSE);
1351 ;; Now match both normal and inverted jump.
;; Branch on the condition bit (hard reg 17).  The output chooses
;; "bc" (branch if condition set) for NE and "bnc" for EQ, and appends
;; the ".s" short-form suffix when the computed length is 2 bytes.
1353 (define_insn "*branch_insn"
1355 (if_then_else (match_operator 1 "eqne_comparison_operator"
1356 [(reg 17) (const_int 0)])
1357 (label_ref (match_operand 0 "" ""))
1362 static char instruction[40];
1363 sprintf (instruction, \"%s%s %%l0\",
1364 (GET_CODE (operands[1]) == NE) ? \"bc\" : \"bnc\",
1365 (get_attr_length (insn) == 2) ? \".s\" : \"\");
1368 [(set_attr "type" "branch")
1369 ; We use 400/800 instead of 512,1024 to account for inaccurate insn
1370 ; lengths and insn alignments that are complex to track.
1371 ; It's not important that we be hyper-precise here. It may be more
1372 ; important blah blah blah when the chip supports parallel execution
1373 ; blah blah blah but until then blah blah blah this is simple and
1375 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Inverted form: branch target is the if_then_else's ELSE arm, so the
;; opcode choice is reversed (EQ -> "bc", NE -> "bnc").
1381 (define_insn "*rev_branch_insn"
1383 (if_then_else (match_operator 1 "eqne_comparison_operator"
1384 [(reg 17) (const_int 0)])
1386 (label_ref (match_operand 0 "" ""))))]
1387 ;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
1391 static char instruction[40];
1392 sprintf (instruction, \"%s%s %%l0\",
1393 (GET_CODE (operands[1]) == EQ) ? \"bc\" : \"bnc\",
1394 (get_attr_length (insn) == 2) ? \".s\" : \"\");
1397 [(set_attr "type" "branch")
1398 ; We use 400/800 instead of 512,1024 to account for inaccurate insn
1399 ; lengths and insn alignments that are complex to track.
1400 ; It's not important that we be hyper-precise here. It may be more
1401 ; important blah blah blah when the chip supports parallel execution
1402 ; blah blah blah but until then blah blah blah this is simple and
1404 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1410 ; reg/reg compare and branch insns
;; Combined reg/reg compare-and-branch.  If the target is close enough
;; (length 4) a direct beq/bne is emitted; otherwise the sense is
;; inverted and a short branch skips over an unconditional "bra".
1412 (define_insn "*reg_branch_insn"
1414 (if_then_else (match_operator 1 "eqne_comparison_operator"
1415 [(match_operand:SI 2 "register_operand" "r")
1416 (match_operand:SI 3 "register_operand" "r")])
1417 (label_ref (match_operand 0 "" ""))
1422 /* Is branch target reachable with beq/bne? */
1423 if (get_attr_length (insn) == 4)
1425 if (GET_CODE (operands[1]) == EQ)
1426 return \"beq %2,%3,%l0\";
1428 return \"bne %2,%3,%l0\";
1432 if (GET_CODE (operands[1]) == EQ)
1433 return \"bne %2,%3,1f\;bra %l0\;1:\";
1435 return \"beq %2,%3,1f\;bra %l0\;1:\";
1438 [(set_attr "type" "branch")
1439 ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1440 ; which is complex to track and inaccurate length specs.
1441 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Inverted form: the label is the ELSE arm, so EQ/NE selection is
;; swapped relative to *reg_branch_insn.
1447 (define_insn "*rev_reg_branch_insn"
1449 (if_then_else (match_operator 1 "eqne_comparison_operator"
1450 [(match_operand:SI 2 "register_operand" "r")
1451 (match_operand:SI 3 "register_operand" "r")])
1453 (label_ref (match_operand 0 "" ""))))]
1457 /* Is branch target reachable with beq/bne? */
1458 if (get_attr_length (insn) == 4)
1460 if (GET_CODE (operands[1]) == NE)
1461 return \"beq %2,%3,%l0\";
1463 return \"bne %2,%3,%l0\";
1467 if (GET_CODE (operands[1]) == NE)
1468 return \"bne %2,%3,1f\;bra %l0\;1:\";
1470 return \"beq %2,%3,1f\;bra %l0\;1:\";
1473 [(set_attr "type" "branch")
1474 ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1475 ; which is complex to track and inaccurate length specs.
1476 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1482 ; reg/zero compare and branch insns
;; Combined reg-vs-zero compare-and-branch using the bxxz family
;; (beqz/bnez/blez/bgtz/bltz/bgez).  br holds the direct condition
;; mnemonic suffix, invbr the inverted one; the inverted form plus a
;; "bra" is used when the target is out of bxxz range.
1484 (define_insn "*zero_branch_insn"
1486 (if_then_else (match_operator 1 "signed_comparison_operator"
1487 [(match_operand:SI 2 "register_operand" "r")
1489 (label_ref (match_operand 0 "" ""))
1494 const char *br,*invbr;
1497 switch (GET_CODE (operands[1]))
1499 case EQ : br = \"eq\"; invbr = \"ne\"; break;
1500 case NE : br = \"ne\"; invbr = \"eq\"; break;
1501 case LE : br = \"le\"; invbr = \"gt\"; break;
1502 case GT : br = \"gt\"; invbr = \"le\"; break;
1503 case LT : br = \"lt\"; invbr = \"ge\"; break;
1504 case GE : br = \"ge\"; invbr = \"lt\"; break;
1509 /* Is branch target reachable with bxxz? */
1510 if (get_attr_length (insn) == 4)
1512 sprintf (asmtext, \"b%sz %%2,%%l0\", br);
1513 output_asm_insn (asmtext, operands);
1517 sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", invbr);
1518 output_asm_insn (asmtext, operands);
1522 [(set_attr "type" "branch")
1523 ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1524 ; which is complex to track and inaccurate length specs.
1525 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Inverted form: label in the ELSE arm, so br/invbr usage is swapped.
;; NOTE(review): this one matches only eqne_comparison_operator, unlike
;; the signed_comparison_operator above -- the full LE/GT/LT/GE cases in
;; the switch may be unreachable here; confirm against the complete file.
1531 (define_insn "*rev_zero_branch_insn"
1533 (if_then_else (match_operator 1 "eqne_comparison_operator"
1534 [(match_operand:SI 2 "register_operand" "r")
1537 (label_ref (match_operand 0 "" ""))))]
1541 const char *br,*invbr;
1544 switch (GET_CODE (operands[1]))
1546 case EQ : br = \"eq\"; invbr = \"ne\"; break;
1547 case NE : br = \"ne\"; invbr = \"eq\"; break;
1548 case LE : br = \"le\"; invbr = \"gt\"; break;
1549 case GT : br = \"gt\"; invbr = \"le\"; break;
1550 case LT : br = \"lt\"; invbr = \"ge\"; break;
1551 case GE : br = \"ge\"; invbr = \"lt\"; break;
1556 /* Is branch target reachable with bxxz? */
1557 if (get_attr_length (insn) == 4)
1559 sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
1560 output_asm_insn (asmtext, operands);
1564 sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", br);
1565 output_asm_insn (asmtext, operands);
1569 [(set_attr "type" "branch")
1570 ; We use 25000/50000 instead of 32768/65536 to account for slot filling
1571 ; which is complex to track and inaccurate length specs.
1572 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1578 ;; S<cc> operations to set a register to 1/0 based on a comparison
;; seq: op0 = (op1 == op2).  Picks the cheapest strategy available:
;; the two-operand M32RX/M32R2 insn, the compare-with-zero form, or the
;; generic seq_insn with an eq_int16-range immediate.
1580 (define_expand "seq"
1581 [(match_operand:SI 0 "register_operand" "")]
1585 rtx op0 = operands[0];
1586 rtx op1 = m32r_compare_op0;
1587 rtx op2 = m32r_compare_op1;
1588 enum machine_mode mode = GET_MODE (op0);
1593 if (! register_operand (op1, mode))
1594 op1 = force_reg (mode, op1);
1596 if (TARGET_M32RX || TARGET_M32R2)
1598 if (! reg_or_zero_operand (op2, mode))
1599 op2 = force_reg (mode, op2);
1601 emit_insn (gen_seq_insn_m32rx (op0, op1, op2));
1604 if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
1606 emit_insn (gen_seq_zero_insn (op0, op1));
1610 if (! reg_or_eq_int16_operand (op2, mode))
1611 op2 = force_reg (mode, op2);
1613 emit_insn (gen_seq_insn (op0, op1, op2));
;; M32RX/M32R2-only eq: commutative operands ("%r"), clobbers the
;; condition bit; 6 bytes total.
1617 (define_insn "seq_insn_m32rx"
1618 [(set (match_operand:SI 0 "register_operand" "=r")
1619 (eq:SI (match_operand:SI 1 "register_operand" "%r")
1620 (match_operand:SI 2 "reg_or_zero_operand" "rP")))
1621 (clobber (reg:CC 17))]
1622 "TARGET_M32RX || TARGET_M32R2"
1624 [(set_attr "type" "multi")
1625 (set_attr "length" "6")])
;; Post-reload split of the above: an eq:CC compare into the condition
;; bit, then read it back as 0/1.  (define_split header line elided.)
1628 [(set (match_operand:SI 0 "register_operand" "")
1629 (eq:SI (match_operand:SI 1 "register_operand" "")
1630 (match_operand:SI 2 "reg_or_zero_operand" "")))
1631 (clobber (reg:CC 17))]
1632 "TARGET_M32RX || TARGET_M32R2"
1634 (eq:CC (match_dup 1)
1637 (ne:SI (reg:CC 17) (const_int 0)))]
;; op0 = (op1 == 0).  Split below rewrites it as "cmpui op1,#1"
;; (condition bit = op1 < 1 unsigned, i.e. op1 == 0) followed by movcc.
1640 (define_insn "seq_zero_insn"
1641 [(set (match_operand:SI 0 "register_operand" "=r")
1642 (eq:SI (match_operand:SI 1 "register_operand" "r")
1644 (clobber (reg:CC 17))]
1647 [(set_attr "type" "multi")
1648 (set_attr "length" "6")])
;; Split of seq_zero_insn; emits the replacement insn sequence via
;; get_insns.  (define_split header line elided in this excerpt.)
1651 [(set (match_operand:SI 0 "register_operand" "")
1652 (eq:SI (match_operand:SI 1 "register_operand" "")
1654 (clobber (reg:CC 17))]
1659 rtx op0 = operands[0];
1660 rtx op1 = operands[1];
1663 emit_insn (gen_cmp_ltusi_insn (op1, const1_rtx));
1664 emit_insn (gen_movcc_insn (op0));
1665 operands[3] = get_insns ();
;; Generic op0 = (op1 == op2).  Alternatives let the scratch reuse
;; operand 1 or 2, or take a fresh earlyclobber reg, or compare against
;; a PK-class immediate; 8-10 bytes.
1669 (define_insn "seq_insn"
1670 [(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
1671 (eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
1672 (match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
1673 (clobber (reg:CC 17))
1674 (clobber (match_scratch:SI 3 "=1,2,&r,r"))]
1677 [(set_attr "type" "multi")
1678 (set_attr "length" "8,8,10,10")])
;; Post-reload split of seq_insn: compute op1-op2 (or op1^op2) into the
;; scratch, then cmpui scratch,#1 + movcc as in the zero case above.
;; (define_split header line elided in this excerpt.)
1681 [(set (match_operand:SI 0 "register_operand" "")
1682 (eq:SI (match_operand:SI 1 "register_operand" "")
1683 (match_operand:SI 2 "reg_or_eq_int16_operand" "")))
1684 (clobber (reg:CC 17))
1685 (clobber (match_scratch:SI 3 ""))]
1686 "TARGET_M32R && reload_completed"
1690 rtx op0 = operands[0];
1691 rtx op1 = operands[1];
1692 rtx op2 = operands[2];
1693 rtx op3 = operands[3];
1694 HOST_WIDE_INT value;
1696 if (GET_CODE (op2) == REG && GET_CODE (op3) == REG
1697 && REGNO (op2) == REGNO (op3))
1704 if (GET_CODE (op1) == REG && GET_CODE (op3) == REG
1705 && REGNO (op1) != REGNO (op3))
1707 emit_move_insn (op3, op1);
1711 if (GET_CODE (op2) == CONST_INT && (value = INTVAL (op2)) != 0
1712 && CMP_INT16_P (value))
1713 emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
1715 emit_insn (gen_xorsi3 (op3, op1, op2));
1717 emit_insn (gen_cmp_ltusi_insn (op3, const1_rtx));
1718 emit_insn (gen_movcc_insn (op0));
1719 operands[4] = get_insns ();
;; sne: op0 = (op1 != op2).  For a nonzero uint16 (or non-constant) op2
;; the difference is first formed with xor into a fresh register, which
;; requires a pseudo, hence the reload_completed/in_progress bail-out.
1723 (define_expand "sne"
1724 [(match_operand:SI 0 "register_operand" "")]
1728 rtx op0 = operands[0];
1729 rtx op1 = m32r_compare_op0;
1730 rtx op2 = m32r_compare_op1;
1731 enum machine_mode mode = GET_MODE (op0);
1736 if (GET_CODE (op2) != CONST_INT
1737 || (INTVAL (op2) != 0 && UINT16_P (INTVAL (op2))))
1741 if (reload_completed || reload_in_progress)
1744 reg = gen_reg_rtx (SImode);
1745 emit_insn (gen_xorsi3 (reg, op1, op2));
1748 if (! register_operand (op1, mode))
1749 op1 = force_reg (mode, op1);
1751 emit_insn (gen_sne_zero_insn (op0, op1));
;; op0 = (op1 != 0); clobbers the condition bit and a scratch; 6 bytes.
1758 (define_insn "sne_zero_insn"
1759 [(set (match_operand:SI 0 "register_operand" "=r")
1760 (ne:SI (match_operand:SI 1 "register_operand" "r")
1762 (clobber (reg:CC 17))
1763 (clobber (match_scratch:SI 2 "=&r"))]
1766 [(set_attr "type" "multi")
1767 (set_attr "length" "6")])
;; Split of sne_zero_insn into an ltu:CC compare on the scratch plus a
;; movcc-style read of the condition bit.  (Header line elided.)
1770 [(set (match_operand:SI 0 "register_operand" "")
1771 (ne:SI (match_operand:SI 1 "register_operand" "")
1773 (clobber (reg:CC 17))
1774 (clobber (match_scratch:SI 2 ""))]
1779 (ltu:CC (match_dup 2)
1782 (ne:SI (reg:CC 17) (const_int 0)))]
;; slt: op0 = (op1 < op2), signed.
1785 (define_expand "slt"
1786 [(match_operand:SI 0 "register_operand" "")]
1790 rtx op0 = operands[0];
1791 rtx op1 = m32r_compare_op0;
1792 rtx op2 = m32r_compare_op1;
1793 enum machine_mode mode = GET_MODE (op0);
1798 if (! register_operand (op1, mode))
1799 op1 = force_reg (mode, op1);
1801 if (! reg_or_int16_operand (op2, mode))
1802 op2 = force_reg (mode, op2);
1804 emit_insn (gen_slt_insn (op0, op1, op2));
;; op0 = (op1 < op2) signed; clobbers the condition bit; 4 or 6 bytes.
1808 (define_insn "slt_insn"
1809 [(set (match_operand:SI 0 "register_operand" "=r,r")
1810 (lt:SI (match_operand:SI 1 "register_operand" "r,r")
1811 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
1812 (clobber (reg:CC 17))]
1815 [(set_attr "type" "multi")
1816 (set_attr "length" "4,6")])
;; Split of slt_insn: lt:CC compare, then read the condition bit.
;; (define_split header line elided in this excerpt.)
1819 [(set (match_operand:SI 0 "register_operand" "")
1820 (lt:SI (match_operand:SI 1 "register_operand" "")
1821 (match_operand:SI 2 "reg_or_int16_operand" "")))
1822 (clobber (reg:CC 17))]
1825 (lt:CC (match_dup 1)
1828 (ne:SI (reg:CC 17) (const_int 0)))]
;; sle: op0 = (op1 <= op2).  For constants, (op1 <= c) is rewritten as
;; (op1 < c+1); c >= INT32_MAX-ish makes the result trivially 1.
1831 (define_expand "sle"
1832 [(match_operand:SI 0 "register_operand" "")]
1836 rtx op0 = operands[0];
1837 rtx op1 = m32r_compare_op0;
1838 rtx op2 = m32r_compare_op1;
1839 enum machine_mode mode = GET_MODE (op0);
1844 if (! register_operand (op1, mode))
1845 op1 = force_reg (mode, op1);
1847 if (GET_CODE (op2) == CONST_INT
1849 HOST_WIDE_INT value = INTVAL (op2);
1850 if (value >= 2147483647)
1852 emit_move_insn (op0, const1_rtx);
1856 op2 = GEN_INT (value+1);
1857 if (value < -32768 || value >= 32767)
1858 op2 = force_reg (mode, op2);
1860 emit_insn (gen_slt_insn (op0, op1, op2));
1864 if (! register_operand (op2, mode))
1865 op2 = force_reg (mode, op2);
1867 emit_insn (gen_sle_insn (op0, op1, op2));
;; Register-register <=; clobbers the condition bit; 8 bytes.
1871 (define_insn "sle_insn"
1872 [(set (match_operand:SI 0 "register_operand" "=r")
1873 (le:SI (match_operand:SI 1 "register_operand" "r")
1874 (match_operand:SI 2 "register_operand" "r")))
1875 (clobber (reg:CC 17))]
1878 [(set_attr "type" "multi")
1879 (set_attr "length" "8")])
;; Split: compute (op2 < op1) via the condition bit, then invert the
;; 0/1 result with xor.  (define_split header line elided.)
1882 [(set (match_operand:SI 0 "register_operand" "")
1883 (le:SI (match_operand:SI 1 "register_operand" "")
1884 (match_operand:SI 2 "register_operand" "")))
1885 (clobber (reg:CC 17))]
1888 (lt:CC (match_dup 2)
1891 (ne:SI (reg:CC 17) (const_int 0)))
1893 (xor:SI (match_dup 0)
1897 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
1898 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
;; Size-optimized variant of the split above.  (Header line elided.)
1900 [(set (match_operand:SI 0 "register_operand" "")
1901 (le:SI (match_operand:SI 1 "register_operand" "")
1902 (match_operand:SI 2 "register_operand" "")))
1903 (clobber (reg:CC 17))]
1906 (lt:CC (match_dup 2)
1909 (ne:SI (reg:CC 17) (const_int 0)))
1911 (plus:SI (match_dup 0)
1914 (neg:SI (match_dup 0)))]
;; sgt: op0 = (op1 > op2) implemented as slt with operands swapped.
1917 (define_expand "sgt"
1918 [(match_operand:SI 0 "register_operand" "")]
1922 rtx op0 = operands[0];
1923 rtx op1 = m32r_compare_op0;
1924 rtx op2 = m32r_compare_op1;
1925 enum machine_mode mode = GET_MODE (op0);
1930 if (! register_operand (op1, mode))
1931 op1 = force_reg (mode, op1);
1933 if (! register_operand (op2, mode))
1934 op2 = force_reg (mode, op2);
1936 emit_insn (gen_slt_insn (op0, op2, op1));
;; sge: op0 = (op1 >= op2), signed.
1940 (define_expand "sge"
1941 [(match_operand:SI 0 "register_operand" "")]
1945 rtx op0 = operands[0];
1946 rtx op1 = m32r_compare_op0;
1947 rtx op2 = m32r_compare_op1;
1948 enum machine_mode mode = GET_MODE (op0);
1953 if (! register_operand (op1, mode))
1954 op1 = force_reg (mode, op1);
1956 if (! reg_or_int16_operand (op2, mode))
1957 op2 = force_reg (mode, op2);
1959 emit_insn (gen_sge_insn (op0, op1, op2));
;; op0 = (op1 >= op2) signed; clobbers the condition bit; 8-10 bytes.
1963 (define_insn "sge_insn"
1964 [(set (match_operand:SI 0 "register_operand" "=r,r")
1965 (ge:SI (match_operand:SI 1 "register_operand" "r,r")
1966 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
1967 (clobber (reg:CC 17))]
1970 [(set_attr "type" "multi")
1971 (set_attr "length" "8,10")])
;; Split: (op1 >= op2) computed as NOT (op1 < op2): lt:CC compare, read
;; the condition bit, invert with xor.  (Header line elided.)
1974 [(set (match_operand:SI 0 "register_operand" "")
1975 (ge:SI (match_operand:SI 1 "register_operand" "")
1976 (match_operand:SI 2 "reg_or_int16_operand" "")))
1977 (clobber (reg:CC 17))]
1980 (lt:CC (match_dup 1)
1983 (ne:SI (reg:CC 17) (const_int 0)))
1985 (xor:SI (match_dup 0)
1989 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
1990 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
;; Size-optimized variant of the sge split.  (Header line elided.)
1992 [(set (match_operand:SI 0 "register_operand" "")
1993 (ge:SI (match_operand:SI 1 "register_operand" "")
1994 (match_operand:SI 2 "reg_or_int16_operand" "")))
1995 (clobber (reg:CC 17))]
1998 (lt:CC (match_dup 1)
2001 (ne:SI (reg:CC 17) (const_int 0)))
2003 (plus:SI (match_dup 0)
2006 (neg:SI (match_dup 0)))]
;; sltu: op0 = (op1 < op2), unsigned.
2009 (define_expand "sltu"
2010 [(match_operand:SI 0 "register_operand" "")]
2014 rtx op0 = operands[0];
2015 rtx op1 = m32r_compare_op0;
2016 rtx op2 = m32r_compare_op1;
2017 enum machine_mode mode = GET_MODE (op0);
2022 if (! register_operand (op1, mode))
2023 op1 = force_reg (mode, op1);
2025 if (! reg_or_int16_operand (op2, mode))
2026 op2 = force_reg (mode, op2);
2028 emit_insn (gen_sltu_insn (op0, op1, op2));
;; op0 = (op1 < op2) unsigned; clobbers the condition bit; 6-8 bytes.
2032 (define_insn "sltu_insn"
2033 [(set (match_operand:SI 0 "register_operand" "=r,r")
2034 (ltu:SI (match_operand:SI 1 "register_operand" "r,r")
2035 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
2036 (clobber (reg:CC 17))]
2039 [(set_attr "type" "multi")
2040 (set_attr "length" "6,8")])
;; Split: ltu:CC compare, then read the condition bit.  (Header elided.)
2043 [(set (match_operand:SI 0 "register_operand" "")
2044 (ltu:SI (match_operand:SI 1 "register_operand" "")
2045 (match_operand:SI 2 "reg_or_int16_operand" "")))
2046 (clobber (reg:CC 17))]
2049 (ltu:CC (match_dup 1)
2052 (ne:SI (reg:CC 17) (const_int 0)))]
;; sleu: op0 = (op1 <= op2), unsigned; constants are rewritten as
;; (op1 < c+1), mirroring sle above.
;; NOTE(review): the "value >= 2147483647" early-out looks inherited
;; from the signed sle and may be conservative for unsigned -- confirm
;; against the complete file before changing.
2055 (define_expand "sleu"
2056 [(match_operand:SI 0 "register_operand" "")]
2060 rtx op0 = operands[0];
2061 rtx op1 = m32r_compare_op0;
2062 rtx op2 = m32r_compare_op1;
2063 enum machine_mode mode = GET_MODE (op0);
2068 if (GET_CODE (op2) == CONST_INT
2070 HOST_WIDE_INT value = INTVAL (op2);
2071 if (value >= 2147483647)
2073 emit_move_insn (op0, const1_rtx);
2077 op2 = GEN_INT (value+1);
2078 if (value < 0 || value >= 32767)
2079 op2 = force_reg (mode, op2);
2081 emit_insn (gen_sltu_insn (op0, op1, op2));
2085 if (! register_operand (op2, mode))
2086 op2 = force_reg (mode, op2);
2088 emit_insn (gen_sleu_insn (op0, op1, op2));
;; Register-register unsigned <=; clobbers the condition bit; 8 bytes.
2092 (define_insn "sleu_insn"
2093 [(set (match_operand:SI 0 "register_operand" "=r")
2094 (leu:SI (match_operand:SI 1 "register_operand" "r")
2095 (match_operand:SI 2 "register_operand" "r")))
2096 (clobber (reg:CC 17))]
2099 [(set_attr "type" "multi")
2100 (set_attr "length" "8")])
;; Split: compute (op2 <u op1), then invert with xor.  (Header elided.)
2103 [(set (match_operand:SI 0 "register_operand" "")
2104 (leu:SI (match_operand:SI 1 "register_operand" "")
2105 (match_operand:SI 2 "register_operand" "")))
2106 (clobber (reg:CC 17))]
2109 (ltu:CC (match_dup 2)
2112 (ne:SI (reg:CC 17) (const_int 0)))
2114 (xor:SI (match_dup 0)
2118 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
2119 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
;; Size-optimized variant of the sleu split.  (Header line elided.)
2121 [(set (match_operand:SI 0 "register_operand" "")
2122 (leu:SI (match_operand:SI 1 "register_operand" "")
2123 (match_operand:SI 2 "register_operand" "")))
2124 (clobber (reg:CC 17))]
2127 (ltu:CC (match_dup 2)
2130 (ne:SI (reg:CC 17) (const_int 0)))
2132 (plus:SI (match_dup 0)
2135 (neg:SI (match_dup 0)))]
;; sgtu: op0 = (op1 >u op2), implemented as sltu with operands swapped.
2138 (define_expand "sgtu"
2139 [(match_operand:SI 0 "register_operand" "")]
2143 rtx op0 = operands[0];
2144 rtx op1 = m32r_compare_op0;
2145 rtx op2 = m32r_compare_op1;
2146 enum machine_mode mode = GET_MODE (op0);
2151 if (! register_operand (op1, mode))
2152 op1 = force_reg (mode, op1);
2154 if (! register_operand (op2, mode))
2155 op2 = force_reg (mode, op2);
2157 emit_insn (gen_sltu_insn (op0, op2, op1));
;; sgeu: op0 = (op1 >=u op2).
2161 (define_expand "sgeu"
2162 [(match_operand:SI 0 "register_operand" "")]
2166 rtx op0 = operands[0];
2167 rtx op1 = m32r_compare_op0;
2168 rtx op2 = m32r_compare_op1;
2169 enum machine_mode mode = GET_MODE (op0);
2174 if (! register_operand (op1, mode))
2175 op1 = force_reg (mode, op1);
2177 if (! reg_or_int16_operand (op2, mode))
2178 op2 = force_reg (mode, op2);
2180 emit_insn (gen_sgeu_insn (op0, op1, op2));
;; op0 = (op1 >=u op2); clobbers the condition bit; 8-10 bytes.
2184 (define_insn "sgeu_insn"
2185 [(set (match_operand:SI 0 "register_operand" "=r,r")
2186 (geu:SI (match_operand:SI 1 "register_operand" "r,r")
2187 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
2188 (clobber (reg:CC 17))]
2191 [(set_attr "type" "multi")
2192 (set_attr "length" "8,10")])
;; Split: (op1 >=u op2) = NOT (op1 <u op2): ltu:CC compare, read the
;; condition bit, invert with xor.  (Header line elided.)
2195 [(set (match_operand:SI 0 "register_operand" "")
2196 (geu:SI (match_operand:SI 1 "register_operand" "")
2197 (match_operand:SI 2 "reg_or_int16_operand" "")))
2198 (clobber (reg:CC 17))]
2201 (ltu:CC (match_dup 1)
2204 (ne:SI (reg:CC 17) (const_int 0)))
2206 (xor:SI (match_dup 0)
2210 ;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
2211 ;; xor reg,reg,1 which might eliminate a NOP being inserted.
;; Size-optimized variant of the sgeu split.  (Header line elided.)
2213 [(set (match_operand:SI 0 "register_operand" "")
2214 (geu:SI (match_operand:SI 1 "register_operand" "")
2215 (match_operand:SI 2 "reg_or_int16_operand" "")))
2216 (clobber (reg:CC 17))]
2219 (ltu:CC (match_dup 1)
2222 (ne:SI (reg:CC 17) (const_int 0)))
2224 (plus:SI (match_dup 0)
2227 (neg:SI (match_dup 0)))]
;; Read the condition bit (reg 17) back into a register as 0/1.
;; Short (2-byte) instruction; template elided in this excerpt.
2230 (define_insn "movcc_insn"
2231 [(set (match_operand:SI 0 "register_operand" "=r")
2232 (ne:SI (reg:CC 17) (const_int 0)))]
2235 [(set_attr "type" "misc")
2236 (set_attr "length" "2")])
2239 ;; Unconditional and other jump instructions.
;; Plain unconditional jump; length is distance-dependent (short "bra.s"
;; vs long form -- exact arms of the length attribute are elided here).
2242 [(set (pc) (label_ref (match_operand 0 "" "")))]
2245 [(set_attr "type" "uncond_branch")
2246 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
;; Jump through a register/address; 2 bytes.
2252 (define_insn "indirect_jump"
2253 [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
2256 [(set_attr "type" "uncond_branch")
2257 (set_attr "length" "2")])
;; Function return; 2 bytes.  Condition/template elided in this excerpt.
2259 (define_insn "return"
2263 [(set_attr "type" "uncond_branch")
2264 (set_attr "length" "2")])
;; Switch-table jump.  Under PIC the table holds offsets from the table
;; base, so the base label's address is added back before jumping.
2266 (define_expand "tablejump"
2267 [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
2268 (use (label_ref (match_operand 1 "" "")))])]
2272 /* In pic mode, our address differences are against the base of the
2273 table. Add that base value back in; CSE ought to be able to combine
2274 the two address loads. */
2279 tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
2281 tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
2282 operands[0] = memory_address (Pmode, tmp);
;; The actual table-jump insn; the (use) keeps the jump table live.
2286 (define_insn "*tablejump_insn"
2287 [(set (pc) (match_operand:SI 0 "address_operand" "p"))
2288 (use (label_ref (match_operand 1 "" "")))]
2291 [(set_attr "type" "uncond_branch")
2292 (set_attr "length" "2")])
;; Call expander; r14 (the link register per this port's usage here)
;; is clobbered by the call.  Under PIC it marks the PIC register live.
2294 (define_expand "call"
2295 ;; operands[1] is stack_size_rtx
2296 ;; operands[2] is next_arg_register
2297 [(parallel [(call (match_operand:SI 0 "call_operand" "")
2298 (match_operand 1 "" ""))
2299 (clobber (reg:SI 14))])]
2304 current_function_uses_pic_offset_table = 1;
;; Call through a register; 2 bytes.  Template elided in this excerpt.
2307 (define_insn "*call_via_reg"
2308 [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
2309 (match_operand 1 "" ""))
2310 (clobber (reg:SI 14))]
2313 [(set_attr "type" "call")
2314 (set_attr "length" "2")])
;; Direct call.  When the target is outside `bl` range (call26_operand
;; false) the address is materialized in r14 with seth/add3 and called
;; via jl; length attribute distinguishes the two cases (12 bytes for
;; the long form including nop filler for 4-byte return alignment).
2316 (define_insn "*call_via_label"
2317 [(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
2318 (match_operand 1 "" ""))
2319 (clobber (reg:SI 14))]
2323 int call26_p = call26_operand (operands[0], FUNCTION_MODE);
2327 /* We may not be able to reach with a `bl' insn so punt and leave it to
2329 We do this here, rather than doing a force_reg in the define_expand
2330 so these insns won't be separated, say by scheduling, thus simplifying
2332 return \"seth r14,%T0\;add3 r14,r14,%B0\;jl r14\";
2337 [(set_attr "type" "call")
2338 (set (attr "length")
2339 (if_then_else (eq (symbol_ref "call26_operand (operands[0], FUNCTION_MODE)")
2341 (const_int 12) ; 10 + 2 for nop filler
2342 ; The return address must be on a 4 byte boundary so
2343 ; there's no point in using a value of 2 here. A 2 byte
2344 ; insn may go in the left slot but we currently can't
2345 ; use such knowledge.
;; Value-returning call; mirrors "call" above with operand 0 receiving
;; the result and the call target shifted to operand 1.
2348 (define_expand "call_value"
2349 ;; operand 2 is stack_size_rtx
2350 ;; operand 3 is next_arg_register
2351 [(parallel [(set (match_operand 0 "register_operand" "=r")
2352 (call (match_operand:SI 1 "call_operand" "")
2353 (match_operand 2 "" "")))
2354 (clobber (reg:SI 14))])]
2359 current_function_uses_pic_offset_table = 1;
;; Value call through a register; 2 bytes.  Template elided here.
2362 (define_insn "*call_value_via_reg"
2363 [(set (match_operand 0 "register_operand" "=r")
2364 (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
2365 (match_operand 2 "" "")))
2366 (clobber (reg:SI 14))]
2369 [(set_attr "type" "call")
2370 (set_attr "length" "2")])
;; Direct value call; same bl-range fallback via r14 as *call_via_label.
2372 (define_insn "*call_value_via_label"
2373 [(set (match_operand 0 "register_operand" "=r")
2374 (call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
2375 (match_operand 2 "" "")))
2376 (clobber (reg:SI 14))]
2380 int call26_p = call26_operand (operands[1], FUNCTION_MODE);
2383 current_function_uses_pic_offset_table = 1;
2387 /* We may not be able to reach with a `bl' insn so punt and leave it to
2389 We do this here, rather than doing a force_reg in the define_expand
2390 so these insns won't be separated, say by scheduling, thus simplifying
2392 return \"seth r14,%T1\;add3 r14,r14,%B1\;jl r14\";
2397 [(set_attr "type" "call")
2398 (set (attr "length")
2399 (if_then_else (eq (symbol_ref "call26_operand (operands[1], FUNCTION_MODE)")
2401 (const_int 12) ; 10 + 2 for nop filler
2402 ; The return address must be on a 4 byte boundary so
2403 ; there's no point in using a value of 2 here. A 2 byte
2404 ; insn may go in the left slot but we currently can't
2405 ; use such knowledge.
;; Attribute tail of a short int2 pattern whose opening lines are not
;; visible in this excerpt (likely `nop` -- TODO confirm against the
;; complete file).
2412 [(set_attr "type" "int2")
2413 (set_attr "length" "2")])
2415 ;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
2416 ;; all of memory. This blocks insns from being moved across this point.
;; Scheduling barrier; emits no code (template elided in this excerpt).
2418 (define_insn "blockage"
2419 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
2423 ;; Special pattern to flush the icache.
;; Implemented as a trap (operand 1 selects the trap number); clobbers
;; the condition bit (reg 17).
2425 (define_insn "flush_icache"
2426 [(unspec_volatile [(match_operand 0 "memory_operand" "m")]
2427 UNSPECV_FLUSH_ICACHE)
2428 (match_operand 1 "" "")
2429 (clobber (reg:SI 17))]
2431 "* return \"trap %#%1 ; flush-icache\";"
2432 [(set_attr "type" "int4")
2433 (set_attr "length" "4")])
2435 ;; Speed up fabs and provide correct sign handling for -0
;; DFmode absolute value: operand 1 ties to operand 0 ("0" constraint)
;; so only the sign bit of the high word needs clearing.
2437 (define_insn "absdf2"
2438 [(set (match_operand:DF 0 "register_operand" "=r")
2439 (abs:DF (match_operand:DF 1 "register_operand" "0")))]
2442 [(set_attr "type" "multi")
2443 (set_attr "length" "4")])
;; Split: shift the high SImode word left then logically right (shift
;; counts elided here -- presumably by 1, clearing the IEEE sign bit;
;; TODO confirm).  (define_split header line elided.)
2446 [(set (match_operand:DF 0 "register_operand" "")
2447 (abs:DF (match_operand:DF 1 "register_operand" "")))]
2450 (ashift:SI (match_dup 2)
2453 (lshiftrt:SI (match_dup 2)
2455 "operands[2] = gen_highpart (SImode, operands[0]);")
;; SFmode absolute value; same sign-bit-clearing scheme as absdf2.
2457 (define_insn "abssf2"
2458 [(set (match_operand:SF 0 "register_operand" "=r")
2459 (abs:SF (match_operand:SF 1 "register_operand" "0")))]
2462 [(set_attr "type" "multi")
2463 (set_attr "length" "4")])
;; Split of abssf2, parallel to the DFmode split above.  (Header elided.)
2466 [(set (match_operand:SF 0 "register_operand" "")
2467 (abs:SF (match_operand:SF 1 "register_operand" "")))]
2470 (ashift:SI (match_dup 2)
2473 (lshiftrt:SI (match_dup 2)
2475 "operands[2] = gen_highpart (SImode, operands[0]);")
2477 ;; Conditional move instructions
2478 ;; Based on those done for the d10v
;; movsicc: only supported when the two arms are the 0/1 pair
;; (zero_and_one); otherwise the expander FAILs.  gen_compare is called
;; with TRUE (carry-flag form), and a blockage keeps the compare and
;; the conditional move from being separated.
2480 (define_expand "movsicc"
2482 (set (match_operand:SI 0 "register_operand" "r")
2483 (if_then_else:SI (match_operand 1 "" "")
2484 (match_operand:SI 2 "conditional_move_operand" "O")
2485 (match_operand:SI 3 "conditional_move_operand" "O")
2492 if (! zero_and_one (operands [2], operands [3]))
2495 /* Generate the comparison that will set the carry flag. */
2496 operands[1] = gen_compare (GET_CODE (operands[1]), m32r_compare_op0,
2497 m32r_compare_op1, TRUE);
2499 /* See other movsicc pattern below for reason why. */
2500 emit_insn (gen_blockage ());
2503 ;; Generate the conditional instructions based on how the carry flag is examined.
;; Output is delegated to emit_cond_move in m32r.c.
2504 (define_insn "*movsicc_internal"
2505 [(set (match_operand:SI 0 "register_operand" "=r")
2506 (if_then_else:SI (match_operand 1 "carry_compare_operand" "")
2507 (match_operand:SI 2 "conditional_move_operand" "O")
2508 (match_operand:SI 3 "conditional_move_operand" "O")
2511 "zero_and_one (operands [2], operands[3])"
2512 "* return emit_cond_move (operands, insn);"
2513 [(set_attr "type" "multi")
2514 (set_attr "length" "8")
2519 ;; Block moves, see m32r.c for more details.
2520 ;; Argument 0 is the destination
2521 ;; Argument 1 is the source
2522 ;; Argument 2 is the length
2523 ;; Argument 3 is the alignment
;; Expander delegates all code generation to m32r_expand_block_move.
2525 (define_expand "movmemsi"
2526 [(parallel [(set (match_operand:BLK 0 "general_operand" "")
2527 (match_operand:BLK 1 "general_operand" ""))
2528 (use (match_operand:SI 2 "immediate_operand" ""))
2529 (use (match_operand:SI 3 "immediate_operand" ""))])]
2533 if (operands[0]) /* avoid unused code messages */
2535 m32r_expand_block_move (operands);
2540 ;; Insn generated by block moves
;; Copies op2 bytes; operands 3/4 are the updated dest/src pointers
;; (tied to operands 0/1); two earlyclobber scratches; 72 bytes is the
;; maximum expansion emitted by m32r_output_block_move.
2542 (define_insn "movmemsi_internal"
2543 [(set (mem:BLK (match_operand:SI 0 "register_operand" "r")) ;; destination
2544 (mem:BLK (match_operand:SI 1 "register_operand" "r"))) ;; source
2545 (use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
2546 (set (match_operand:SI 3 "register_operand" "=0")
2547 (plus:SI (match_dup 0)
2548 (minus (match_dup 2) (const_int 4))))
2549 (set (match_operand:SI 4 "register_operand" "=1")
2550 (plus:SI (match_dup 1)
2552 (clobber (match_scratch:SI 5 "=&r")) ;; temp1
2553 (clobber (match_scratch:SI 6 "=&r"))] ;; temp2
2555 "* m32r_output_block_move (insn, operands); return \"\"; "
2556 [(set_attr "type" "store8")
2557 (set_attr "length" "72")]) ;; Maximum
2561 /* When generating pic, we need to load the symbol offset into a register.
2562 So that the optimizer does not confuse this with a normal symbol load
2563 we use an unspec. The offset will be loaded from a constant pool entry,
2564 since that is the only type of relocation we can use. */
;; Wraps the PIC symbol-offset load in UNSPEC_PIC_LOAD_ADDR (see the
;; comment above); 4-byte insn, template elided in this excerpt.
2566 (define_insn "pic_load_addr"
2567 [(set (match_operand:SI 0 "register_operand" "=r")
2568 (unspec:SI [(match_operand 1 "" "")] UNSPEC_PIC_LOAD_ADDR))]
2571 [(set_attr "type" "int4")])
2573 ;; Load program counter insns.
;; Materializes PC + offset (operand 1) into operand 0 via a
;; "bl.s .+4" that deposits the return address in lr; operand 2 selects
;; the short ld24 form versus the seth/add3 long form, and the length
;; attribute switches on it accordingly.  Clobbers r14 (lr).
2575 (define_insn "get_pc"
2576 [(clobber (reg:SI 14))
2577 (set (match_operand 0 "register_operand" "=r")
2578 (unspec [(match_operand 1 "" "")] UNSPEC_GET_PC))
2579 (use (match_operand:SI 2 "immediate_operand" ""))]
2583 if (INTVAL(operands[2]))
2584 return \"bl.s .+4\;ld24 %0,%#%1\;add %0,lr\";
2586 return \"bl.s .+4\;seth %0,%#shigh(%1)\;add3 %0,%0,%#low(%1+4)\;add %0,lr\";}"
2587 [(set (attr "length") (if_then_else (ne (match_dup 2) (const_int 0))
;; After a nonlocal/builtin setjmp return, reload the PIC register.
2591 (define_expand "builtin_setjmp_receiver"
2592 [(label_ref (match_operand 0 "" ""))]
2596 m32r_load_pic_register ();