]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/arm/cortex-a5.md
[AArch64, AArch32][Insn classification refactoring 6/N] Remove "neon_type" attribute
[thirdparty/gcc.git] / gcc / config / arm / cortex-a5.md
1 ;; ARM Cortex-A5 pipeline description
2 ;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
3 ;; Contributed by CodeSourcery.
4 ;;
5 ;; This file is part of GCC.
6 ;;
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published by
9 ;; the Free Software Foundation; either version 3, or (at your option)
10 ;; any later version.
11 ;;
12 ;; GCC is distributed in the hope that it will be useful, but
13 ;; WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 ;; General Public License for more details.
16 ;;
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
20
;; Single scheduling automaton covering every Cortex-A5 pipeline unit below.
21 (define_automaton "cortex_a5")
22
23 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
24 ;; Functional units.
25 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
26
27 ;; The integer (ALU) pipeline. There are five DPU pipeline
28 ;; stages. However the decode/issue stages operate the same for all
29 ;; instructions, so do not model them. We only need to model the
30 ;; first execute stage because instructions always advance one stage
31 ;; per cycle in order. Only branch instructions may dual-issue, so a
32 ;; single unit covers all of the LS, ALU, MAC and FPU pipelines.
33
;; Main execute stage; every non-branch reservation below claims this unit,
;; which enforces single issue of ALU/LS/MAC/FPU instructions.
34 (define_cpu_unit "cortex_a5_ex1" "cortex_a5")
35
36 ;; The branch pipeline. Branches can dual-issue with other instructions
37 ;; (except when those instructions take multiple cycles to issue).
38
39 (define_cpu_unit "cortex_a5_branch" "cortex_a5")
40
41 ;; Pseudo-unit for blocking the multiply pipeline when a double-precision
42 ;; multiply is in progress.
43
44 (define_cpu_unit "cortex_a5_fpmul_pipe" "cortex_a5")
45
46 ;; The floating-point add pipeline (ex1/f1 stage), used to model the usage
47 ;; of the add pipeline by fmac instructions, etc.
48
49 (define_cpu_unit "cortex_a5_fpadd_pipe" "cortex_a5")
50
51 ;; Floating-point div/sqrt (long latency, out-of-order completion).
52
53 (define_cpu_unit "cortex_a5_fp_div_sqrt" "cortex_a5")
54
55 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
56 ;; ALU instructions.
57 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
58
;; Plain ALU/shift/move operations: 2-cycle result latency, one cycle in ex1.
;; (The bypasses below lower the effective latency to 1 for consumers that
;; do not need the value early for a shift.)
59 (define_insn_reservation "cortex_a5_alu" 2
60 (and (eq_attr "tune" "cortexa5")
61 (eq_attr "type" "arlo_imm,arlo_reg,shift,shift_reg,\
62 mov_imm,mov_reg,mvn_imm,mvn_reg"))
63 "cortex_a5_ex1")
64
;; ALU operations with a shifted operand (and extends): same 2-cycle latency
;; and single ex1 cycle as plain ALU ops on this core.
65 (define_insn_reservation "cortex_a5_alu_shift" 2
66 (and (eq_attr "tune" "cortexa5")
67 (eq_attr "type" "extend,arlo_shift,arlo_shift_reg,\
68 mov_shift,mov_shift_reg,\
69 mvn_shift,mvn_shift_reg"))
70 "cortex_a5_ex1")
71
72 ;; Forwarding path for unshifted operands.
73
;; ALU/ALU-shift results forward to a following plain ALU op with
;; latency 1 instead of the reservations' nominal 2.
74 (define_bypass 1 "cortex_a5_alu,cortex_a5_alu_shift"
75 "cortex_a5_alu")
76
;; Same 1-cycle forward into an ALU-shift consumer, but only when the guard
;; arm_no_early_alu_shift_dep confirms the value is not needed early
;; (i.e. it is not the operand being shifted).
77 (define_bypass 1 "cortex_a5_alu,cortex_a5_alu_shift"
78 "cortex_a5_alu_shift"
79 "arm_no_early_alu_shift_dep")
80
81 ;; The multiplier pipeline can forward results from wr stage only so
82 ;; there's no need to specify bypasses.
83
;; Any 32-bit or 64-bit multiply (matched via the mul32/mul64 attributes
;; rather than a type list): latency 2, one cycle in ex1.
84 (define_insn_reservation "cortex_a5_mul" 2
85 (and (eq_attr "tune" "cortexa5")
86 (ior (eq_attr "mul32" "yes")
87 (eq_attr "mul64" "yes")))
88 "cortex_a5_ex1")
89
90 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
91 ;; Load/store instructions.
92 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
93
94 ;; Address-generation happens in the issue stage, which is one stage behind
95 ;; the ex1 stage (the first stage we care about for scheduling purposes). The
96 ;; dc1 stage is parallel with ex1, dc2 with ex2 and rot with wr.
97
;; Single-register (and byte) loads: 2-cycle load-to-use latency, one ex1 cycle.
98 (define_insn_reservation "cortex_a5_load1" 2
99 (and (eq_attr "tune" "cortexa5")
100 (eq_attr "type" "load_byte,load1"))
101 "cortex_a5_ex1")
102
;; Single-register store: produces no register result, hence latency 0.
103 (define_insn_reservation "cortex_a5_store1" 0
104 (and (eq_attr "tune" "cortexa5")
105 (eq_attr "type" "store1"))
106 "cortex_a5_ex1")
107
;; Two-register load: latency 3; the first cycle also claims the branch unit
;; (blocking dual-issue), then one more ex1 cycle.
108 (define_insn_reservation "cortex_a5_load2" 3
109 (and (eq_attr "tune" "cortexa5")
110 (eq_attr "type" "load2"))
111 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
112
;; Two-register store: no result (latency 0); occupies ex1 for two cycles,
;; the first also blocking the dual-issue branch slot.
113 (define_insn_reservation "cortex_a5_store2" 0
114 (and (eq_attr "tune" "cortexa5")
115 (eq_attr "type" "store2"))
116 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
117
;; Three-register load: latency 4; first two cycles block dual-issue
;; (ex1+branch), then a final ex1 cycle.
118 (define_insn_reservation "cortex_a5_load3" 4
119 (and (eq_attr "tune" "cortexa5")
120 (eq_attr "type" "load3"))
121 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
122 cortex_a5_ex1")
123
;; Three-register store: no result (latency 0); same three-cycle unit
;; occupancy pattern as load3.
124 (define_insn_reservation "cortex_a5_store3" 0
125 (and (eq_attr "tune" "cortexa5")
126 (eq_attr "type" "store3"))
127 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
128 cortex_a5_ex1")
129
;; Four-register load: latency 5; first three cycles block dual-issue
;; (ex1+branch), then a final ex1 cycle.
;; Fixed: this reservation tested (eq_attr "type" "load3"), which merely
;; duplicated cortex_a5_load3 above and left "load4" insns with no matching
;; reservation; it must test "load4".
130 (define_insn_reservation "cortex_a5_load4" 5
131 (and (eq_attr "tune" "cortexa5")
132 (eq_attr "type" "load4"))
133 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
134 cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
135
;; Four-register store: no result (latency 0); same four-cycle unit
;; occupancy pattern as load4.
;; Fixed: this reservation tested (eq_attr "type" "store3"), duplicating
;; cortex_a5_store3 above and leaving "store4" insns unmatched; it must
;; test "store4".
136 (define_insn_reservation "cortex_a5_store4" 0
137 (and (eq_attr "tune" "cortexa5")
138 (eq_attr "type" "store4"))
139 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1+cortex_a5_branch,\
140 cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
141
142 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
143 ;; Branches.
144 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
145
146 ;; Direct branches are the only instructions we can dual-issue (also IT and
147 ;; nop, but those aren't very interesting for scheduling). (The latency here
148 ;; is meant to represent when the branch actually takes place, but may not be
149 ;; entirely correct.)
150
;; Branches and calls use only the branch unit, so they can dual-issue with
;; an instruction occupying ex1.  (Reservation name intentionally matches
;; the unit name.)
151 (define_insn_reservation "cortex_a5_branch" 3
152 (and (eq_attr "tune" "cortexa5")
153 (eq_attr "type" "branch,call"))
154 "cortex_a5_branch")
155
156 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
157 ;; Floating-point arithmetic.
158 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
159
;; FP arithmetic, moves, conversions and compares: latency 4; claims ex1
;; plus the FP add pipe for one cycle.
;; NOTE(review): "fmuls" is also matched by cortex_a5_fpmuls below, which
;; reserves the multiply pipe instead — verify which reservation is intended
;; to apply to single-precision multiplies.
160 (define_insn_reservation "cortex_a5_fpalu" 4
161 (and (eq_attr "tune" "cortexa5")
162 (eq_attr "type" "ffariths, fadds, ffarithd, faddd, fcpys, fmuls, f_cvt,\
163 fcmps, fcmpd"))
164 "cortex_a5_ex1+cortex_a5_fpadd_pipe")
165
166 ;; For fconsts and fconstd, 8-bit immediate data is passed directly from
167 ;; f1 to f3 (which I think reduces the latency by one cycle).
168
;; FP constant loads (8-bit immediate forms): one cycle less latency than
;; general FP ALU ops, per the f1->f3 forwarding note above.
169 (define_insn_reservation "cortex_a5_fconst" 3
170 (and (eq_attr "tune" "cortexa5")
171 (eq_attr "type" "fconsts,fconstd"))
172 "cortex_a5_ex1+cortex_a5_fpadd_pipe")
173
174 ;; We should try not to attempt to issue a single-precision multiplication in
175 ;; the middle of a double-precision multiplication operation (the usage of
176 ;; cortex_a5_fpmul_pipe).
177
;; Single-precision multiply: latency 4; claims ex1 plus the FP multiply
;; pipe (the pseudo-unit that serializes against in-flight fmuld).
178 (define_insn_reservation "cortex_a5_fpmuls" 4
179 (and (eq_attr "tune" "cortexa5")
180 (eq_attr "type" "fmuls"))
181 "cortex_a5_ex1+cortex_a5_fpmul_pipe")
182
183 ;; For single-precision multiply-accumulate, the add (accumulate) is issued
184 ;; whilst the multiply is in F4. The multiply result can then be forwarded
185 ;; from F5 to F1. The issue unit is only used once (when we first start
186 ;; processing the instruction), but the usage of the FP add pipeline could
187 ;; block other instructions attempting to use it simultaneously. We try to
188 ;; avoid that using cortex_a5_fpadd_pipe.
189
;; Single-precision multiply-accumulate (incl. fused): latency 8; issues
;; into the multiply pipe, then after a 3-cycle gap occupies the FP add
;; pipe for the accumulate, as described in the comment above.
190 (define_insn_reservation "cortex_a5_fpmacs" 8
191 (and (eq_attr "tune" "cortexa5")
192 (eq_attr "type" "fmacs,ffmas"))
193 "cortex_a5_ex1+cortex_a5_fpmul_pipe, nothing*3, cortex_a5_fpadd_pipe")
194
195 ;; Non-multiply instructions can issue in the middle two instructions of a
196 ;; double-precision multiply. Note that it isn't entirely clear when a branch
197 ;; can dual-issue when a multi-cycle multiplication is in progress; we ignore
198 ;; that for now though.
199
;; Double-precision multiply: latency 7; holds the multiply pipe for four
;; cycles but releases ex1 in the middle two, letting non-multiply
;; instructions issue there (see comment above).
200 (define_insn_reservation "cortex_a5_fpmuld" 7
201 (and (eq_attr "tune" "cortexa5")
202 (eq_attr "type" "fmuld"))
203 "cortex_a5_ex1+cortex_a5_fpmul_pipe, cortex_a5_fpmul_pipe*2,\
204 cortex_a5_ex1+cortex_a5_fpmul_pipe")
205
;; Double-precision multiply-accumulate (incl. fused): latency 11; the
;; fpmuld occupancy pattern followed, after a 3-cycle gap, by one cycle in
;; the FP add pipe for the accumulate.
206 (define_insn_reservation "cortex_a5_fpmacd" 11
207 (and (eq_attr "tune" "cortexa5")
208 (eq_attr "type" "fmacd,ffmad"))
209 "cortex_a5_ex1+cortex_a5_fpmul_pipe, cortex_a5_fpmul_pipe*2,\
210 cortex_a5_ex1+cortex_a5_fpmul_pipe, nothing*3, cortex_a5_fpadd_pipe")
211
212 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
213 ;; Floating-point divide/square root instructions.
214 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
215
216 ;; ??? Not sure if the 14 cycles taken for single-precision divide to complete
217 ;; includes the time taken for the special instruction used to collect the
218 ;; result to travel down the multiply pipeline, or not. Assuming so. (If
219 ;; that's wrong, the latency should be increased by a few cycles.)
220
221 ;; fsqrt takes one cycle less, but that is not modelled, nor is the use of the
222 ;; multiply pipeline to collect the divide/square-root result.
223
;; Single-precision divide: latency 14; one issue cycle in ex1, then the
;; dedicated div/sqrt unit is busy for 13 cycles (out-of-order completion).
224 (define_insn_reservation "cortex_a5_fdivs" 14
225 (and (eq_attr "tune" "cortexa5")
226 (eq_attr "type" "fdivs"))
227 "cortex_a5_ex1, cortex_a5_fp_div_sqrt * 13")
228
229 ;; ??? Similarly for fdivd.
230
;; Double-precision divide: latency 29; one ex1 cycle, then 28 cycles in
;; the div/sqrt unit (same caveats as fdivs, per the comments above).
231 (define_insn_reservation "cortex_a5_fdivd" 29
232 (and (eq_attr "tune" "cortexa5")
233 (eq_attr "type" "fdivd"))
234 "cortex_a5_ex1, cortex_a5_fp_div_sqrt * 28")
235
236 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
237 ;; VFP to/from core transfers.
238 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
239
240 ;; FP loads take data from wr/rot/f3.
241
242 ;; Core-to-VFP transfers use the multiply pipeline.
243
;; Core-to-VFP register transfers (fmsr/fmdrr etc.): latency 4, one ex1
;; cycle.  (The comment above notes these travel via the multiply pipeline,
;; which is not separately reserved here.)
244 (define_insn_reservation "cortex_a5_r2f" 4
245 (and (eq_attr "tune" "cortexa5")
246 (eq_attr "type" "f_mcr,f_mcrr"))
247 "cortex_a5_ex1")
248
;; VFP-to-core register transfers (fmrs/fmrrd etc.): latency 2, one ex1 cycle.
249 (define_insn_reservation "cortex_a5_f2r" 2
250 (and (eq_attr "tune" "cortexa5")
251 (eq_attr "type" "f_mrc,f_mrrc"))
252 "cortex_a5_ex1")
253
254 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
255 ;; VFP flag transfer.
256 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
257
258 ;; ??? The flag forwarding from fmstat to the ex2 stage of the second
259 ;; instruction is not modeled at present.
260
;; FP flag transfer (fmstat): latency 4; the faster flag-forwarding path
;; mentioned above is deliberately not modelled.
261 (define_insn_reservation "cortex_a5_f_flags" 4
262 (and (eq_attr "tune" "cortexa5")
263 (eq_attr "type" "f_flag"))
264 "cortex_a5_ex1")
265
266 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
267 ;; VFP load/store.
268 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
269
;; Single-precision FP load: nominal latency 4; the bypasses at the end of
;; the file reduce it to 2 for FP consumers.
270 (define_insn_reservation "cortex_a5_f_loads" 4
271 (and (eq_attr "tune" "cortexa5")
272 (eq_attr "type" "f_loads"))
273 "cortex_a5_ex1")
274
;; Double-precision FP load: nominal latency 5; two ex1 cycles, the first
;; also blocking dual-issue.  Bypassed to 3 for FP consumers below.
275 (define_insn_reservation "cortex_a5_f_loadd" 5
276 (and (eq_attr "tune" "cortexa5")
277 (eq_attr "type" "f_loadd"))
278 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
279
;; Single-precision FP store: no register result (latency 0), one ex1 cycle.
280 (define_insn_reservation "cortex_a5_f_stores" 0
281 (and (eq_attr "tune" "cortexa5")
282 (eq_attr "type" "f_stores"))
283 "cortex_a5_ex1")
284
;; Double-precision FP store: latency 0; two ex1 cycles, the first also
;; blocking the dual-issue branch slot.
285 (define_insn_reservation "cortex_a5_f_stored" 0
286 (and (eq_attr "tune" "cortexa5")
287 (eq_attr "type" "f_stored"))
288 "cortex_a5_ex1+cortex_a5_branch, cortex_a5_ex1")
289
290 ;; Load-to-use for floating-point values has a penalty of one cycle,
291 ;; i.e. a latency of two.
292
;; Single-precision FP load feeding an FP op or core transfer: effective
;; latency 2 (one-cycle load-to-use penalty) instead of the nominal 4.
293 (define_bypass 2 "cortex_a5_f_loads"
294 "cortex_a5_fpalu, cortex_a5_fpmacs, cortex_a5_fpmuld,\
295 cortex_a5_fpmacd, cortex_a5_fdivs, cortex_a5_fdivd,\
296 cortex_a5_f2r")
297
;; Double-precision FP load feeding the same consumers: effective latency 3
;; instead of the nominal 5.
298 (define_bypass 3 "cortex_a5_f_loadd"
299 "cortex_a5_fpalu, cortex_a5_fpmacs, cortex_a5_fpmuld,\
300 cortex_a5_fpmacd, cortex_a5_fdivs, cortex_a5_fdivd,\
301 cortex_a5_f2r")