;; Machine description for RISC-V Bit Manipulation operations.
;; Copyright (C) 2021-2022 Free Software Foundation, Inc.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.

;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; ZBA extension.

(define_insn "*zero_extendsidi2_bitmanip"
  [(set (match_operand:DI 0 "register_operand" "=r,r")
        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
  "TARGET_64BIT && TARGET_ZBA"
  "@
   zext.w\t%0,%1
   lwu\t%0,%1"
  [(set_attr "type" "bitmanip,load")
   (set_attr "mode" "DI")])

(define_insn "*shNadd"
  [(set (match_operand:X 0 "register_operand" "=r")
        (plus:X (ashift:X (match_operand:X 1 "register_operand" "r")
                          (match_operand:QI 2 "imm123_operand" "Ds3"))
                (match_operand:X 3 "register_operand" "r")))]
  "TARGET_ZBA"
  "sh%2add\t%0,%1,%3"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "<X:MODE>")])

; When using strength-reduction, we will reduce a multiplication to a
; sequence of shifts and adds.  If this is performed with 32-bit types
; and followed by a division, the lack of w-form sh[123]add will make
; combination impossible and lead to a slli + addw being generated.
; Split the sequence with the knowledge that a w-form div will perform
; implicit sign-extensions.
(define_split
  [(set (match_operand:DI 0 "register_operand")
        (sign_extend:DI (div:SI (plus:SI (subreg:SI (ashift:DI (match_operand:DI 1 "register_operand")
                                                               (match_operand:QI 2 "imm123_operand")) 0)
                                         (subreg:SI (match_operand:DI 3 "register_operand") 0))
                                (subreg:SI (match_operand:DI 4 "register_operand") 0))))
   (clobber (match_operand:DI 5 "register_operand"))]
  "TARGET_64BIT && TARGET_ZBA"
  [(set (match_dup 5) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
   (set (match_dup 0) (sign_extend:DI (div:SI (subreg:SI (match_dup 5) 0) (subreg:SI (match_dup 4) 0))))])

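; A hypothetical source-level example of the shape this split targets
; (a sketch, not taken from the GCC sources): a 32-bit multiplication
; that strength-reduction turns into a shift-and-add, immediately
; consumed by a 32-bit division.
;
;   int f (int x, int y)
;   {
;     return (x * 9) / y;   /* roughly: sh3add a0,a0,a0 ; divw a0,a0,a1.
;                              divw only considers the low 32 bits of its
;                              inputs, so no intermediate addw is needed.  */
;   }
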
; Zba does not provide W-forms of sh[123]add(.uw)?, which leads to an
; interesting irregularity: we can generate a signed 32-bit result
; using slli(.uw)? + addw, but an unsigned 32-bit result can be
; generated more efficiently as sh[123]add + zext.w (the .uw can be
; dropped if we zero-extend the output anyway).
;
; To enable this optimization, we split [ slli(.uw)?, addw, zext.w ]
; into [ sh[123]add, zext.w ] for use during combine.
(define_split
  [(set (match_operand:DI 0 "register_operand")
        (zero_extend:DI (plus:SI (ashift:SI (subreg:SI (match_operand:DI 1 "register_operand") 0)
                                            (match_operand:QI 2 "imm123_operand"))
                                 (subreg:SI (match_operand:DI 3 "register_operand") 0))))]
  "TARGET_64BIT && TARGET_ZBA"
  [(set (match_dup 0) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
   (set (match_dup 0) (zero_extend:DI (subreg:SI (match_dup 0) 0)))])

(define_split
  [(set (match_operand:DI 0 "register_operand")
        (zero_extend:DI (plus:SI (subreg:SI (and:DI (ashift:DI (match_operand:DI 1 "register_operand")
                                                               (match_operand:QI 2 "imm123_operand"))
                                                    (match_operand:DI 3 "consecutive_bits_operand")) 0)
                                 (subreg:SI (match_operand:DI 4 "register_operand") 0))))]
  "TARGET_64BIT && TARGET_ZBA
   && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))"
  [(set (match_dup 0) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 4)))
   (set (match_dup 0) (zero_extend:DI (subreg:SI (match_dup 0) 0)))])

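; Hypothetical example of the zero-extended case described above
; (a sketch, not taken from the GCC sources):
;
;   unsigned long f (unsigned long x, unsigned long y)
;   {
;     /* Roughly: sh2add a0,a0,a1 ; zext.w a0,a0
;        instead of slli + addw + zext.w.  */
;     return (unsigned int) ((x << 2) + y);
;   }
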
; Make sure that an andi followed by a sh[123]add remains a
; two-instruction sequence and is not torn apart into slli, srli, add.
(define_insn_and_split "*andi_add.uw"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (plus:DI (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
                                    (match_operand:QI 2 "imm123_operand" "Ds3"))
                         (match_operand:DI 3 "consecutive_bits_operand" ""))
                 (match_operand:DI 4 "register_operand" "r")))
   (clobber (match_scratch:DI 5 "=&r"))]
  "TARGET_64BIT && TARGET_ZBA
   && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))
   && SMALL_OPERAND (INTVAL (operands[3]) >> INTVAL (operands[2]))"
  "#"
  "&& reload_completed"
  [(set (match_dup 5) (and:DI (match_dup 1) (match_dup 3)))
   (set (match_dup 0) (plus:DI (ashift:DI (match_dup 5) (match_dup 2))
                               (match_dup 4)))]
{
  operands[3] = GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2]));
})

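; Hypothetical illustration (a sketch, not from the GCC sources) of the
; kind of address computation the pattern above keeps tight: masking a
; field and scaling it into an array index.
;
;   long f (unsigned long x, long *base)
;   {
;     return base[x & 0xff];   /* roughly: andi a0,a0,0xff ; sh3add a0,a0,a1 ; ld a0,0(a0)  */
;   }
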
(define_insn "*shNadduw"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (plus:DI
          (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
                             (match_operand:QI 2 "imm123_operand" "Ds3"))
                  (match_operand 3 "immediate_operand" "n"))
          (match_operand:DI 4 "register_operand" "r")))]
  "TARGET_64BIT && TARGET_ZBA
   && (INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff"
  "sh%2add.uw\t%0,%1,%4"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "DI")])

;; During combine, we may encounter an attempt to combine
;;   slli rtmp, rs, #imm
;;   zext.w rtmp, rtmp
;;   sh[123]add rd, rtmp, rs2
;; which will lead to the immediate not satisfying the above constraints.
;; By splitting the compound expression, we can simplify to a slli and a
;; sh[123]add.uw.
(define_split
  [(set (match_operand:DI 0 "register_operand")
        (plus:DI (and:DI (ashift:DI (match_operand:DI 1 "register_operand")
                                    (match_operand:QI 2 "immediate_operand"))
                         (match_operand:DI 3 "consecutive_bits_operand"))
                 (match_operand:DI 4 "register_operand")))
   (clobber (match_operand:DI 5 "register_operand"))]
  "TARGET_64BIT && TARGET_ZBA"
  [(set (match_dup 5) (ashift:DI (match_dup 1) (match_dup 6)))
   (set (match_dup 0) (plus:DI (and:DI (ashift:DI (match_dup 5)
                                                  (match_dup 7))
                                       (match_dup 8))
                               (match_dup 4)))]
{
  unsigned HOST_WIDE_INT mask = UINTVAL (operands[3]);
  /* scale: shift within the sh[123]add.uw */
  unsigned HOST_WIDE_INT scale = 32 - clz_hwi (mask);
  /* bias:  pre-scale amount (i.e. the prior shift amount) */
  int bias = ctz_hwi (mask) - scale;

  /* If bias + scale don't add up to operands[2], reject.  */
  if ((scale + bias) != UINTVAL (operands[2]))
    FAIL;

  /* If the shift amount is out of range for sh[123]add.uw, reject.  */
  if ((scale < 1) || (scale > 3))
    FAIL;

  /* If there's no bias, the '*shNadduw' pattern should have matched.  */
  if (bias == 0)
    FAIL;

  operands[6] = GEN_INT (bias);
  operands[7] = GEN_INT (scale);
  operands[8] = GEN_INT (0xffffffffULL << scale);
})

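;; Worked example (illustrative, not from the GCC sources): combining
;;   slli   rtmp, rs, 4
;;   zext.w rtmp, rtmp
;;   sh2add rd, rtmp, rs2
;; presents combine with a shift amount of 6 and a mask of 0x3fffffffc0,
;; so scale = 32 - clz (mask) = 2 and bias = ctz (mask) - scale = 4, and
;; the split re-emits
;;   slli      rtmp, rs, 4
;;   sh2add.uw rd, rtmp, rs2
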
(define_insn "*add.uw"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (plus:DI (zero_extend:DI
                   (match_operand:SI 1 "register_operand" "r"))
                 (match_operand:DI 2 "register_operand" "r")))]
  "TARGET_64BIT && TARGET_ZBA"
  "add.uw\t%0,%1,%2"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "DI")])

(define_insn "*slliuw"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
                           (match_operand:QI 2 "immediate_operand" "I"))
                (match_operand 3 "immediate_operand" "n")))]
  "TARGET_64BIT && TARGET_ZBA
   && (INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff"
  "slli.uw\t%0,%1,%2"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "DI")])

;; ZBB extension.

(define_insn "*<optab>_not<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (bitmanip_bitwise:X (not:X (match_operand:X 1 "register_operand" "r"))
                            (match_operand:X 2 "register_operand" "r")))]
  "TARGET_ZBB"
  "<insn>n\t%0,%2,%1"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "<X:MODE>")])

;; '(a >= 0) ? b : 0' is emitted branchless (from if-conversion).  Without a
;; bit of extra help for combine (i.e., the below split), we end up emitting
;; not/srai/and instead of combining the not into an andn.
(define_split
  [(set (match_operand:DI 0 "register_operand")
        (and:DI (neg:DI (ge:DI (match_operand:DI 1 "register_operand")
                               (const_int 0)))
                (match_operand:DI 2 "register_operand")))
   (clobber (match_operand:DI 3 "register_operand"))]
  "TARGET_ZBB"
  [(set (match_dup 3) (ashiftrt:DI (match_dup 1) (const_int 63)))
   (set (match_dup 0) (and:DI (not:DI (match_dup 3)) (match_dup 2)))])

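;; Hypothetical source-level example for the split above (a sketch, not
;; from the GCC sources):
;;
;;   long f (long a, long b)
;;   {
;;     return a >= 0 ? b : 0;   /* roughly: srai a0,a0,63 ; andn a0,a1,a0  */
;;   }
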
(define_insn "*xor_not<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (not:X (xor:X (match_operand:X 1 "register_operand" "r")
                      (match_operand:X 2 "register_operand" "r"))))]
  "TARGET_ZBB"
  "xnor\t%0,%1,%2"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "<X:MODE>")])

(define_insn "<bitmanip_optab>si2"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (clz_ctz_pcnt:SI (match_operand:SI 1 "register_operand" "r")))]
  "TARGET_ZBB"
  "<bitmanip_insn>%~\t%0,%1"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "SI")])

(define_insn "*<bitmanip_optab>disi2"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (sign_extend:DI
          (clz_ctz_pcnt:SI (match_operand:SI 1 "register_operand" "r"))))]
  "TARGET_64BIT && TARGET_ZBB"
  "<bitmanip_insn>w\t%0,%1"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "SI")])

(define_insn "<bitmanip_optab>di2"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (clz_ctz_pcnt:DI (match_operand:DI 1 "register_operand" "r")))]
  "TARGET_64BIT && TARGET_ZBB"
  "<bitmanip_insn>\t%0,%1"
  [(set_attr "type" "bitmanip")
   (set_attr "mode" "DI")])

(define_insn "*zero_extendhi<GPR:mode>2_bitmanip"
  [(set (match_operand:GPR 0 "register_operand" "=r,r")
        (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
  "TARGET_ZBB"
  "@
   zext.h\t%0,%1
   lhu\t%0,%1"
  [(set_attr "type" "bitmanip,load")
   (set_attr "mode" "<GPR:MODE>")])

(define_insn "*extend<SHORT:mode><SUPERQI:mode>2_zbb"
  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
        (sign_extend:SUPERQI
          (match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
  "TARGET_ZBB"
  "@
   sext.<SHORT:size>\t%0,%1
   l<SHORT:size>\t%0,%1"
  [(set_attr "type" "bitmanip,load")
   (set_attr "mode" "<SUPERQI:MODE>")])

(define_insn "*zero_extendhi<GPR:mode>2_zbb"
  [(set (match_operand:GPR 0 "register_operand" "=r,r")
        (zero_extend:GPR
          (match_operand:HI 1 "nonimmediate_operand" " r,m")))]
  "TARGET_ZBB"
  "@
   zext.h\t%0,%1
   lhu\t%0,%1"
  [(set_attr "type" "bitmanip,load")
   (set_attr "mode" "HI")])

(define_insn "rotrsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (rotatert:SI (match_operand:SI 1 "register_operand" "r")
                     (match_operand:QI 2 "arith_operand" "rI")))]
  "TARGET_ZBB"
  "ror%i2%~\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "rotrdi3"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (rotatert:DI (match_operand:DI 1 "register_operand" "r")
                     (match_operand:QI 2 "arith_operand" "rI")))]
  "TARGET_64BIT && TARGET_ZBB"
  "ror%i2\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "rotrsi3_sext"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (sign_extend:DI (rotatert:SI (match_operand:SI 1 "register_operand" "r")
                                     (match_operand:QI 2 "register_operand" "r"))))]
  "TARGET_64BIT && TARGET_ZBB"
  "rorw\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "rotlsi3"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (rotate:SI (match_operand:SI 1 "register_operand" "r")
                   (match_operand:QI 2 "register_operand" "r")))]
  "TARGET_ZBB"
  "rol%~\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "rotldi3"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (rotate:DI (match_operand:DI 1 "register_operand" "r")
                   (match_operand:QI 2 "register_operand" "r")))]
  "TARGET_64BIT && TARGET_ZBB"
  "rol\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "rotlsi3_sext"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (sign_extend:DI (rotate:SI (match_operand:SI 1 "register_operand" "r")
                                   (match_operand:QI 2 "register_operand" "r"))))]
  "TARGET_64BIT && TARGET_ZBB"
  "rolw\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

;; orc.b (or-combine) is added as an unspec for the benefit of the support
;; for optimized string functions (such as strcmp).
(define_insn "orcb<mode>2"
  [(set (match_operand:X 0 "register_operand" "=r")
        (unspec:X [(match_operand:X 1 "register_operand" "r")] UNSPEC_ORC_B))]
  "TARGET_ZBB"
  "orc.b\t%0,%1")

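;; orc.b sets each result byte to 0xff if the corresponding source byte
;; is nonzero and to 0x00 if it is zero, e.g. (illustrative values):
;;   orc.b (0x00120034000000ff) == 0x00ff00ff000000ff
;; so "~orc.b (x)" is nonzero exactly when some byte of x is zero, which
;; is the kind of test the optimized string routines rely on.
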
(define_insn "bswap<mode>2"
  [(set (match_operand:X 0 "register_operand" "=r")
        (bswap:X (match_operand:X 1 "register_operand" "r")))]
  "TARGET_ZBB"
  "rev8\t%0,%1"
  [(set_attr "type" "bitmanip")])

;; HI bswap can be emulated using a SI/DI bswap followed
;; by a logical shift right.
;; SI bswap for TARGET_64BIT is already handled similarly in
;; the common code.
(define_expand "bswaphi2"
  [(set (match_operand:HI 0 "register_operand" "=r")
        (bswap:HI (match_operand:HI 1 "register_operand" "r")))]
  "TARGET_ZBB"
{
  rtx tmp = gen_reg_rtx (word_mode);
  rtx newop1 = gen_lowpart (word_mode, operands[1]);
  if (TARGET_64BIT)
    emit_insn (gen_bswapdi2 (tmp, newop1));
  else
    emit_insn (gen_bswapsi2 (tmp, newop1));
  rtx tmp1 = gen_reg_rtx (word_mode);
  if (TARGET_64BIT)
    emit_insn (gen_lshrdi3 (tmp1, tmp, GEN_INT (64 - 16)));
  else
    emit_insn (gen_lshrsi3 (tmp1, tmp, GEN_INT (32 - 16)));
  emit_move_insn (operands[0], gen_lowpart (HImode, tmp1));
  DONE;
})

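;; Hypothetical example of the expansion above (a sketch): for
;;
;;   unsigned short f (unsigned short x) { return __builtin_bswap16 (x); }
;;
;; an RV64 target emits roughly
;;   rev8 a0,a0
;;   srli a0,a0,48
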
(define_insn "<bitmanip_optab><mode>3"
  [(set (match_operand:X 0 "register_operand" "=r")
        (bitmanip_minmax:X (match_operand:X 1 "register_operand" "r")
                           (match_operand:X 2 "register_operand" "r")))]
  "TARGET_ZBB"
  "<bitmanip_insn>\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

;; ZBS extension.

(define_insn "*bset<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (ior:X (ashift:X (const_int 1)
                         (match_operand:QI 2 "register_operand" "r"))
               (match_operand:X 1 "register_operand" "r")))]
  "TARGET_ZBS"
  "bset\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "*bset<mode>_mask"
  [(set (match_operand:X 0 "register_operand" "=r")
        (ior:X (ashift:X (const_int 1)
                         (subreg:QI
                           (and:X (match_operand:X 2 "register_operand" "r")
                                  (match_operand 3 "<X:shiftm1>" "<X:shiftm1p>")) 0))
               (match_operand:X 1 "register_operand" "r")))]
  "TARGET_ZBS"
  "bset\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "*bset<mode>_1"
  [(set (match_operand:X 0 "register_operand" "=r")
        (ashift:X (const_int 1)
                  (match_operand:QI 1 "register_operand" "r")))]
  "TARGET_ZBS"
  "bset\t%0,x0,%1"
  [(set_attr "type" "bitmanip")])

(define_insn "*bset<mode>_1_mask"
  [(set (match_operand:X 0 "register_operand" "=r")
        (ashift:X (const_int 1)
                  (subreg:QI
                    (and:X (match_operand:X 1 "register_operand" "r")
                           (match_operand 2 "<X:shiftm1>" "<X:shiftm1p>")) 0)))]
  "TARGET_ZBS"
  "bset\t%0,x0,%1"
  [(set_attr "type" "bitmanip")])

(define_insn "*bseti<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (ior:X (match_operand:X 1 "register_operand" "r")
               (match_operand:X 2 "single_bit_mask_operand" "DbS")))]
  "TARGET_ZBS"
  "bseti\t%0,%1,%S2"
  [(set_attr "type" "bitmanip")])

;; As long as the SImode operand is not a partial subreg, we can use a
;; bseti without postprocessing, as the middle end is smart enough to
;; stay away from the signbit.
(define_insn "*bsetidisi"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (ior:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
                (match_operand 2 "single_bit_mask_operand" "i")))]
  "TARGET_ZBS && TARGET_64BIT
   && !partial_subreg_p (operands[2])"
  "bseti\t%0,%1,%S2"
  [(set_attr "type" "bitmanip")])

(define_insn "*bclr<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (and:X (rotate:X (const_int -2)
                         (match_operand:QI 2 "register_operand" "r"))
               (match_operand:X 1 "register_operand" "r")))]
  "TARGET_ZBS"
  "bclr\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "*bclri<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (and:X (match_operand:X 1 "register_operand" "r")
               (match_operand:X 2 "not_single_bit_mask_operand" "DnS")))]
  "TARGET_ZBS"
  "bclri\t%0,%1,%T2"
  [(set_attr "type" "bitmanip")])

;; In case we have "val & ~IMM" where ~IMM has 2 bits set.
(define_insn_and_split "*bclri<mode>_nottwobits"
  [(set (match_operand:X 0 "register_operand" "=r")
        (and:X (match_operand:X 1 "register_operand" "r")
               (match_operand:X 2 "const_nottwobits_operand" "i")))]
  "TARGET_ZBS && !paradoxical_subreg_p (operands[1])"
  "#"
  "&& reload_completed"
  [(set (match_dup 0) (and:X (match_dup 1) (match_dup 3)))
   (set (match_dup 0) (and:X (match_dup 0) (match_dup 4)))]
{
  unsigned HOST_WIDE_INT bits = ~UINTVAL (operands[2]);
  unsigned HOST_WIDE_INT topbit = HOST_WIDE_INT_1U << floor_log2 (bits);

  operands[3] = GEN_INT (~bits | topbit);
  operands[4] = GEN_INT (~topbit);
})

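;; Worked example (illustrative, not from the GCC sources): for
;; "val & ~IMM" with ~IMM == (1 << 5) | (1 << 20), we get
;; bits == 0x100020 and topbit == 0x100000, so the split emits roughly
;;   andi  rd, rs, -33    ; mask ~bits | topbit, clears bit 5
;;   bclri rd, rd, 20     ; mask ~topbit, clears bit 20
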
;; In case of a paradoxical subreg, the sign bit and the high bits are
;; not allowed to be changed.
(define_insn_and_split "*bclridisi_nottwobits"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (and:DI (match_operand:DI 1 "register_operand" "r")
                (match_operand:DI 2 "const_nottwobits_operand" "i")))]
  "TARGET_64BIT && TARGET_ZBS
   && clz_hwi (~UINTVAL (operands[2])) > 33"
  "#"
  "&& reload_completed"
  [(set (match_dup 0) (and:DI (match_dup 1) (match_dup 3)))
   (set (match_dup 0) (and:DI (match_dup 0) (match_dup 4)))]
{
  unsigned HOST_WIDE_INT bits = ~UINTVAL (operands[2]);
  unsigned HOST_WIDE_INT topbit = HOST_WIDE_INT_1U << floor_log2 (bits);

  operands[3] = GEN_INT (~bits | topbit);
  operands[4] = GEN_INT (~topbit);
})

(define_insn "*binv<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (xor:X (ashift:X (const_int 1)
                         (match_operand:QI 2 "register_operand" "r"))
               (match_operand:X 1 "register_operand" "r")))]
  "TARGET_ZBS"
  "binv\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

(define_insn "*binvi<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (xor:X (match_operand:X 1 "register_operand" "r")
               (match_operand:X 2 "single_bit_mask_operand" "DbS")))]
  "TARGET_ZBS"
  "binvi\t%0,%1,%S2"
  [(set_attr "type" "bitmanip")])

(define_insn "*bext<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (zero_extract:X (match_operand:X 1 "register_operand" "r")
                        (const_int 1)
                        (zero_extend:X
                          (match_operand:QI 2 "register_operand" "r"))))]
  "TARGET_ZBS"
  "bext\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

;; When performing `(a & (1UL << bitno)) ? 0 : -1` the combiner
;; usually has the `bitno` typed as X-mode (i.e. no further
;; zero-extension is performed around the bitno).
(define_insn "*bext<mode>"
  [(set (match_operand:X 0 "register_operand" "=r")
        (zero_extract:X (match_operand:X 1 "register_operand" "r")
                        (const_int 1)
                        (match_operand:X 2 "register_operand" "r")))]
  "TARGET_ZBS"
  "bext\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

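;; Hypothetical example (a sketch, not from the GCC sources) of a
;; single-bit test that maps onto the bext patterns above:
;;
;;   long f (long a, long bitno)
;;   {
;;     return (a >> bitno) & 1;   /* bext a0,a0,a1  */
;;   }
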
(define_insn "*bexti"
  [(set (match_operand:X 0 "register_operand" "=r")
        (zero_extract:X (match_operand:X 1 "register_operand" "r")
                        (const_int 1)
                        (match_operand 2 "immediate_operand" "n")))]
  "TARGET_ZBS && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
  "bexti\t%0,%1,%2"
  [(set_attr "type" "bitmanip")])

;; Split for "(a & (1 << BIT_NO)) ? 0 : 1":
;; We avoid reassociating "(~(a >> BIT_NO)) & 1" into "((~a) >> BIT_NO) & 1",
;; so we don't have to use a temporary.  Instead we extract the bit and then
;; invert bit 0 ("a ^ 1") only.
(define_split
  [(set (match_operand:X 0 "register_operand")
        (and:X (not:X (lshiftrt:X (match_operand:X 1 "register_operand")
                                  (subreg:QI (match_operand:X 2 "register_operand") 0)))
               (const_int 1)))]
  "TARGET_ZBS"
  [(set (match_dup 0) (zero_extract:X (match_dup 1)
                                      (const_int 1)
                                      (match_dup 2)))
   (set (match_dup 0) (xor:X (match_dup 0) (const_int 1)))])

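;; Hypothetical example (a sketch, not from the GCC sources) of the
;; inverted single-bit test handled by the split above:
;;
;;   long f (long a, long bitno)
;;   {
;;     return (a & (1UL << bitno)) ? 0 : 1;   /* bext a0,a0,a1 ; xori a0,a0,1  */
;;   }
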
;; We can create a polarity-reversed mask (i.e. bit N -> { set = 0, clear = -1 })
;; using a bext(i) followed by an addi instruction.
;; This splits the canonical representation of "(a & (1 << BIT_NO)) ? 0 : -1".
(define_split
  [(set (match_operand:GPR 0 "register_operand")
        (neg:GPR (eq:GPR (zero_extract:GPR (match_operand:GPR 1 "register_operand")
                                           (const_int 1)
                                           (match_operand 2))
                         (const_int 0))))]
  "TARGET_ZBS"
  [(set (match_dup 0) (zero_extract:GPR (match_dup 1) (const_int 1) (match_dup 2)))
   (set (match_dup 0) (plus:GPR (match_dup 0) (const_int -1)))])

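;; Hypothetical example (a sketch, not from the GCC sources):
;;
;;   long f (long a, long bitno)
;;   {
;;     return (a & (1UL << bitno)) ? 0 : -1;   /* bext a0,a0,a1 ; addi a0,a0,-1  */
;;   }
;;
;; bext leaves 0 or 1 in the destination, so adding -1 yields -1 or 0
;; respectively.
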
;; Catch those cases where we can use a bseti/binvi + ori/xori or
;; bseti/binvi + bseti/binvi instead of a lui + addi + or/xor sequence.
(define_insn_and_split "*<or_optab>i<mode>_extrabit"
  [(set (match_operand:X 0 "register_operand" "=r")
        (any_or:X (match_operand:X 1 "register_operand" "r")
                  (match_operand:X 2 "uimm_extra_bit_or_twobits" "i")))]
  "TARGET_ZBS"
  "#"
  "&& reload_completed"
  [(set (match_dup 0) (<or_optab>:X (match_dup 1) (match_dup 3)))
   (set (match_dup 0) (<or_optab>:X (match_dup 0) (match_dup 4)))]
{
  unsigned HOST_WIDE_INT bits = UINTVAL (operands[2]);
  unsigned HOST_WIDE_INT topbit = HOST_WIDE_INT_1U << floor_log2 (bits);

  operands[3] = GEN_INT (bits &~ topbit);
  operands[4] = GEN_INT (topbit);
})

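;; Worked example (illustrative, not from the GCC sources): for
;; "a | 0x4401" we get bits == 0x4401 and topbit == 0x4000, so the
;; split emits roughly
;;   ori   rd, rs, 0x401
;;   bseti rd, rd, 14
;; where a plain RV64I sequence would need lui + addi to build the
;; constant first.
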
;; Similarly, use bclri + andi or bclri + bclri instead of a
;; lui + addi + and sequence.
(define_insn_and_split "*andi<mode>_extrabit"
  [(set (match_operand:X 0 "register_operand" "=r")
        (and:X (match_operand:X 1 "register_operand" "r")
               (match_operand:X 2 "not_uimm_extra_bit_or_nottwobits" "i")))]
  "TARGET_ZBS"
  "#"
  "&& reload_completed"
  [(set (match_dup 0) (and:X (match_dup 1) (match_dup 3)))
   (set (match_dup 0) (and:X (match_dup 0) (match_dup 4)))]
{
  unsigned HOST_WIDE_INT bits = UINTVAL (operands[2]);
  unsigned HOST_WIDE_INT topbit = HOST_WIDE_INT_1U << floor_log2 (~bits);

  operands[3] = GEN_INT (bits | topbit);
  operands[4] = GEN_INT (~topbit);
})

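;; Worked example (illustrative, not from the GCC sources): for
;; "a & ~0x4401" the split sets topbit == 0x4000 and emits roughly
;;   andi  rd, rs, -1026    ; -1026 == ~0x401
;;   bclri rd, rd, 14
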
;; IF_THEN_ELSE: test for 2 bits of opposite polarity
(define_insn_and_split "*branch<X:mode>_mask_twobits_equals_singlebit"
  [(set (pc)
        (if_then_else
          (match_operator 1 "equality_operator"
            [(and:X (match_operand:X 2 "register_operand" "r")
                    (match_operand:X 3 "const_twobits_not_arith_operand" "i"))
             (match_operand:X 4 "single_bit_mask_operand" "i")])
          (label_ref (match_operand 0 "" ""))
          (pc)))
   (clobber (match_scratch:X 5 "=&r"))
   (clobber (match_scratch:X 6 "=&r"))]
  "TARGET_ZBS && TARGET_ZBB"
  "#"
  "&& reload_completed"
  [(set (match_dup 5) (zero_extract:X (match_dup 2)
                                      (const_int 1)
                                      (match_dup 8)))
   (set (match_dup 6) (zero_extract:X (match_dup 2)
                                      (const_int 1)
                                      (match_dup 9)))
   (set (match_dup 6) (and:X (not:X (match_dup 6)) (match_dup 5)))
   (set (pc) (if_then_else (match_op_dup 1 [(match_dup 6) (const_int 0)])
                           (label_ref (match_dup 0))
                           (pc)))]
{
  unsigned HOST_WIDE_INT twobits_mask = UINTVAL (operands[3]);
  unsigned HOST_WIDE_INT singlebit_mask = UINTVAL (operands[4]);

  /* We should never see an unsatisfiable condition.  */
  gcc_assert (twobits_mask & singlebit_mask);

  int setbit = ctz_hwi (singlebit_mask);
  int clearbit = ctz_hwi (twobits_mask & ~singlebit_mask);

  operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]) == NE ? EQ : NE,
                                <X:MODE>mode, operands[6], GEN_INT (0));

  operands[8] = GEN_INT (setbit);
  operands[9] = GEN_INT (clearbit);
})
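
;; Hypothetical example (a sketch, not from the GCC sources) of a
;; "two bits of opposite polarity" test handled by the pattern above:
;;
;;   void g (void);
;;   void f (long a)
;;   {
;;     if ((a & ((1L << 20) | (1L << 5))) == (1L << 20))
;;       g ();   /* roughly: bexti t0,a0,20 ; bexti t1,a0,5 ;
;;                  andn t1,t0,t1 ; bnez t1,... instead of building the
;;                  two-bit mask with lui + addi.  */
;;   }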