arithmetic insn after the jump insn and put the arithmetic insn in the
delay slot. If we can't do this, return. */
if (delay_list->is_empty () && likely
- && new_thread && !ANY_RETURN_P (new_thread)
+ && new_thread
+ && !ANY_RETURN_P (new_thread)
&& NONJUMP_INSN_P (new_thread)
&& !RTX_FRAME_RELATED_P (new_thread)
&& GET_CODE (PATTERN (new_thread)) != ASM_INPUT
gcc_assert (thread_if_true);
- if (new_thread && simplejump_or_return_p (new_thread)
+ if (new_thread
+ && simplejump_or_return_p (new_thread)
&& redirect_with_delay_list_safe_p (insn,
JUMP_LABEL (new_thread),
*delay_list))
- new_thread = follow_jumps (JUMP_LABEL (new_thread), insn,
- &crossing);
+ new_thread = follow_jumps (JUMP_LABEL (new_thread), insn, &crossing);
- if (ANY_RETURN_P (new_thread))
+ if (!new_thread)
+ label = find_end_label (simple_return_rtx);
+ else if (ANY_RETURN_P (new_thread))
label = find_end_label (new_thread);
else if (LABEL_P (new_thread))
label = new_thread;
--- /dev/null
+// PR rtl-optimization/113140
+// Reduced testcase by Rainer Orth <ro@gcc.gnu.org>
+
+// No dg-do directive: defaults to compile-only, which is all that is
+// needed to reproduce the ICE.  -O enables the RTL optimization passes
+// (including delay-slot filling on targets that have it); -w silences
+// the warnings inherent to reduced code (e.g. the missing return below).
+// { dg-options "-O -w" }
+
+int *m();
+struct StaticValue {
+ long _val;
+ void setM(int *) { _val = 0; }
+};
+struct Value : StaticValue {
+ template <typename T> T *as();
+};
+Value *alloc();
+struct Scoped {
+ Scoped() {
+ // Constructor mixes a call, a conditional expression and stores —
+ // the shape that produced the problematic RTL after reduction.
+ Value v;
+ ptr = alloc();
+ Value *__trans_tmp_1 = v.as<Value>();
+ ptr->setM(__trans_tmp_1 ? m() : 0);
+ }
+ Value *ptr;
+};
+struct QObjectMethod {
+ unsigned long long callInternal() const;
+};
+// NOTE: deliberately falls off the end without returning a value — this
+// is a reduced testcase and must stay as-is (hence -w above).
+unsigned long long QObjectMethod::callInternal() const {
+ [] {
+ // C++17 if-with-initializer: constructs a Scoped temporary in the
+ // init-statement; its destructor runs at the end of the statement.
+ if (Scoped(); 0)
+ ;
+ }();
+}