@FLAG_W_MISSING_PARAMETER_TYPE@ \
@FLAG_W_LOGICAL_OP@ \
@FLAG_W_ENUM_CONVERSION@ \
+ @FLAG_W_IMPLICIT_FALLTHROUGH@ \
@FLAG_W_OLD_STYLE_DECLARATION@ \
@FLAG_FINLINE_FUNCTIONS@ \
@FLAG_FNO_STACK_PROTECTOR@ \
405363 PPC64, xvcvdpsxws, xvcvdpuxws, do not handle NaN arguments correctly.
405365 PPC64, function _get_maxmin_fp_NaN() doesn't handle QNaN, SNaN case
correctly.
+405430 Use gcc -Wimplicit-fallthrough=2 by default if available
405733 PPC64, xvcvdpsp should write 32-bit result to upper and lower 32-bits
of the 64-bit destination field.
405734 PPC64, vrlwnm, vrlwmi, vrldrm, vrldmi do not work properly when me < mb
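As background for the hunks that follow: gcc's -Wimplicit-fallthrough=2 (bug 405430 above) is satisfied either by an explicit break/goto or by a comment containing "fall through"/"fallthrough" (matched case-insensitively) placed just before the next case label, which is the pattern applied throughout this patch. Below is a minimal, stand-alone sketch of that annotation style; it is illustrative only and not taken from the Valgrind sources (decode() and the opcode values are made up for the example).

   #include <stdio.h>

   /* Illustrative only: a size-prefix case that deliberately falls through,
      annotated so that gcc -Wimplicit-fallthrough=2 does not warn. */
   static void decode(unsigned opc)
   {
      int sz = 4;
      switch (opc) {
         case 0xA4: sz = 1;                          /* fallthrough */
         case 0xA5: printf("movs, size %d\n", sz);   break;
         default:   printf("unhandled 0x%x\n", opc); break;
      }
   }

   int main(void)
   {
      decode(0xA4);   /* prints "movs, size 1" */
      decode(0xA5);   /* prints "movs, size 4" */
      return 0;
   }

The comment form (rather than GCC 7's __attribute__((fallthrough)) statement attribute) has the advantage of being ignored by compilers that do not know the warning.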
# undef CVT
goto decode_success;
}
+ break;
case 0x09:
/* VROUNDPD imm8, xmm2/m128, xmm1 */
# undef CVT
goto decode_success;
}
+ break;
case 0x0A:
case 0x0B:
} else {
storeLE(addr, getQRegLane((tt+3) % 32, ix, ty));
}
- /* fallthrough */
}
+ /* fallthrough */
case 3: {
IRExpr* addr
= binop(Iop_Add64, mkexpr(tTA), mkU64(2 * laneSzB));
} else {
storeLE(addr, getQRegLane((tt+2) % 32, ix, ty));
}
- /* fallthrough */
}
+ /* fallthrough */
case 2: {
IRExpr* addr
= binop(Iop_Add64, mkexpr(tTA), mkU64(1 * laneSzB));
} else {
storeLE(addr, getQRegLane((tt+1) % 32, ix, ty));
}
- /* fallthrough */
}
+ /* fallthrough */
case 1: {
IRExpr* addr
= binop(Iop_Add64, mkexpr(tTA), mkU64(0 * laneSzB));
}
return True;
}
- } else {
- /* fall through */
}
+ /* else fall through */
case 9:
dreg = ((theInstr >> 18) & 0x10) | ((theInstr >> 12) & 0xF);
mreg = ((theInstr >> 1) & 0x10) | (theInstr & 0xF);
/* Conditions starting with S should signal exception on QNaN inputs. */
switch (function) {
case 8: /* SAF */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0: /* AF */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
get_IR_roundingmode(), mkU64(0)));
break;
case 9: /* SUN */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 1: /* UN */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
get_IR_roundingmode(), mkU64(0))));
break;
case 0x19: /* SOR */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x11: /* OR */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
mkU64(0xFFFFFFFFFFFFFFFFULL))));
break;
case 0xa: /* SEQ */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 2: /* EQ */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
get_IR_roundingmode(), mkU64(0))));
break;
case 0x1A: /* SNEQ */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x12: /* NEQ */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
mkU64(0xFFFFFFFFFFFFFFFFULL))));
break;
case 0xB: /* SUEQ */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x3: /* UEQ */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
mkU64(0)))));
break;
case 0x1B: /* SNEQ */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x13: /* NEQ */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
mkU64(0)))));
break;
case 0xC: /* SLT */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x4: /* LT */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
get_IR_roundingmode(), mkU64(0))));
break;
case 0xD: /* SULT */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x5: /* ULT */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
mkU64(0)))));
break;
case 0xE: /* SLE */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x6: /* LE */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
mkU64(0)))));
break;
case 0xF: /* SULE */
- signaling = CMPSAFD;
+ signaling = CMPSAFD; /* fallthrough */
case 0x7: /* ULE */
assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
calculateFCSR(fs, ft, signaling, False, 2);
/* Conditions starting with S should signal exception on QNaN inputs. */
switch (function) {
case 8: /* SAF */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0: /* AF */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
get_IR_roundingmode(), mkU32(0))));
break;
case 9: /* SUN */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 1: /* UN */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0)))));
break;
case 0x19: /* SOR */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x11: /* OR */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0xFFFFFFFFU)))));
break;
case 0xa: /* SEQ */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 2: /* EQ */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0)))));
break;
case 0x1A: /* SNEQ */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x12: /* NEQ */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0xFFFFFFFFU)))));
break;
case 0xB: /* SUEQ */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x3: /* UEQ */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0))))));
break;
case 0x1B: /* SNEQ */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x13: /* NEQ */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0))))));
break;
case 0xC: /* SLT */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x4: /* LT */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0)))));
break;
case 0xD: /* SULT */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x5: /* ULT */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0))))));
break;
case 0xE: /* SLE */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x6: /* LE */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
mkU32(0))))));
break;
case 0xF: /* SULE */
- signaling = CMPSAFS;
+ signaling = CMPSAFS; /* fallthrough */
case 0x7: /* ULE */
assign(t0, binop(Iop_CmpF32,
getLoFromF64(Ity_F64, getFReg(fs)),
break;
} else {
ILLEGAL_INSTRUCTON;
+ break;
}
}
}
} else {
ILLEGAL_INSTRUCTON;
+ break;
}
}
case 0x3E: // immediate offset: 64bit: std/stdu/stq: mask off
// lowest 2 bits of immediate before forming EA
simm16 = simm16 & 0xFFFFFFFC;
+ /* fallthrough */
default: // immediate offset
assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
break;
if (!allow_DFP) goto decode_noDFP;
if (dis_dfp_arith( theInstr ))
goto decode_success;
+ goto decode_failure;
case 0x82: // dcmpo, DFP comparison ordered instruction
case 0x282: // dcmpu, DFP comparison unordered instruction
if (!allow_DFP) goto decode_noDFP;
abiinfo ) )
goto decode_success;
}
+ /* fallthrough to dis_vx_scalar_quad_precision */
/* Instructions implemented with Pre ISA 3.0 Iops */
/* VSX Scalar Quad-Precision instructions */
switch (abyte) {
/* According to the Intel manual, "repne movs" should never occur, but
* in practice it has happened, so allow for it here... */
- case 0xA4: sz = 1; /* REPNE MOVS<sz> */
+ case 0xA4: sz = 1; /* REPNE MOVS<sz> fallthrough */
case 0xA5:
dis_REP_op ( &dres, X86CondNZ, dis_MOVS, sz, eip_orig,
guest_EIP_bbstart+delta, "repne movs" );
break;
- case 0xA6: sz = 1; /* REPNE CMP<sz> */
+ case 0xA6: sz = 1; /* REPNE CMP<sz> fallthrough */
case 0xA7:
dis_REP_op ( &dres, X86CondNZ, dis_CMPS, sz, eip_orig,
guest_EIP_bbstart+delta, "repne cmps" );
break;
- case 0xAA: sz = 1; /* REPNE STOS<sz> */
+ case 0xAA: sz = 1; /* REPNE STOS<sz> fallthrough */
case 0xAB:
dis_REP_op ( &dres, X86CondNZ, dis_STOS, sz, eip_orig,
guest_EIP_bbstart+delta, "repne stos" );
break;
- case 0xAE: sz = 1; /* REPNE SCAS<sz> */
+ case 0xAE: sz = 1; /* REPNE SCAS<sz> fallthrough */
case 0xAF:
dis_REP_op ( &dres, X86CondNZ, dis_SCAS, sz, eip_orig,
guest_EIP_bbstart+delta, "repne scas" );
}
break;
- case 0xA4: sz = 1; /* REP MOVS<sz> */
+ case 0xA4: sz = 1; /* REP MOVS<sz> fallthrough */
case 0xA5:
dis_REP_op ( &dres, X86CondAlways, dis_MOVS, sz, eip_orig,
guest_EIP_bbstart+delta, "rep movs" );
break;
- case 0xA6: sz = 1; /* REPE CMP<sz> */
+ case 0xA6: sz = 1; /* REPE CMP<sz> fallthrough */
case 0xA7:
dis_REP_op ( &dres, X86CondZ, dis_CMPS, sz, eip_orig,
guest_EIP_bbstart+delta, "repe cmps" );
break;
- case 0xAA: sz = 1; /* REP STOS<sz> */
+ case 0xAA: sz = 1; /* REP STOS<sz> fallthrough */
case 0xAB:
dis_REP_op ( &dres, X86CondAlways, dis_STOS, sz, eip_orig,
guest_EIP_bbstart+delta, "rep stos" );
break;
- case 0xAC: sz = 1; /* REP LODS<sz> */
+ case 0xAC: sz = 1; /* REP LODS<sz> fallthrough */
case 0xAD:
dis_REP_op ( &dres, X86CondAlways, dis_LODS, sz, eip_orig,
guest_EIP_bbstart+delta, "rep lods" );
break;
- case 0xAE: sz = 1; /* REPE SCAS<sz> */
+ case 0xAE: sz = 1; /* REPE SCAS<sz> fallthrough */
case 0xAF:
dis_REP_op ( &dres, X86CondZ, dis_SCAS, sz, eip_orig,
guest_EIP_bbstart+delta, "repe scas" );
addInstr(env, ARM64Instr_MovI(dst, hregARM64_X0()));
return dst;
}
- /* else fall through; will hit the irreducible: label */
+ goto irreducible;
}
/* --------- LITERAL --------- */
addInstr(env, mk_iMOVds_RR(dst, hregARM_R0()));
return dst;
}
- /* else fall through; will hit the irreducible: label */
+ goto irreducible;
}
/* --------- LITERAL --------- */
switch (df) {
case 0:
dfm |= 0x10;
-
+ /* fallthrough */
case 1:
dfm |= 0x20;
-
+ /* fallthrough */
case 2:
dfm |= 0x40;
}
switch (op_unop) {
case Iop_1Uto64:
vassert(mode64);
+ /* fallthrough */
case Iop_1Uto8:
case Iop_1Uto32:
mask = toUShort(0x1);
break;
case Iop_8Uto64:
vassert(mode64);
+ /* fallthrough */
case Iop_8Uto16:
case Iop_8Uto32:
mask = toUShort(0xFF);
break;
case Iop_16Uto64:
vassert(mode64);
+ /* fallthrough */
case Iop_16Uto32:
mask = toUShort(0xFFFF);
break;
case Iop_Clz64:
vassert(mode64);
+ /* fallthrough */
case Iop_Clz32: {
HReg r_dst = newVRegI(env);
HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
AC_GCC_WARNING_SUBST([missing-parameter-type], [FLAG_W_MISSING_PARAMETER_TYPE])
AC_GCC_WARNING_SUBST([logical-op], [FLAG_W_LOGICAL_OP])
AC_GCC_WARNING_SUBST([enum-conversion], [FLAG_W_ENUM_CONVERSION])
+AC_GCC_WARNING_SUBST([implicit-fallthrough=2], [FLAG_W_IMPLICIT_FALLTHROUGH])
# Does this compiler support -Wformat-security ?
# Special handling is needed, because certain GCC versions require -Wformat
interrupts_non_interruptible++;
VG_(force_vgdb_poll) ();
give_control_back_to_vgdb();
-
+           /* If give_control_back_to_vgdb returns in a non-interruptible
+              state, something went horribly wrong; fall through to vg_assert. */
default: vg_assert(0);
}
}
break;
case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
- // FALLTHROUGH - assume vfork (somewhat) == fork, see ML_(do_fork_clone).
cloneflags &= ~VKI_CLONE_VM;
+ // FALLTHROUGH - assume vfork (somewhat) == fork, see ML_(do_fork_clone).
case 0: /* plain fork */
SET_STATUS_from_SysRes(
PRE_MEM_WRITE("bpf(attr->btf_log_buf)",
attr->btf_log_buf, attr->btf_log_size);
}
+ break;
case VKI_BPF_TASK_FD_QUERY:
/* Get info about the task. Write collected info. */
PRE_MEM_READ("bpf(attr->task_fd_query.pid)", (Addr)&attr->task_fd_query.pid, sizeof(attr->task_fd_query.pid));
mc->szB = szB;
mc->allockind = kind;
switch ( MC_(n_where_pointers)() ) {
- case 2: mc->where[1] = 0; // fallback to 1
- case 1: mc->where[0] = 0; // fallback to 0
+ case 2: mc->where[1] = 0; // fallthrough to 1
+ case 1: mc->where[0] = 0; // fallthrough to 0
case 0: break;
default: tl_assert(0);
}