/* Build expressions with type checking for C++ compiler.
- Copyright (C) 1987-2022 Free Software Foundation, Inc.
+ Copyright (C) 1987-2024 Free Software Foundation, Inc.
Hacked by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
complete type when this function returns. */
tree
-require_complete_type_sfinae (tree value, tsubst_flags_t complain)
+require_complete_type (tree value,
+ tsubst_flags_t complain /* = tf_warning_or_error */)
{
tree type;
return error_mark_node;
}
-tree
-require_complete_type (tree value)
-{
- return require_complete_type_sfinae (value, tf_warning_or_error);
-}
-
/* Try to complete TYPE, if it is incomplete. For example, if TYPE is
a template instantiation, do the instantiation. Returns TYPE,
whether or not it could be completed, unless something goes
return cp_build_type_attribute_variant (type, attrs);
}
+/* Compare floating point conversion ranks and subranks of T1 and T2
+ types. If T1 and T2 have unordered conversion ranks, return 3.
+ If T1 has greater conversion rank than T2, return 2.
+ If T2 has greater conversion rank than T1, return -2.
+ If T1 has equal conversion rank as T2, return -1, 0 or 1 depending
+ on if T1 has smaller, equal or greater conversion subrank than
+ T2. */
+
+int
+cp_compare_floating_point_conversion_ranks (tree t1, tree t2)
+{
+ tree mv1 = TYPE_MAIN_VARIANT (t1);
+ tree mv2 = TYPE_MAIN_VARIANT (t2);
+ int extended1 = 0;
+ int extended2 = 0;
+
+ if (mv1 == mv2)
+ return 0;
+
+ /* Encode extendedness: 0 means a standard floating-point type,
+ i + 1 means the _FloatN/_FloatNx type with FLOATN_NX index i,
+ and std::bfloat16_t is marked below with true (i.e. 1). */
+ for (int i = 0; i < NUM_FLOATN_NX_TYPES; ++i)
+ {
+ if (mv1 == FLOATN_NX_TYPE_NODE (i))
+ extended1 = i + 1;
+ if (mv2 == FLOATN_NX_TYPE_NODE (i))
+ extended2 = i + 1;
+ }
+ if (mv1 == bfloat16_type_node)
+ extended1 = true;
+ if (mv2 == bfloat16_type_node)
+ extended2 = true;
+ /* Canonicalize so that if exactly one of the types is extended, it
+ is T1: recurse with the arguments swapped and negate the result
+ on the way back (3, unordered, is symmetric). */
+ if (extended2 && !extended1)
+ {
+ int ret = cp_compare_floating_point_conversion_ranks (t2, t1);
+ return ret == 3 ? 3 : -ret;
+ }
+
+ const struct real_format *fmt1 = REAL_MODE_FORMAT (TYPE_MODE (t1));
+ const struct real_format *fmt2 = REAL_MODE_FORMAT (TYPE_MODE (t2));
+ gcc_assert (fmt1->b == 2 && fmt2->b == 2);
+ /* For {ibm,mips}_extended_format formats, the type has variable
+ precision up to ~2150 bits when the first double is around maximum
+ representable double and second double is subnormal minimum.
+ So, e.g. for __ibm128 vs. std::float128_t, they have unordered
+ ranks. */
+ int p1 = (MODE_COMPOSITE_P (TYPE_MODE (t1))
+ ? fmt1->emax - fmt1->emin + fmt1->p - 1 : fmt1->p);
+ int p2 = (MODE_COMPOSITE_P (TYPE_MODE (t2))
+ ? fmt2->emax - fmt2->emin + fmt2->p - 1 : fmt2->p);
+ /* The rank of a floating point type T is greater than the rank of
+ any floating-point type whose set of values is a proper subset
+ of the set of values of T. Compare the value sets through the
+ (precision, exponent range) pair of the binary formats. */
+ if ((p1 > p2 && fmt1->emax >= fmt2->emax)
+ || (p1 == p2 && fmt1->emax > fmt2->emax))
+ return 2;
+ if ((p1 < p2 && fmt1->emax <= fmt2->emax)
+ || (p1 == p2 && fmt1->emax < fmt2->emax))
+ return -2;
+ /* Neither value set contains the other: the ranks are unordered. */
+ if ((p1 > p2 && fmt1->emax < fmt2->emax)
+ || (p1 < p2 && fmt1->emax > fmt2->emax))
+ return 3;
+ if (!extended1 && !extended2)
+ {
+ /* The rank of long double is greater than the rank of double, which
+ is greater than the rank of float. */
+ if (t1 == long_double_type_node)
+ return 2;
+ else if (t2 == long_double_type_node)
+ return -2;
+ if (t1 == double_type_node)
+ return 2;
+ else if (t2 == double_type_node)
+ return -2;
+ if (t1 == float_type_node)
+ return 2;
+ else if (t2 == float_type_node)
+ return -2;
+ return 0;
+ }
+ /* Two extended floating-point types with the same set of values have equal
+ ranks. */
+ if (extended1 && extended2)
+ {
+ if ((extended1 <= NUM_FLOATN_TYPES) == (extended2 <= NUM_FLOATN_TYPES))
+ {
+ /* Prefer higher extendedN value. */
+ if (extended1 > extended2)
+ return 1;
+ else if (extended1 < extended2)
+ return -1;
+ else
+ return 0;
+ }
+ else if (extended1 <= NUM_FLOATN_TYPES)
+ /* Prefer _FloatN type over _FloatMx type. */
+ return 1;
+ else if (extended2 <= NUM_FLOATN_TYPES)
+ return -1;
+ else
+ return 0;
+ }
+
+ /* gcc_assert (extended1 && !extended2); */
+ /* T1 is extended, T2 is standard. Count how many of float, double
+ and long double (NOTE: the pointer walk relies on these nodes
+ being adjacent in the global_trees array) share T1's value set. */
+ tree *p;
+ int cnt = 0;
+ for (p = &float_type_node; p <= &long_double_type_node; ++p)
+ {
+ const struct real_format *fmt3 = REAL_MODE_FORMAT (TYPE_MODE (*p));
+ gcc_assert (fmt3->b == 2);
+ int p3 = (MODE_COMPOSITE_P (TYPE_MODE (*p))
+ ? fmt3->emax - fmt3->emin + fmt3->p - 1 : fmt3->p);
+ if (p1 == p3 && fmt1->emax == fmt3->emax)
+ ++cnt;
+ }
+ /* An extended floating-point type with the same set of values
+ as exactly one cv-unqualified standard floating-point type
+ has a rank equal to the rank of that standard floating-point
+ type.
+
+ An extended floating-point type with the same set of values
+ as more than one cv-unqualified standard floating-point type
+ has a rank equal to the rank of double.
+
+ Thus, if the latter is true and t2 is long double, t2
+ has higher rank. */
+ if (cnt > 1 && mv2 == long_double_type_node)
+ return -2;
+ /* Otherwise, they have equal rank, but extended types
+ (other than std::bfloat16_t) have higher subrank.
+ std::bfloat16_t shouldn't have equal rank to any standard
+ floating point type. */
+ return 1;
+}
+
/* Return the common type for two arithmetic types T1 and T2 under the
usual arithmetic conversions. The default conversions have already
been applied, and enumerated types converted to their compatible
tree subtype
= type_after_usual_arithmetic_conversions (subtype1, subtype2);
+ if (subtype == error_mark_node)
+ return subtype;
if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
return build_type_attribute_variant (t1, attributes);
else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
if (code2 == REAL_TYPE && code1 != REAL_TYPE)
return build_type_attribute_variant (t2, attributes);
+ if (code1 == REAL_TYPE
+ && (extended_float_type_p (t1) || extended_float_type_p (t2)))
+ {
+ tree mv1 = TYPE_MAIN_VARIANT (t1);
+ tree mv2 = TYPE_MAIN_VARIANT (t2);
+ if (mv1 == mv2)
+ return build_type_attribute_variant (t1, attributes);
+
+ int cmpret = cp_compare_floating_point_conversion_ranks (mv1, mv2);
+ if (cmpret == 3)
+ return error_mark_node;
+ else if (cmpret >= 0)
+ return build_type_attribute_variant (t1, attributes);
+ else
+ return build_type_attribute_variant (t2, attributes);
+ }
+
/* Both real or both integers; use the one with greater precision. */
if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
return build_type_attribute_variant (t1, attributes);
return false;
break;
- case UNDERLYING_TYPE:
- if (!same_type_p (UNDERLYING_TYPE_TYPE (t1), UNDERLYING_TYPE_TYPE (t2)))
+ case TRAIT_TYPE:
+ if (TRAIT_TYPE_KIND (t1) != TRAIT_TYPE_KIND (t2))
+ return false;
+ if (!cp_tree_equal (TRAIT_TYPE_TYPE1 (t1), TRAIT_TYPE_TYPE1 (t2))
+ || !cp_tree_equal (TRAIT_TYPE_TYPE2 (t1), TRAIT_TYPE_TYPE2 (t2)))
return false;
break;
substitute into the specialization arguments at instantiation
time. And aliases can't be equivalent without being ==, so
we don't need to look any deeper. */
+ ++processing_template_decl;
tree dep1 = dependent_alias_template_spec_p (t1, nt_transparent);
tree dep2 = dependent_alias_template_spec_p (t2, nt_transparent);
+ --processing_template_decl;
if ((dep1 || dep2) && dep1 != dep2)
return false;
}
the common initial sequence. */
bool
-next_common_initial_seqence (tree &memb1, tree &memb2)
+next_common_initial_sequence (tree &memb1, tree &memb2)
{
while (memb1)
{
if ((!lookup_attribute ("no_unique_address", DECL_ATTRIBUTES (memb1)))
!= !lookup_attribute ("no_unique_address", DECL_ATTRIBUTES (memb2)))
return false;
+ if (DECL_ALIGN (memb1) != DECL_ALIGN (memb2))
+ return false;
if (!tree_int_cst_equal (bit_position (memb1), bit_position (memb2)))
return false;
return true;
type2 = cp_build_qualified_type (type2, TYPE_UNQUALIFIED);
if (TREE_CODE (type1) == ENUMERAL_TYPE)
- return (TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
- && tree_int_cst_equal (TYPE_SIZE (type1), TYPE_SIZE (type2))
+ return (tree_int_cst_equal (TYPE_SIZE (type1), TYPE_SIZE (type2))
&& same_type_p (finish_underlying_type (type1),
finish_underlying_type (type2)));
if (CLASS_TYPE_P (type1)
&& std_layout_type_p (type1)
&& std_layout_type_p (type2)
- && TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
&& tree_int_cst_equal (TYPE_SIZE (type1), TYPE_SIZE (type2)))
{
tree field1 = TYPE_FIELDS (type1);
{
while (1)
{
- if (!next_common_initial_seqence (field1, field2))
+ if (!next_common_initial_sequence (field1, field2))
return false;
if (field1 == NULL_TREE)
return true;
}
\f
-/* Process a sizeof or alignof expression where the operand is a
- type. STD_ALIGNOF indicates whether an alignof has C++11 (minimum alignment)
- or GNU (preferred alignment) semantics; it is ignored if op is
+/* Process a sizeof or alignof expression where the operand is a type.
+ STD_ALIGNOF indicates whether an alignof has C++11 (minimum alignment)
+ or GNU (preferred alignment) semantics; it is ignored if OP is
SIZEOF_EXPR. */
tree
else
return error_mark_node;
}
+ else if (VOID_TYPE_P (type) && std_alignof)
+ {
+ if (complain)
+ error_at (loc, "invalid application of %qs to a void type",
+ OVL_OP_INFO (false, op)->name);
+ return error_mark_node;
+ }
bool dependent_p = dependent_type_p (type);
if (!dependent_p)
/* [dcl.align]/3:
When the alignment-specifier is of the form
- alignas(type-id ), it shall have the same effect as
- alignas(alignof(type-id )). */
+ alignas(type-id), it shall have the same effect as
+ alignas(alignof(type-id)). */
return cxx_sizeof_or_alignof_type (input_location,
- e, ALIGNOF_EXPR, true, false);
+ e, ALIGNOF_EXPR,
+ /*std_alignof=*/true,
+ /*complain=*/true);
/* If we reach this point, it means the alignas expression is of
the form "alignas(assignment-expression)", so we should follow
return false;
if (is_overloaded_fn (expr) && !really_overloaded_fn (expr))
expr = get_first_fn (expr);
- if (DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
+ if (TREE_TYPE (expr)
+ && DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
{
if (complain & tf_error)
{
return error_mark_node;
}
- /* Don't let an array compound literal decay to a pointer. It can
- still be used to initialize an array or bind to a reference. */
- if (TREE_CODE (exp) == TARGET_EXPR)
- {
- if (complain & tf_error)
- error_at (loc, "taking address of temporary array");
- return error_mark_node;
- }
-
ptrtype = build_pointer_type (TREE_TYPE (type));
if (VAR_P (exp))
return build_min_nt_loc (UNKNOWN_LOCATION, COMPONENT_REF,
orig_object, orig_name, NULL_TREE);
}
- object = build_non_dependent_expr (object);
}
else if (c_dialect_objc ()
&& identifier_p (name)
name, scope);
return error_mark_node;
}
-
+
if (TREE_SIDE_EFFECTS (object))
val = build2 (COMPOUND_EXPR, TREE_TYPE (val), object, val);
return val;
return error_mark_node;
}
+ /* NAME may refer to a static data member, in which case there is
+ one copy of the data member that is shared by all the objects of
+ the class. So NAME can be unambiguously referred to even if
+ there are multiple indirect base classes containing NAME. */
+ const base_access ba = [scope, name] ()
+ {
+ if (identifier_p (name))
+ {
+ tree m = lookup_member (scope, name, /*protect=*/0,
+ /*want_type=*/false, tf_none);
+ if (!m || shared_member_p (m))
+ return ba_any;
+ }
+ return ba_check;
+ } ();
+
/* Find the base of OBJECT_TYPE corresponding to SCOPE. */
- access_path = lookup_base (object_type, scope, ba_check,
- NULL, complain);
+ access_path = lookup_base (object_type, scope, ba, NULL, complain);
if (access_path == error_mark_node)
return error_mark_node;
if (!access_path)
= build_dependent_operator_type (lookups, INDIRECT_REF, false);
return expr;
}
- expr = build_non_dependent_expr (expr);
}
rval = build_new_op (loc, INDIRECT_REF, LOOKUP_NORMAL, expr,
If INDEX is of some user-defined type, it must be converted to
integer type. Otherwise, to make a compatible PLUS_EXPR, it
will inherit the type of the array, which will be some pointer type.
-
+
LOC is the location to use in building the array reference. */
tree
cp_build_array_ref (location_t loc, tree array, tree idx,
tsubst_flags_t complain)
{
+ tree first = NULL_TREE;
tree ret;
if (idx == 0)
bool non_lvalue = convert_vector_to_array_for_subscript (loc, &array, idx);
+ /* 0[array] */
+ if (TREE_CODE (TREE_TYPE (idx)) == ARRAY_TYPE)
+ {
+ std::swap (array, idx);
+ if (flag_strong_eval_order == 2 && TREE_SIDE_EFFECTS (array))
+ idx = first = save_expr (idx);
+ }
+
if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE)
{
tree rval, type;
|= (CP_TYPE_VOLATILE_P (type) | TREE_SIDE_EFFECTS (array));
TREE_THIS_VOLATILE (rval)
|= (CP_TYPE_VOLATILE_P (type) | TREE_THIS_VOLATILE (array));
- ret = require_complete_type_sfinae (rval, complain);
+ ret = require_complete_type (rval, complain);
protected_set_expr_location (ret, loc);
if (non_lvalue)
ret = non_lvalue_loc (loc, ret);
+ if (first)
+ ret = build2_loc (loc, COMPOUND_EXPR, TREE_TYPE (ret), first, ret);
return ret;
}
{
tree ar = cp_default_conversion (array, complain);
tree ind = cp_default_conversion (idx, complain);
- tree first = NULL_TREE;
- if (flag_strong_eval_order == 2 && TREE_SIDE_EFFECTS (ind))
+ if (!first && flag_strong_eval_order == 2 && TREE_SIDE_EFFECTS (ind))
ar = first = save_expr (ar);
/* Put the integer in IND to simplify error checking. */
}
else
{
- if (fndecl && magic_varargs_p (fndecl))
- /* Don't do ellipsis conversion for __built_in_constant_p
- as this will result in spurious errors for non-trivial
- types. */
- val = require_complete_type_sfinae (val, complain);
+ int magic = fndecl ? magic_varargs_p (fndecl) : 0;
+ if (magic)
+ {
+ /* Don't truncate excess precision to the semantic type. */
+ if (magic == 1 && TREE_CODE (val) == EXCESS_PRECISION_EXPR)
+ val = TREE_OPERAND (val, 0);
+ /* Don't do ellipsis conversion for __builtin_constant_p
+ as this will result in spurious errors for non-trivial
+ types. */
+ val = require_complete_type (val, complain);
+ }
else
val = convert_arg_to_ellipsis (val, complain);
= build_dependent_operator_type (lookups, code, false);
return expr;
}
- arg1 = build_non_dependent_expr (arg1);
- arg2 = build_non_dependent_expr (arg2);
}
if (code == DOTSTAR_EXPR)
|| type_dependent_expression_p (arg2))
return build_min_nt_loc (loc, ARRAY_REF, arg1, arg2,
NULL_TREE, NULL_TREE);
- arg1 = build_non_dependent_expr (arg1);
- arg2 = build_non_dependent_expr (arg2);
}
expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL, arg1, arg2,
{
tree zero_vec = build_zero_cst (type);
tree minus_one_vec = build_minus_one_cst (type);
- tree cmp_type = truth_type_for (type);
+ tree cmp_type = truth_type_for (TREE_TYPE (arg0));
tree cmp = build2 (code, cmp_type, arg0, arg1);
return build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
}
|| warning_suppressed_p (op, OPT_Waddress))
return;
- if (TREE_CODE (op) == NON_DEPENDENT_EXPR)
- op = TREE_OPERAND (op, 0);
-
tree cop = fold_for_warn (op);
if (TREE_CODE (cop) == NON_LVALUE_EXPR)
tree off = TREE_OPERAND (cop, 1);
if (!integer_zerop (off)
&& !warning_suppressed_p (cop, OPT_Waddress))
- warning_at (location, OPT_Waddress, "comparing the result of pointer "
- "addition %qE and NULL", cop);
+ {
+ tree base = TREE_OPERAND (cop, 0);
+ STRIP_NOPS (base);
+ if (TYPE_REF_P (TREE_TYPE (base)))
+ warning_at (location, OPT_Waddress, "the compiler can assume that "
+ "the address of %qE will never be NULL", base);
+ else
+ warning_at (location, OPT_Waddress, "comparing the result of "
+ "pointer addition %qE and NULL", cop);
+ }
return;
}
else if (CONVERT_EXPR_P (op)
type, this behavior is deprecated ([depr.arith.conv.enum]). CODE is the
code of the binary operation, TYPE0 and TYPE1 are the types of the operands,
and LOC is the location for the whole binary expression.
+ For C++26 this is ill-formed rather than deprecated.
+ Return true for SFINAE errors.
TODO: Consider combining this with -Wenum-compare in build_new_op_1. */
-static void
+static bool
do_warn_enum_conversions (location_t loc, enum tree_code code, tree type0,
- tree type1)
+ tree type1, tsubst_flags_t complain)
{
if (TREE_CODE (type0) == ENUMERAL_TYPE
&& TREE_CODE (type1) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (type0) != TYPE_MAIN_VARIANT (type1))
{
+ if (cxx_dialect >= cxx26)
+ {
+ if ((complain & tf_warning_or_error) == 0)
+ return true;
+ }
+ else if ((complain & tf_warning) == 0)
+ return false;
/* In C++20, -Wdeprecated-enum-enum-conversion is on by default.
Otherwise, warn if -Wenum-conversion is on. */
enum opt_code opt;
else if (warn_enum_conversion)
opt = OPT_Wenum_conversion;
else
- return;
+ return false;
switch (code)
{
case EQ_EXPR:
case NE_EXPR:
/* Comparisons are handled by -Wenum-compare. */
- return;
+ return false;
case SPACESHIP_EXPR:
/* This is invalid, don't warn. */
- return;
+ return false;
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
- warning_at (loc, opt, "bitwise operation between different "
- "enumeration types %qT and %qT is deprecated",
- type0, type1);
- return;
+ if (cxx_dialect >= cxx26)
+ pedwarn (loc, opt, "bitwise operation between different "
+ "enumeration types %qT and %qT", type0, type1);
+ else
+ warning_at (loc, opt, "bitwise operation between different "
+ "enumeration types %qT and %qT is deprecated",
+ type0, type1);
+ return false;
default:
- warning_at (loc, opt, "arithmetic between different enumeration "
- "types %qT and %qT is deprecated", type0, type1);
- return;
+ if (cxx_dialect >= cxx26)
+ pedwarn (loc, opt, "arithmetic between different enumeration "
+ "types %qT and %qT", type0, type1);
+ else
+ warning_at (loc, opt, "arithmetic between different enumeration "
+ "types %qT and %qT is deprecated", type0, type1);
+ return false;
}
}
else if ((TREE_CODE (type0) == ENUMERAL_TYPE
- && TREE_CODE (type1) == REAL_TYPE)
- || (TREE_CODE (type0) == REAL_TYPE
+ && SCALAR_FLOAT_TYPE_P (type1))
+ || (SCALAR_FLOAT_TYPE_P (type0)
&& TREE_CODE (type1) == ENUMERAL_TYPE))
{
+ if (cxx_dialect >= cxx26)
+ {
+ if ((complain & tf_warning_or_error) == 0)
+ return true;
+ }
+ else if ((complain & tf_warning) == 0)
+ return false;
const bool enum_first_p = TREE_CODE (type0) == ENUMERAL_TYPE;
/* In C++20, -Wdeprecated-enum-float-conversion is on by default.
Otherwise, warn if -Wenum-conversion is on. */
else if (warn_enum_conversion)
opt = OPT_Wenum_conversion;
else
- return;
+ return false;
switch (code)
{
case LE_EXPR:
case EQ_EXPR:
case NE_EXPR:
- if (enum_first_p)
+ if (enum_first_p && cxx_dialect >= cxx26)
+ pedwarn (loc, opt, "comparison of enumeration type %qT with "
+ "floating-point type %qT", type0, type1);
+ else if (cxx_dialect >= cxx26)
+ pedwarn (loc, opt, "comparison of floating-point type %qT "
+ "with enumeration type %qT", type0, type1);
+ else if (enum_first_p)
warning_at (loc, opt, "comparison of enumeration type %qT with "
"floating-point type %qT is deprecated",
type0, type1);
warning_at (loc, opt, "comparison of floating-point type %qT "
"with enumeration type %qT is deprecated",
type0, type1);
- return;
+ return false;
case SPACESHIP_EXPR:
/* This is invalid, don't warn. */
- return;
+ return false;
default:
- if (enum_first_p)
+ if (enum_first_p && cxx_dialect >= cxx26)
+ pedwarn (loc, opt, "arithmetic between enumeration type %qT "
+ "and floating-point type %qT", type0, type1);
+ else if (cxx_dialect >= cxx26)
+ pedwarn (loc, opt, "arithmetic between floating-point type %qT "
+ "and enumeration type %qT", type0, type1);
+ else if (enum_first_p)
warning_at (loc, opt, "arithmetic between enumeration type %qT "
"and floating-point type %qT is deprecated",
type0, type1);
warning_at (loc, opt, "arithmetic between floating-point type %qT "
"and enumeration type %qT is deprecated",
type0, type1);
- return;
+ return false;
}
}
+ return false;
}
/* Build a binary-operation expression without default conversions.
{
tree op0, op1;
enum tree_code code0, code1;
- tree type0, type1;
+ tree type0, type1, orig_type0, orig_type1;
const char *invalid_op_diag;
/* Expression code to give to the expression when it is built.
In the simplest cases this is the common type of the arguments. */
tree result_type = NULL_TREE;
+ /* When the computation is in excess precision, the type of the
+ final EXCESS_PRECISION_EXPR. */
+ tree semantic_result_type = NULL;
+
/* Nonzero means operands have already been type-converted
in whatever way is necessary.
Zero means they need to be converted to RESULT_TYPE. */
convert it to this type. */
tree final_type = 0;
- tree result, result_ovl;
+ tree result;
/* Nonzero if this is an operation like MIN or MAX which can
safely be computed in short if both args are promoted shorts.
/* Tree holding instrumentation expression. */
tree instrument_expr = NULL_TREE;
+ /* True means this is an arithmetic operation that may need excess
+ precision. */
+ bool may_need_excess_precision;
+
/* Apply default conversions. */
op0 = resolve_nondeduced_context (orig_op0, complain);
op1 = resolve_nondeduced_context (orig_op1, complain);
}
}
- type0 = TREE_TYPE (op0);
- type1 = TREE_TYPE (op1);
+ orig_type0 = type0 = TREE_TYPE (op0);
+ orig_type1 = type1 = TREE_TYPE (op1);
+ tree non_ep_op0 = op0;
+ tree non_ep_op1 = op1;
/* The expression codes of the data types of the arguments tell us
whether the arguments are integers, floating, pointers, etc. */
= targetm.invalid_binary_op (code, type0, type1)))
{
if (complain & tf_error)
- error (invalid_op_diag);
+ {
+ if (code0 == REAL_TYPE
+ && code1 == REAL_TYPE
+ && (extended_float_type_p (type0)
+ || extended_float_type_p (type1))
+ && cp_compare_floating_point_conversion_ranks (type0,
+ type1) == 3)
+ {
+ rich_location richloc (line_table, location);
+ binary_op_error (&richloc, code, type0, type1);
+ }
+ else
+ error (invalid_op_diag);
+ }
return error_mark_node;
}
+ switch (code)
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ may_need_excess_precision = true;
+ break;
+ case EQ_EXPR:
+ case NE_EXPR:
+ case LE_EXPR:
+ case GE_EXPR:
+ case LT_EXPR:
+ case GT_EXPR:
+ case SPACESHIP_EXPR:
+ /* Excess precision for implicit conversions of integers to
+ floating point. */
+ may_need_excess_precision = (ANY_INTEGRAL_TYPE_P (type0)
+ || ANY_INTEGRAL_TYPE_P (type1));
+ break;
+ default:
+ may_need_excess_precision = false;
+ break;
+ }
+ if (TREE_CODE (op0) == EXCESS_PRECISION_EXPR)
+ {
+ op0 = TREE_OPERAND (op0, 0);
+ type0 = TREE_TYPE (op0);
+ }
+ else if (may_need_excess_precision
+ && (code0 == REAL_TYPE || code0 == COMPLEX_TYPE))
+ if (tree eptype = excess_precision_type (type0))
+ {
+ type0 = eptype;
+ op0 = convert (eptype, op0);
+ }
+ if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
+ {
+ op1 = TREE_OPERAND (op1, 0);
+ type1 = TREE_TYPE (op1);
+ }
+ else if (may_need_excess_precision
+ && (code1 == REAL_TYPE || code1 == COMPLEX_TYPE))
+ if (tree eptype = excess_precision_type (type1))
+ {
+ type1 = eptype;
+ op1 = convert (eptype, op1);
+ }
+
/* Issue warnings about peculiar, but valid, uses of NULL. */
if ((null_node_p (orig_op0) || null_node_p (orig_op1))
/* It's reasonable to use pointer values as operands of &&
if ((gnu_vector_type_p (type0) && code1 != VECTOR_TYPE)
|| (gnu_vector_type_p (type1) && code0 != VECTOR_TYPE))
{
- enum stv_conv convert_flag = scalar_to_vector (location, code, op0, op1,
- complain & tf_error);
+ enum stv_conv convert_flag
+ = scalar_to_vector (location, code, non_ep_op0, non_ep_op1,
+ complain & tf_error);
switch (convert_flag)
{
op0 = convert (TREE_TYPE (type1), op0);
op0 = save_expr (op0);
op0 = build_vector_from_val (type1, op0);
- type0 = TREE_TYPE (op0);
+ orig_type0 = type0 = TREE_TYPE (op0);
code0 = TREE_CODE (type0);
converted = 1;
break;
op1 = convert (TREE_TYPE (type0), op1);
op1 = save_expr (op1);
op1 = build_vector_from_val (type0, op1);
- type1 = TREE_TYPE (op1);
+ orig_type1 = type1 = TREE_TYPE (op1);
code1 = TREE_CODE (type1);
converted = 1;
break;
type0 = TREE_TYPE (type0);
if (!TYPE_P (type1))
type1 = TREE_TYPE (type1);
- if (INDIRECT_TYPE_P (type0) && same_type_p (TREE_TYPE (type0), type1))
+ if (type0
+ && INDIRECT_TYPE_P (type0)
+ && same_type_p (TREE_TYPE (type0), type1))
{
if (!(TREE_CODE (first_arg) == PARM_DECL
&& DECL_ARRAY_PARAMETER_P (first_arg)
"first %<sizeof%> operand was declared here");
}
}
- else if (TREE_CODE (type0) == ARRAY_TYPE
+ else if (!dependent_type_p (type0)
+ && !dependent_type_p (type1)
+ && TREE_CODE (type0) == ARRAY_TYPE
&& !char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type0)))
/* Set by finish_parenthesized_expr. */
&& !warning_suppressed_p (op1, OPT_Wsizeof_array_div)
point, so we have to dig out the original type to find out if
it was unsigned. */
tree stripped_op1 = tree_strip_any_location_wrapper (op1);
- shorten = ((TREE_CODE (op0) == NOP_EXPR
- && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
- || (TREE_CODE (stripped_op1) == INTEGER_CST
- && ! integer_all_onesp (stripped_op1)));
+ shorten = may_shorten_divmod (op0, stripped_op1);
}
common = 1;
quotient can't be represented in the computation mode. We shorten
only if unsigned or if dividing by something we know != -1. */
tree stripped_op1 = tree_strip_any_location_wrapper (op1);
- shorten = ((TREE_CODE (op0) == NOP_EXPR
- && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
- || (TREE_CODE (stripped_op1) == INTEGER_CST
- && ! integer_all_onesp (stripped_op1)));
+ shorten = may_shorten_divmod (op0, stripped_op1);
common = 1;
}
break;
doing_shift = true;
if (TREE_CODE (const_op0) == INTEGER_CST
&& tree_int_cst_sgn (const_op0) < 0
+ && !TYPE_OVERFLOW_WRAPS (type0)
&& (complain & tf_warning)
&& c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_negative_value,
pfn0 = pfn_from_ptrmemfunc (op0);
delta0 = delta_from_ptrmemfunc (op0);
- e1 = cp_build_binary_op (location,
- EQ_EXPR,
- pfn0,
- build_zero_cst (TREE_TYPE (pfn0)),
- complain);
+ {
+ /* If we will warn below about a null-address compare
+ involving the orig_op0 ptrmemfunc, we'd likely also
+ warn about the pfn0's null-address compare, and
+ that would be redundant, so suppress it. */
+ warning_sentinel ws (warn_address);
+ e1 = cp_build_binary_op (location,
+ EQ_EXPR,
+ pfn0,
+ build_zero_cst (TREE_TYPE (pfn0)),
+ complain);
+ }
e2 = cp_build_binary_op (location,
BIT_AND_EXPR,
delta0,
&& (shorten || common || short_compare))
{
result_type = cp_common_type (type0, type1);
- if (complain & tf_warning)
+ if (result_type == error_mark_node)
{
- do_warn_double_promotion (result_type, type0, type1,
- "implicit conversion from %qH to %qI "
- "to match other operand of binary "
- "expression",
- location);
- do_warn_enum_conversions (location, code, TREE_TYPE (orig_op0),
- TREE_TYPE (orig_op1));
+ tree t1 = type0;
+ tree t2 = type1;
+ if (TREE_CODE (t1) == COMPLEX_TYPE)
+ t1 = TREE_TYPE (t1);
+ if (TREE_CODE (t2) == COMPLEX_TYPE)
+ t2 = TREE_TYPE (t2);
+ gcc_checking_assert (TREE_CODE (t1) == REAL_TYPE
+ && TREE_CODE (t2) == REAL_TYPE
+ && (extended_float_type_p (t1)
+ || extended_float_type_p (t2))
+ && cp_compare_floating_point_conversion_ranks
+ (t1, t2) == 3);
+ if (complain & tf_error)
+ {
+ rich_location richloc (line_table, location);
+ binary_op_error (&richloc, code, type0, type1);
+ }
+ return error_mark_node;
+ }
+ if (complain & tf_warning)
+ do_warn_double_promotion (result_type, type0, type1,
+ "implicit conversion from %qH to %qI "
+ "to match other operand of binary "
+ "expression", location);
+ if (do_warn_enum_conversions (location, code, TREE_TYPE (orig_op0),
+ TREE_TYPE (orig_op1), complain))
+ return error_mark_node;
+ }
+ if (may_need_excess_precision
+ && (orig_type0 != type0 || orig_type1 != type1)
+ && build_type == NULL_TREE
+ && result_type)
+ {
+ gcc_assert (common);
+ semantic_result_type = cp_common_type (orig_type0, orig_type1);
+ if (semantic_result_type == error_mark_node)
+ {
+ tree t1 = orig_type0;
+ tree t2 = orig_type1;
+ if (TREE_CODE (t1) == COMPLEX_TYPE)
+ t1 = TREE_TYPE (t1);
+ if (TREE_CODE (t2) == COMPLEX_TYPE)
+ t2 = TREE_TYPE (t2);
+ gcc_checking_assert (TREE_CODE (t1) == REAL_TYPE
+ && TREE_CODE (t2) == REAL_TYPE
+ && (extended_float_type_p (t1)
+ || extended_float_type_p (t2))
+ && cp_compare_floating_point_conversion_ranks
+ (t1, t2) == 3);
+ if (complain & tf_error)
+ {
+ rich_location richloc (line_table, location);
+ binary_op_error (&richloc, code, type0, type1);
+ }
+ return error_mark_node;
}
}
tree_code orig_code0 = TREE_CODE (orig_type0);
tree orig_type1 = TREE_TYPE (orig_op1);
tree_code orig_code1 = TREE_CODE (orig_type1);
- if (!result_type)
- /* Nope. */;
+ if (!result_type || result_type == error_mark_node)
+ /* Nope. */
+ result_type = NULL_TREE;
else if ((orig_code0 == BOOLEAN_TYPE) != (orig_code1 == BOOLEAN_TYPE))
/* "If one of the operands is of type bool and the other is not, the
program is ill-formed." */
build_type ? build_type : result_type,
NULL_TREE, op1);
TREE_OPERAND (tmp, 0) = op0;
+ if (semantic_result_type)
+ tmp = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, tmp);
return tmp;
}
}
}
result = build2 (COMPLEX_EXPR, result_type, real, imag);
+ if (semantic_result_type)
+ result = build1 (EXCESS_PRECISION_EXPR, semantic_result_type,
+ result);
return result;
}
result = build2 (COMPOUND_EXPR, TREE_TYPE (result),
instrument_expr, result);
- if (!processing_template_decl)
+ if (resultcode == SPACESHIP_EXPR && !processing_template_decl)
+ result = get_target_expr (result, complain);
+
+ if (semantic_result_type)
+ result = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, result);
+
+ if (!c_inhibit_evaluation_warnings)
{
- if (resultcode == SPACESHIP_EXPR)
- result = get_target_expr_sfinae (result, complain);
- op0 = cp_fully_fold (op0);
- /* Only consider the second argument if the first isn't overflowed. */
- if (!CONSTANT_CLASS_P (op0) || TREE_OVERFLOW_P (op0))
- return result;
- op1 = cp_fully_fold (op1);
- if (!CONSTANT_CLASS_P (op1) || TREE_OVERFLOW_P (op1))
+ if (!processing_template_decl)
+ {
+ op0 = cp_fully_fold (op0);
+ /* Only consider the second argument if the first isn't overflowed. */
+ if (!CONSTANT_CLASS_P (op0) || TREE_OVERFLOW_P (op0))
+ return result;
+ op1 = cp_fully_fold (op1);
+ if (!CONSTANT_CLASS_P (op1) || TREE_OVERFLOW_P (op1))
+ return result;
+ }
+ else if (!CONSTANT_CLASS_P (op0) || !CONSTANT_CLASS_P (op1)
+ || TREE_OVERFLOW_P (op0) || TREE_OVERFLOW_P (op1))
return result;
- }
- else if (!CONSTANT_CLASS_P (op0) || !CONSTANT_CLASS_P (op1)
- || TREE_OVERFLOW_P (op0) || TREE_OVERFLOW_P (op1))
- return result;
- result_ovl = fold_build2 (resultcode, build_type, op0, op1);
- if (TREE_OVERFLOW_P (result_ovl))
- overflow_warning (location, result_ovl);
+ tree result_ovl = fold_build2 (resultcode, build_type, op0, op1);
+ if (TREE_OVERFLOW_P (result_ovl))
+ overflow_warning (location, result_ovl);
+ }
return result;
}
|| type_dependent_expression_p (arg1)
|| type_dependent_expression_p (arg2))
return build_min_nt_loc (loc, VEC_PERM_EXPR, arg0, arg1, arg2);
- arg0 = build_non_dependent_expr (arg0);
- if (arg1)
- arg1 = build_non_dependent_expr (arg1);
- arg2 = build_non_dependent_expr (arg2);
}
tree exp = c_build_vec_perm_expr (loc, arg0, arg1, arg2, complain & tf_error);
if (processing_template_decl && exp != error_mark_node)
if (processing_template_decl)
{
for (unsigned i = 0; i < args->length (); ++i)
- if (type_dependent_expression_p ((*args)[i]))
+ if (i <= 1
+ ? type_dependent_expression_p ((*args)[i])
+ : instantiation_dependent_expression_p ((*args)[i]))
{
tree exp = build_min_nt_call_vec (NULL, args);
CALL_EXPR_IFN (exp) = IFN_SHUFFLEVECTOR;
return exp;
}
- arg0 = build_non_dependent_expr (arg0);
- arg1 = build_non_dependent_expr (arg1);
- /* ??? Nothing needed for the index arguments? */
}
auto_vec<tree, 16> mask;
for (unsigned i = 2; i < args->length (); ++i)
{
- tree idx = maybe_constant_value ((*args)[i]);
+ tree idx = fold_non_dependent_expr ((*args)[i], complain);
mask.safe_push (idx);
}
tree exp = c_build_shufflevector (loc, arg0, arg1, mask, complain & tf_error);
TREE_TYPE (e) = build_dependent_operator_type (lookups, code, false);
return e;
}
-
- xarg = build_non_dependent_expr (xarg);
}
exp = NULL_TREE;
{
if (type_dependent_expression_p (arg))
return build_min_nt_loc (loc, ADDRESSOF_EXPR, arg, NULL_TREE);
-
- arg = build_non_dependent_expr (arg);
}
tree exp = cp_build_addr_expr_strict (arg, complain);
return error_mark_node;
}
+ /* Forming a pointer-to-member is a use of non-pure-virtual fns. */
+ if (TREE_CODE (t) == FUNCTION_DECL
+ && !DECL_PURE_VIRTUAL_P (t)
+ && !mark_used (t, complain) && !(complain & tf_error))
+ return error_mark_node;
+
type = build_ptrmem_type (context_for_name_lookup (t),
TREE_TYPE (t));
t = make_ptrmem_cst (type, t);
so we can just form an ADDR_EXPR with the correct type. */
if (processing_template_decl || TREE_CODE (arg) != COMPONENT_REF)
{
- tree stripped_arg = tree_strip_any_location_wrapper (arg);
- if (TREE_CODE (stripped_arg) == FUNCTION_DECL
- && !mark_used (stripped_arg, complain) && !(complain & tf_error))
+ if (!mark_single_function (arg, complain))
return error_mark_node;
val = build_address (arg);
if (TREE_CODE (arg) == OFFSET_REF)
complain);
}
- /* For addresses of immediate functions ensure we have EXPR_LOCATION
- set for possible later diagnostics. */
+ /* Ensure we have EXPR_LOCATION set for possible later diagnostics. */
if (TREE_CODE (val) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL
- && DECL_IMMEDIATE_FUNCTION_P (TREE_OPERAND (val, 0)))
+ && TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
SET_EXPR_LOCATION (val, input_location);
return val;
tree arg = xarg;
location_t location = cp_expr_loc_or_input_loc (arg);
tree argtype = 0;
+ tree eptype = NULL_TREE;
const char *errstring = NULL;
tree val;
const char *invalid_op_diag;
return error_mark_node;
}
+ if (TREE_CODE (arg) == EXCESS_PRECISION_EXPR)
+ {
+ eptype = TREE_TYPE (arg);
+ arg = TREE_OPERAND (arg, 0);
+ }
+
switch (code)
{
case UNARY_PLUS_EXPR:
build_zero_cst (TREE_TYPE (arg)), complain);
arg = perform_implicit_conversion (boolean_type_node, arg,
complain);
- val = invert_truthvalue_loc (location, arg);
if (arg != error_mark_node)
- return val;
+ {
+ if (processing_template_decl)
+ return build1_loc (location, TRUTH_NOT_EXPR, boolean_type_node, arg);
+ val = invert_truthvalue_loc (location, arg);
+ if (obvalue_p (val))
+ val = non_lvalue_loc (location, val);
+ return val;
+ }
errstring = _("in argument to unary !");
break;
case REALPART_EXPR:
case IMAGPART_EXPR:
- arg = build_real_imag_expr (input_location, code, arg);
- return arg;
+ val = build_real_imag_expr (input_location, code, arg);
+ if (eptype && TREE_CODE (eptype) == COMPLEX_EXPR)
+ val = build1_loc (input_location, EXCESS_PRECISION_EXPR,
+ TREE_TYPE (eptype), val);
+ return val;
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
val = unary_complex_lvalue (code, arg);
if (val != 0)
- return val;
+ goto return_build_unary_op;
arg = mark_lvalue_use (arg);
real = cp_build_unary_op (code, real, true, complain);
if (real == error_mark_node || imag == error_mark_node)
return error_mark_node;
- return build2 (COMPLEX_EXPR, TREE_TYPE (arg),
- real, imag);
+ val = build2 (COMPLEX_EXPR, TREE_TYPE (arg), real, imag);
+ goto return_build_unary_op;
}
/* Report invalid types. */
/* [depr.volatile.type] "Postfix ++ and -- expressions and
prefix ++ and -- expressions of volatile-qualified arithmetic
and pointer types are deprecated." */
- if (TREE_THIS_VOLATILE (arg) || CP_TYPE_VOLATILE_P (TREE_TYPE (arg)))
+ if ((TREE_THIS_VOLATILE (arg) || CP_TYPE_VOLATILE_P (TREE_TYPE (arg)))
+ && (complain & tf_warning))
warning_at (location, OPT_Wvolatile,
"%qs expression of %<volatile%>-qualified type is "
"deprecated",
return error_mark_node;
}
/* Otherwise, [depr.incr.bool] says this is deprecated. */
- else
+ else if (complain & tf_warning)
warning_at (location, OPT_Wdeprecated,
"use of an operand of type %qT "
"in %<operator++%> is deprecated",
val = build2 (code, TREE_TYPE (arg), arg, inc);
TREE_SIDE_EFFECTS (val) = 1;
- return val;
+ goto return_build_unary_op;
}
case ADDR_EXPR:
{
if (argtype == 0)
argtype = TREE_TYPE (arg);
- return build1 (code, argtype, arg);
+ val = build1 (code, argtype, arg);
+ return_build_unary_op:
+ if (eptype)
+ val = build1 (EXCESS_PRECISION_EXPR, eptype, val);
+ return val;
}
if (complain & tf_error)
|| (op1 && type_dependent_expression_p (op1))
|| type_dependent_expression_p (op2))
return build_min_nt_loc (loc, COND_EXPR, ifexp, op1, op2);
- ifexp = build_non_dependent_expr (ifexp);
- if (op1)
- op1 = build_non_dependent_expr (op1);
- op2 = build_non_dependent_expr (op2);
}
expr = build_conditional_expr (loc, ifexp, op1, op2, complain);
= build_dependent_operator_type (lookups, COMPOUND_EXPR, false);
return result;
}
- op1 = build_non_dependent_expr (op1);
- op2 = build_non_dependent_expr (op2);
}
result = build_new_op (loc, COMPOUND_EXPR, LOOKUP_NORMAL, op1, op2,
if (lhs == error_mark_node || rhs == error_mark_node)
return error_mark_node;
+ if (TREE_CODE (lhs) == EXCESS_PRECISION_EXPR)
+ lhs = TREE_OPERAND (lhs, 0);
+ tree eptype = NULL_TREE;
+ if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
+ {
+ eptype = TREE_TYPE (rhs);
+ rhs = TREE_OPERAND (rhs, 0);
+ }
+
if (TREE_CODE (rhs) == TARGET_EXPR)
{
/* If the rhs is a TARGET_EXPR, then build the compound
init = build2 (COMPOUND_EXPR, TREE_TYPE (init), lhs, init);
TREE_OPERAND (rhs, 1) = init;
+ if (eptype)
+ rhs = build1 (EXCESS_PRECISION_EXPR, eptype, rhs);
return rhs;
}
return error_mark_node;
}
- return build2 (COMPOUND_EXPR, TREE_TYPE (rhs), lhs, rhs);
+ tree ret = build2 (COMPOUND_EXPR, TREE_TYPE (rhs), lhs, rhs);
+ if (eptype)
+ ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);
+ return ret;
}
/* Issue a diagnostic message if casting from SRC_TYPE to DEST_TYPE
if (warn_useless_cast
&& complain & tf_warning)
{
- if ((TYPE_REF_P (type)
- && (TYPE_REF_IS_RVALUE (type)
- ? xvalue_p (expr) : lvalue_p (expr))
- && same_type_p (TREE_TYPE (expr), TREE_TYPE (type)))
- || same_type_p (TREE_TYPE (expr), type))
+ if (TYPE_REF_P (type)
+ ? ((TYPE_REF_IS_RVALUE (type)
+ ? xvalue_p (expr) : lvalue_p (expr))
+ && same_type_p (TREE_TYPE (expr), TREE_TYPE (type)))
+ /* Don't warn when converting a class object to a non-reference type,
+ because that's a common way to create a temporary. */
+ : (!glvalue_p (expr) && same_type_p (TREE_TYPE (expr), type)))
warning_at (loc, OPT_Wuseless_cast,
"useless cast to type %q#T", type);
}
Any expression can be explicitly converted to type cv void. */
if (VOID_TYPE_P (type))
- return convert_to_void (expr, ICV_CAST, complain);
+ {
+ if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+ return convert_to_void (expr, ICV_CAST, complain);
+ }
/* [class.abstract]
An abstract class shall not be used ... as the type of an explicit
conversion. */
- if (abstract_virtuals_error_sfinae (ACU_CAST, type, complain))
+ if (abstract_virtuals_error (ACU_CAST, type, complain))
return error_mark_node;
/* [expr.static.cast]
{
if (processing_template_decl)
return expr;
+ if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+ /* [expr.static.cast]: "If the value is not a bit-field, the result
+ refers to the object or the specified base class subobject thereof;
+ otherwise, the lvalue-to-rvalue conversion is applied to the
+ bit-field and the resulting prvalue is used as the operand of the
+ static_cast." There are no prvalue bit-fields; the l-to-r conversion
+ will give us an object of the underlying type of the bit-field. */
+ expr = decay_conversion (expr, complain);
return ocp_convert (type, expr, CONV_C_CAST, LOOKUP_NORMAL, complain);
}
protected_set_expr_location (result, loc);
return result;
}
- else if (processing_template_decl)
- expr = build_non_dependent_expr (expr);
/* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
Strip such NOP_EXPRs if VALUE is being used in non-lvalue context. */
maybe_warn_about_useless_cast (loc, type, value, complain);
maybe_warn_about_cast_ignoring_quals (loc, type, complain);
}
+ else if (complain & tf_error)
+ build_const_cast_1 (loc, type, value, tf_error, &valid_p);
return result;
}
to succeed. */
if (!same_type_p (non_reference (type), non_reference (result_type)))
{
- result = build_const_cast_1 (loc, type, result, false, &valid_p);
+ result = build_const_cast_1 (loc, type, result, tf_none, &valid_p);
gcc_assert (valid_p);
}
return result;
return error_mark_node;
}
-\f
+
+/* Warn when a value is moved to itself with std::move. LHS is the target,
+ RHS may be the std::move call, and LOC is the location of the whole
+ assignment. */
+
+static void
+maybe_warn_self_move (location_t loc, tree lhs, tree rhs)
+{
+ if (!warn_self_move)
+ return;
+
+ /* C++98 doesn't know move. */
+ if (cxx_dialect < cxx11)
+ return;
+
+ /* Don't try to compare possibly-dependent operands; presumably this
+ warning fires again at instantiation time -- TODO confirm. */
+ if (processing_template_decl)
+ return;
+
+ /* A std::move call shows up here as *std::move (...), i.e. an implicit
+ dereference (REFERENCE_REF) wrapped around the CALL_EXPR. */
+ if (!REFERENCE_REF_P (rhs)
+ || TREE_CODE (TREE_OPERAND (rhs, 0)) != CALL_EXPR)
+ return;
+ /* FN is the whole CALL_EXPR; that is what is_std_move_p expects. */
+ tree fn = TREE_OPERAND (rhs, 0);
+ if (!is_std_move_p (fn))
+ return;
+
+ /* Just a little helper to strip * and various NOPs. */
+ auto extract_op = [] (tree &op) {
+ STRIP_NOPS (op);
+ while (INDIRECT_REF_P (op))
+ op = TREE_OPERAND (op, 0);
+ op = maybe_undo_parenthesized_ref (op);
+ STRIP_ANY_LOCATION_WRAPPER (op);
+ };
+
+ tree arg = CALL_EXPR_ARG (fn, 0);
+ extract_op (arg);
+ /* Strip a remaining ADDR_EXPR so we compare the underlying object. */
+ if (TREE_CODE (arg) == ADDR_EXPR)
+ arg = TREE_OPERAND (arg, 0);
+ /* Save the type and the original expression for the diagnostic before
+ extract_op mutates LHS in place (it takes its argument by reference). */
+ tree type = TREE_TYPE (lhs);
+ tree orig_lhs = lhs;
+ extract_op (lhs);
+ if (cp_tree_equal (lhs, arg))
+ {
+ auto_diagnostic_group d;
+ if (warning_at (loc, OPT_Wself_move,
+ "moving %qE of type %qT to itself", orig_lhs, type))
+ inform (loc, "remove %<std::move%> call");
+ }
+}
+
/* For use from the C common bits. */
tree
build_modify_expr (location_t location,
if (! same_type_p (TREE_TYPE (rhs), lhstype))
/* Call convert to generate an error; see PR 11063. */
rhs = convert (lhstype, rhs);
- result = build2 (INIT_EXPR, lhstype, lhs, rhs);
+ result = cp_build_init_expr (lhs, rhs);
TREE_SIDE_EFFECTS (result) = 1;
goto ret;
}
}
else
{
- lhs = require_complete_type_sfinae (lhs, complain);
+ lhs = require_complete_type (lhs, complain);
if (lhs == error_mark_node)
return error_mark_node;
if (modifycode == NOP_EXPR)
{
+ maybe_warn_self_move (loc, lhs, rhs);
+
if (c_dialect_objc ())
{
result = objc_maybe_build_modify_expr (lhs, rhs);
&& MAYBE_CLASS_TYPE_P (TREE_TYPE (lhstype)))
|| MAYBE_CLASS_TYPE_P (lhstype)));
- /* An expression of the form E1 op= E2. [expr.ass] says:
- "Such expressions are deprecated if E1 has volatile-qualified
- type." We warn here rather than in cp_genericize_r because
- for compound assignments we are supposed to warn even if the
- assignment is a discarded-value expression. */
- if (TREE_THIS_VOLATILE (lhs) || CP_TYPE_VOLATILE_P (lhstype))
- warning_at (loc, OPT_Wvolatile,
- "compound assignment with %<volatile%>-qualified left "
- "operand is deprecated");
/* Preevaluate the RHS to make sure its evaluation is complete
before the lvalue-to-rvalue conversion of the LHS:
}
/* Allow array assignment in compiler-generated code. */
+ else if (DECL_P (lhs) && DECL_ARTIFICIAL (lhs))
+ /* OK, used by coroutines (co-await-initlist1.C). */;
else if (!current_function_decl
|| !DECL_DEFAULTED_FN (current_function_decl))
{
result = build2_loc (loc, modifycode == NOP_EXPR ? MODIFY_EXPR : INIT_EXPR,
lhstype, lhs, newrhs);
+ if (modifycode == INIT_EXPR)
+ set_target_expr_eliding (newrhs);
TREE_SIDE_EFFECTS (result) = 1;
if (!plain_assign)
if (lhs == error_mark_node || rhs == error_mark_node)
return cp_expr (error_mark_node, loc);
+ tree op = build_min (modifycode, void_type_node, NULL_TREE, NULL_TREE);
+
if (processing_template_decl)
{
- if (modifycode == NOP_EXPR
- || type_dependent_expression_p (lhs)
+ if (type_dependent_expression_p (lhs)
|| type_dependent_expression_p (rhs))
{
- tree op = build_min_nt_loc (loc, modifycode, NULL_TREE, NULL_TREE);
tree rval = build_min_nt_loc (loc, MODOP_EXPR, lhs, op, rhs);
if (modifycode != NOP_EXPR)
TREE_TYPE (rval)
= build_dependent_operator_type (lookups, modifycode, true);
return rval;
}
-
- lhs = build_non_dependent_expr (lhs);
- rhs = build_non_dependent_expr (rhs);
}
- if (modifycode != NOP_EXPR)
+ tree rval;
+ if (modifycode == NOP_EXPR)
+ rval = cp_build_modify_expr (loc, lhs, modifycode, rhs, complain);
+ else
+ rval = build_new_op (loc, MODIFY_EXPR, LOOKUP_NORMAL,
+ lhs, rhs, op, lookups, &overload, complain);
+ if (rval == error_mark_node)
+ return error_mark_node;
+ if (processing_template_decl)
{
- tree op = build_nt (modifycode, NULL_TREE, NULL_TREE);
- tree rval = build_new_op (loc, MODIFY_EXPR, LOOKUP_NORMAL,
- lhs, rhs, op, lookups, &overload, complain);
- if (rval)
- {
- if (rval == error_mark_node)
- return rval;
- suppress_warning (rval /* What warning? */);
- if (processing_template_decl)
- {
- if (overload != NULL_TREE)
- return (build_min_non_dep_op_overload
- (MODIFY_EXPR, rval, overload, orig_lhs, orig_rhs));
+ if (overload != NULL_TREE)
+ return (build_min_non_dep_op_overload
+ (MODIFY_EXPR, rval, overload, orig_lhs, orig_rhs));
- return (build_min_non_dep
- (MODOP_EXPR, rval, orig_lhs, op, orig_rhs));
- }
- return rval;
- }
+ return (build_min_non_dep
+ (MODOP_EXPR, rval, orig_lhs, op, orig_rhs));
}
- return cp_build_modify_expr (loc, lhs, modifycode, rhs, complain);
+ return rval;
}
/* Helper function for get_delta_difference which assumes FROM is a base
if (n == error_mark_node)
return error_mark_node;
+ STRIP_ANY_LOCATION_WRAPPER (pfn);
+
/* We don't have to do any conversion to convert a
pointer-to-member to its own type. But, we don't want to
just return a PTRMEM_CST if there's an explicit cast; that
cast should make the expression an invalid template argument. */
- if (TREE_CODE (pfn) != PTRMEM_CST)
- {
- if (same_type_p (to_type, pfn_type))
- return pfn;
- else if (integer_zerop (n) && TREE_CODE (pfn) != CONSTRUCTOR)
- return build_reinterpret_cast (input_location, to_type, pfn,
- complain);
- }
+ if (TREE_CODE (pfn) != PTRMEM_CST
+ && same_type_p (to_type, pfn_type))
+ return pfn;
if (TREE_SIDE_EFFECTS (pfn))
pfn = save_expr (pfn);
{
range_label_for_type_mismatch label (rhstype, type);
gcc_rich_location richloc (rhs_loc, has_loc ? &label : NULL);
+ auto_diagnostic_group d;
+
switch (errtype)
{
case ICR_DEFAULT_ARGUMENT:
gcc_unreachable();
}
}
+
+ /* See if we can be more helpful. */
+ maybe_show_nonconverting_candidate (type, rhstype, rhs, flags);
+
if (TYPE_PTR_P (rhstype)
&& TYPE_PTR_P (type)
&& CLASS_TYPE_P (TREE_TYPE (rhstype))
}
}
- /* If -Wparentheses, warn about a = b = c when a has type bool and b
- does not. */
- if (warn_parentheses
- && TREE_CODE (type) == BOOLEAN_TYPE
- && TREE_CODE (rhs) == MODIFY_EXPR
- && !warning_suppressed_p (rhs, OPT_Wparentheses)
- && TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE
- && (complain & tf_warning)
- && warning_at (rhs_loc, OPT_Wparentheses,
- "suggest parentheses around assignment used as "
- "truth value"))
- suppress_warning (rhs, OPT_Wparentheses);
+ if (TREE_CODE (type) == BOOLEAN_TYPE)
+ maybe_warn_unparenthesized_assignment (rhs, /*nested_p=*/true, complain);
if (complain & tf_warning)
- warn_for_address_or_pointer_of_packed_member (type, rhs);
+ warn_for_address_of_packed_member (type, rhs);
return perform_implicit_conversion_flags (strip_top_quals (type), rhs,
complain, flags);
}
if (exp != 0)
- exp = require_complete_type_sfinae (exp, complain);
+ exp = require_complete_type (exp, complain);
if (exp == error_mark_node)
return error_mark_node;
if (TYPE_REF_P (valtype))
warning_at (loc, OPT_Wreturn_local_addr,
"returning reference to temporary");
+ else if (TYPE_PTR_P (valtype))
+ warning_at (loc, OPT_Wreturn_local_addr,
+ "returning pointer to temporary");
else if (is_std_init_list (valtype))
warning_at (loc, OPT_Winit_list_lifetime,
"returning temporary %<initializer_list%> does not extend "
/* The cv-unqualified type of the returned value must be the
same as the cv-unqualified return type of the
function. */
- && same_type_p ((TYPE_MAIN_VARIANT (TREE_TYPE (retval))),
- (TYPE_MAIN_VARIANT (functype)))
+ && same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (retval)),
+ TYPE_MAIN_VARIANT (functype))
/* And the returned value must be non-volatile. */
&& !TYPE_VOLATILE (TREE_TYPE (retval)));
}
+/* True if we would like to perform NRVO, i.e. can_do_nrvo_p is true and we
+ would otherwise return in memory. */
+
+static bool
+want_nrvo_p (tree retval, tree functype)
+{
+ /* aggregate_value_p is true when the value of type FUNCTYPE would be
+ returned in memory rather than in registers; eliding the copy only
+ matters in that case. */
+ return (can_do_nrvo_p (retval, functype)
+ && aggregate_value_p (functype, current_function_decl));
+}
+
+/* Like can_do_nrvo_p, but we check if we're trying to move a class
+ prvalue. */
+
+static bool
+can_elide_copy_prvalue_p (tree retval, tree functype)
+{
+ if (functype == error_mark_node)
+ return false;
+ if (retval)
+ STRIP_ANY_LOCATION_WRAPPER (retval);
+ /* !glvalue_p means RETVAL is a prvalue; the remaining checks mirror
+ can_do_nrvo_p: same cv-unqualified type as FUNCTYPE, and the value
+ must not be volatile. */
+ return (retval != NULL_TREE
+ && !glvalue_p (retval)
+ && same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (retval)),
+ TYPE_MAIN_VARIANT (functype))
+ && !TYPE_VOLATILE (TREE_TYPE (retval)));
+}
+
/* If we should treat RETVAL, an expression being returned, as if it were
designated by an rvalue, returns it adjusted accordingly; otherwise, returns
NULL_TREE. See [class.copy.elision]. RETURN_P is true if this is a return
if (DECL_CONTEXT (retval) != current_function_decl)
return NULL_TREE;
if (return_p)
- return set_implicit_rvalue_p (move (expr));
+ {
+ expr = move (expr);
+ if (expr == error_mark_node)
+ return NULL_TREE;
+ return set_implicit_rvalue_p (expr);
+ }
/* if the operand of a throw-expression is a (possibly parenthesized)
id-expression that names an implicitly movable entity whose scope does not
}
}
-/* Warn about wrong usage of std::move in a return statement. RETVAL
- is the expression we are returning; FUNCTYPE is the type the function
- is declared to return. */
+/* Warn about dubious usage of std::move (in a return statement, if RETURN_P
+ is true). EXPR is the std::move expression; TYPE is the type of the object
+ being initialized. */
-static void
-maybe_warn_pessimizing_move (tree retval, tree functype)
+void
+maybe_warn_pessimizing_move (tree expr, tree type, bool return_p)
{
if (!(warn_pessimizing_move || warn_redundant_move))
return;
- location_t loc = cp_expr_loc_or_input_loc (retval);
+ const location_t loc = cp_expr_loc_or_input_loc (expr);
/* C++98 doesn't know move. */
if (cxx_dialect < cxx11)
return;
/* This is only interesting for class types. */
- if (!CLASS_TYPE_P (functype))
+ if (!CLASS_TYPE_P (type))
+ return;
+
+ bool wrapped_p = false;
+ /* A a = std::move (A()); */
+ if (TREE_CODE (expr) == TREE_LIST)
+ {
+ if (list_length (expr) == 1)
+ {
+ expr = TREE_VALUE (expr);
+ wrapped_p = true;
+ }
+ else
+ return;
+ }
+ /* A a = {std::move (A())};
+ A a{std::move (A())}; */
+ else if (TREE_CODE (expr) == CONSTRUCTOR)
+ {
+ if (CONSTRUCTOR_NELTS (expr) == 1)
+ {
+ expr = CONSTRUCTOR_ELT (expr, 0)->value;
+ wrapped_p = true;
+ }
+ else
+ return;
+ }
+
+ /* First, check if this is a call to std::move. */
+ if (!REFERENCE_REF_P (expr)
+ || TREE_CODE (TREE_OPERAND (expr, 0)) != CALL_EXPR)
+ return;
+ tree fn = TREE_OPERAND (expr, 0);
+ if (!is_std_move_p (fn))
+ return;
+ tree arg = CALL_EXPR_ARG (fn, 0);
+ if (TREE_CODE (arg) != NOP_EXPR)
return;
+ /* If we're looking at *std::move<T&> ((T &) &arg), do the pessimizing N/RVO
+ and implicitly-movable warnings. */
+ if (TREE_CODE (TREE_OPERAND (arg, 0)) == ADDR_EXPR)
+ {
+ arg = TREE_OPERAND (arg, 0);
+ arg = TREE_OPERAND (arg, 0);
+ arg = convert_from_reference (arg);
+ if (can_elide_copy_prvalue_p (arg, type))
+ {
+ auto_diagnostic_group d;
+ if (warning_at (loc, OPT_Wpessimizing_move,
+ "moving a temporary object prevents copy elision"))
+ inform (loc, "remove %<std::move%> call");
+ }
+ /* The rest of the warnings are only relevant when we are returning
+ from a function. */
+ if (!return_p)
+ return;
- /* We're looking for *std::move<T&> ((T &) &arg). */
- if (REFERENCE_REF_P (retval)
- && TREE_CODE (TREE_OPERAND (retval, 0)) == CALL_EXPR)
- {
- tree fn = TREE_OPERAND (retval, 0);
- if (is_std_move_p (fn))
- {
- tree arg = CALL_EXPR_ARG (fn, 0);
- tree moved;
- if (TREE_CODE (arg) != NOP_EXPR)
- return;
- arg = TREE_OPERAND (arg, 0);
- if (TREE_CODE (arg) != ADDR_EXPR)
- return;
- arg = TREE_OPERAND (arg, 0);
- arg = convert_from_reference (arg);
- /* Warn if we could do copy elision were it not for the move. */
- if (can_do_nrvo_p (arg, functype))
+ tree moved;
+ /* Warn if we could do copy elision were it not for the move. */
+ if (can_do_nrvo_p (arg, type))
+ {
+ auto_diagnostic_group d;
+ if (!warning_suppressed_p (expr, OPT_Wpessimizing_move)
+ && warning_at (loc, OPT_Wpessimizing_move,
+ "moving a local object in a return statement "
+ "prevents copy elision"))
+ inform (loc, "remove %<std::move%> call");
+ }
+ /* Warn if the move is redundant. It is redundant when we would
+ do maybe-rvalue overload resolution even without std::move. */
+ else if (warn_redundant_move
+ /* This doesn't apply for return {std::move (t)};. */
+ && !wrapped_p
+ && !warning_suppressed_p (expr, OPT_Wredundant_move)
+ && (moved = treat_lvalue_as_rvalue_p (arg, /*return*/true)))
+ {
+ /* Make sure that overload resolution would actually succeed
+ if we removed the std::move call. */
+ tree t = convert_for_initialization (NULL_TREE, type,
+ moved,
+ (LOOKUP_NORMAL
+ | LOOKUP_ONLYCONVERTING),
+ ICR_RETURN, NULL_TREE, 0,
+ tf_none);
+ /* If this worked, implicit rvalue would work, so the call to
+ std::move is redundant. */
+ if (t != error_mark_node)
{
auto_diagnostic_group d;
- if (warning_at (loc, OPT_Wpessimizing_move,
- "moving a local object in a return statement "
- "prevents copy elision"))
+ if (warning_at (loc, OPT_Wredundant_move,
+ "redundant move in return statement"))
inform (loc, "remove %<std::move%> call");
}
- /* Warn if the move is redundant. It is redundant when we would
- do maybe-rvalue overload resolution even without std::move. */
- else if (warn_redundant_move
- && (moved = treat_lvalue_as_rvalue_p (arg, /*return*/true)))
- {
- /* Make sure that the overload resolution would actually succeed
- if we removed the std::move call. */
- tree t = convert_for_initialization (NULL_TREE, functype,
- moved,
- (LOOKUP_NORMAL
- | LOOKUP_ONLYCONVERTING
- | LOOKUP_PREFER_RVALUE),
- ICR_RETURN, NULL_TREE, 0,
- tf_none);
- /* If this worked, implicit rvalue would work, so the call to
- std::move is redundant. */
- if (t != error_mark_node)
- {
- auto_diagnostic_group d;
- if (warning_at (loc, OPT_Wredundant_move,
- "redundant move in return statement"))
- inform (loc, "remove %<std::move%> call");
- }
- }
}
+ }
+ /* Also try to warn about redundant std::move in code such as
+ T f (const T& t)
+ {
+ return std::move(t);
+ }
+ for which EXPR will be something like
+ *std::move<const T&> ((const struct T &) (const struct T *) t)
+ and where the std::move does nothing if T does not have a T(const T&&)
+ constructor, because the argument is const. It will not use T(T&&)
+ because that would mean losing the const. */
+ else if (warn_redundant_move
+ && !warning_suppressed_p (expr, OPT_Wredundant_move)
+ && TYPE_REF_P (TREE_TYPE (arg))
+ && CP_TYPE_CONST_P (TREE_TYPE (TREE_TYPE (arg))))
+ {
+ tree rtype = TREE_TYPE (TREE_TYPE (arg));
+ if (!same_type_ignoring_top_level_qualifiers_p (rtype, type))
+ return;
+ /* Check for the unlikely case there's T(const T&&) (we don't care if
+ it's deleted). */
+ for (tree fn : ovl_range (CLASSTYPE_CONSTRUCTORS (rtype)))
+ if (move_fn_p (fn))
+ {
+ tree t = TREE_VALUE (FUNCTION_FIRST_USER_PARMTYPE (fn));
+ if (UNLIKELY (CP_TYPE_CONST_P (TREE_TYPE (t))))
+ return;
+ }
+ auto_diagnostic_group d;
+ if (return_p
+ ? warning_at (loc, OPT_Wredundant_move,
+ "redundant move in return statement")
+ : warning_at (loc, OPT_Wredundant_move,
+ "redundant move in initialization"))
+ inform (loc, "remove %<std::move%> call");
}
}
change RETVAL into the function return type, and to assign it to
the DECL_RESULT for the function. Set *NO_WARNING to true if
code reaches end of non-void function warning shouldn't be issued
- on this RETURN_EXPR. */
+ on this RETURN_EXPR. Set *DANGLING to true if code returns the
+ address of a local variable. */
tree
-check_return_expr (tree retval, bool *no_warning)
+check_return_expr (tree retval, bool *no_warning, bool *dangling)
{
tree result;
/* The type actually returned by the function. */
location_t loc = cp_expr_loc_or_input_loc (retval);
*no_warning = false;
+ *dangling = false;
/* A `volatile' function is one that isn't supposed to return, ever.
(This is a G++ extension, used to get better code for functions
{
if (retval)
error_at (loc, "returning a value from a destructor");
- return NULL_TREE;
+
+ if (targetm.cxx.cdtor_returns_this () && !processing_template_decl)
+ retval = current_class_ptr;
+ else
+ return NULL_TREE;
}
else if (DECL_CONSTRUCTOR_P (current_function_decl))
{
else if (retval)
/* You can't return a value from a constructor. */
error_at (loc, "returning a value from a constructor");
- return NULL_TREE;
+
+ if (targetm.cxx.cdtor_returns_this () && !processing_template_decl)
+ retval = current_class_ptr;
+ else
+ return NULL_TREE;
}
const tree saved_retval = retval;
So, if this is a value-returning function that always returns the same
local variable, remember it.
- It might be nice to be more flexible, and choose the first suitable
- variable even if the function sometimes returns something else, but
- then we run the risk of clobbering the variable we chose if the other
- returned expression uses the chosen variable somehow. And people expect
- this restriction, anyway. (jason 2000-11-19)
+ We choose the first suitable variable even if the function sometimes
+ returns something else, but only if the variable is out of scope at the
+ other return sites, or else we run the risk of clobbering the variable we
+ chose if the other returned expression uses the chosen variable somehow.
+
+ We don't currently do this if the first return is a non-variable, as it
+ would be complicated to determine whether an NRV selected later was in
+ scope at the point of the earlier return. We also don't currently support
+ multiple variables with non-overlapping scopes (53637).
See finish_function and finalize_nrv for the rest of this optimization. */
tree bare_retval = NULL_TREE;
bare_retval = tree_strip_any_location_wrapper (retval);
}
- bool named_return_value_okay_p = can_do_nrvo_p (bare_retval, functype);
- if (fn_returns_value_p && flag_elide_constructors)
+ bool named_return_value_okay_p = want_nrvo_p (bare_retval, functype);
+ if (fn_returns_value_p && flag_elide_constructors
+ && current_function_return_value != bare_retval)
{
if (named_return_value_okay_p
- && (current_function_return_value == NULL_TREE
- || current_function_return_value == bare_retval))
+ && current_function_return_value == NULL_TREE)
current_function_return_value = bare_retval;
+ else if (current_function_return_value
+ && VAR_P (current_function_return_value)
+ && DECL_NAME (current_function_return_value)
+ && !decl_in_scope_p (current_function_return_value))
+ {
+ /* The earlier NRV is out of scope at this point, so it's safe to
+ leave it alone; the current return can't refer to it. */;
+ if (named_return_value_okay_p
+ && !warning_suppressed_p (current_function_decl, OPT_Wnrvo))
+ {
+ warning (OPT_Wnrvo, "not eliding copy on return from %qD",
+ bare_retval);
+ suppress_warning (current_function_decl, OPT_Wnrvo);
+ }
+ }
else
- current_function_return_value = error_mark_node;
+ {
+ if ((named_return_value_okay_p
+ || (current_function_return_value
+ && current_function_return_value != error_mark_node))
+ && !warning_suppressed_p (current_function_decl, OPT_Wnrvo))
+ {
+ warning (OPT_Wnrvo, "not eliding copy on return in %qD",
+ current_function_decl);
+ suppress_warning (current_function_decl, OPT_Wnrvo);
+ }
+ current_function_return_value = error_mark_node;
+ }
}
/* We don't need to do any conversions when there's nothing being
return NULL_TREE;
if (!named_return_value_okay_p)
- maybe_warn_pessimizing_move (retval, functype);
+ maybe_warn_pessimizing_move (retval, functype, /*return_p*/true);
/* Do any required conversions. */
if (bare_retval == result || DECL_CONSTRUCTOR_P (current_function_decl))
if (VOID_TYPE_P (functype))
return error_mark_node;
- if (processing_template_decl)
- retval = build_non_dependent_expr (retval);
-
/* Under C++11 [12.8/32 class.copy], a returned lvalue is sometimes
treated as an rvalue for the purposes of overload resolution to
favor move constructors over copy constructors.
the conditions for the named return value optimization. */
bool converted = false;
tree moved;
- /* This is only interesting for class type. */
- if (CLASS_TYPE_P (functype)
- && (moved = treat_lvalue_as_rvalue_p (retval, /*return*/true)))
- {
- if (cxx_dialect < cxx20)
- {
- moved = convert_for_initialization
- (NULL_TREE, functype, moved, flags|LOOKUP_PREFER_RVALUE,
- ICR_RETURN, NULL_TREE, 0, tf_none);
- if (moved != error_mark_node)
- {
- retval = moved;
- converted = true;
- }
- }
- else
- /* In C++20 we just treat the return value as an rvalue that
- can bind to lvalue refs. */
- retval = moved;
- }
+ /* Until C++23, this was only interesting for class type, but in C++23,
+ we should do the below when we're converting from/to a class/reference
+ (a non-scalar type). */
+ if ((cxx_dialect < cxx23
+ ? CLASS_TYPE_P (functype)
+ : !SCALAR_TYPE_P (functype) || !SCALAR_TYPE_P (TREE_TYPE (retval)))
+ && (moved = treat_lvalue_as_rvalue_p (retval, /*return*/true)))
+ /* In C++20 and earlier we treat the return value as an rvalue
+ that can bind to lvalue refs. In C++23, such an expression is just
+ an xvalue (see reference_binding). */
+ retval = moved;
/* The call in a (lambda) thunk needs no conversions. */
if (TREE_CODE (retval) == CALL_EXPR
else if (!processing_template_decl
&& maybe_warn_about_returning_address_of_local (retval, loc)
&& INDIRECT_TYPE_P (valtype))
- retval = build2 (COMPOUND_EXPR, TREE_TYPE (retval), retval,
- build_zero_cst (TREE_TYPE (retval)));
+ *dangling = true;
}
+ /* A naive attempt to reduce the number of -Wdangling-reference false
+ positives: if we know that this function can return a variable with
+ static storage duration rather than one of its parameters, suppress
+ the warning. */
+ if (warn_dangling_reference
+ && TYPE_REF_P (functype)
+ && bare_retval
+ && VAR_P (bare_retval)
+ && TREE_STATIC (bare_retval))
+ suppress_warning (current_function_decl, OPT_Wdangling_reference);
+
if (processing_template_decl)
return saved_retval;
/* Actually copy the value returned into the appropriate location. */
if (retval && retval != result)
- retval = build2 (INIT_EXPR, TREE_TYPE (result), result, retval);
+ {
+ /* If there's a postcondition for a scalar return value, wrap
+ retval in a call to the postcondition function. */
+ if (tree post = apply_postcondition_to_return (retval))
+ retval = post;
+ retval = cp_build_init_expr (result, retval);
+ }
+
+ if (current_function_return_value == bare_retval)
+ INIT_EXPR_NRV_P (retval) = true;
if (tree set = maybe_set_retval_sentinel ())
retval = build2 (COMPOUND_EXPR, void_type_node, retval, set);
+ /* If there's a postcondition for an aggregate return value, call the
+ postcondition function after the return object is initialized. */
+ if (tree post = apply_postcondition_to_return (result))
+ retval = build2 (COMPOUND_EXPR, void_type_node, retval, post);
+
return retval;
}