+2019-10-16 Richard Biener <rguenther@suse.de>
+
+ Backport from mainline
+ 2019-10-04 Richard Biener <rguenther@suse.de>
+
+ PR lto/91968
+ * tree.c (find_decls_types_r): Do not remove LABEL_DECLs from
+ BLOCK_VARS.
+
+ 2019-09-19 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/91812
+ * tree-ssa-phiprop.c (propagate_with_phi): Do not replace
+ volatile loads.
+
+ 2019-09-17 Richard Biener <rguenther@suse.de>
+
+ PR debug/91772
+ * dwarf2out.c (dwarf2out_late_global_decl): If early dwarf
+	was missing, generate locations only once.
+
+ 2019-09-17 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/91790
+ * tree-vect-stmts.c (vectorizable_load): For BB vectorization
+ use the correct DR for setting up realignment.
+
2019-10-14 Will Schmidt <will_schmidt@vnet.ibm.com>
Backport from trunk
+2019-10-16 Richard Biener <rguenther@suse.de>
+
+ Backport from mainline
+ 2019-10-02 Richard Biener <rguenther@suse.de>
+
+ PR c++/91606
+ * decl.c (build_ptrmemfunc_type): Mark pointer-to-member
+ fat pointer structure members as DECL_NONADDRESSABLE_P.
+
2019-10-08 Marek Polacek <polacek@redhat.com>
Backported from mainline
TYPE_PTRMEMFUNC_FLAG (t) = 1;
field = build_decl (input_location, FIELD_DECL, pfn_identifier, type);
+ DECL_NONADDRESSABLE_P (field) = 1;
fields = field;
field = build_decl (input_location, FIELD_DECL, delta_identifier,
delta_type_node);
+ DECL_NONADDRESSABLE_P (field) = 1;
DECL_CHAIN (field) = fields;
fields = field;
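For orientation, the object build_ptrmemfunc_type lays out above is the C++
"fat" pointer to member function.  A rough sketch, with illustrative names
(only pfn_identifier and delta_identifier come from the code above):

struct ptrmemfunc_sketch
{
  void (*pfn) ();  /* function pointer, or vtable index for virtual calls */
  long delta;      /* adjustment added to the `this' pointer */
};

Setting DECL_NONADDRESSABLE_P on both fields makes component accesses use
the alias set of the enclosing pointer-to-member type rather than the field
types, so reads through one union alternative (as in the pr91606.C test
below) are no longer wrongly disambiguated against stores through another.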
{
dw_die_ref die = lookup_decl_die (decl);
- /* We may have to generate early debug late for LTO in case debug
+ /* We may have to generate full debug late for LTO in case debug
was not enabled at compile-time or the target doesn't support
the LTO early debug scheme. */
if (! die && in_lto_p)
- {
- dwarf2out_decl (decl);
- die = lookup_decl_die (decl);
- }
-
- if (die)
+ dwarf2out_decl (decl);
+ else if (die)
{
/* We get called via the symtab code invoking late_global_decl
for symbols that are optimized out.
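Read together with the updated comment, the new control flow generates
locations only once: when LTO has no early DIE, dwarf2out_decl creates the
complete DIE, locations included, and the late-location code is skipped;
only a DIE that already exists from early debug takes the else branch.
Previously the freshly created DIE also fell through into that code and
had locations generated a second time (PR debug/91772).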
+2019-10-16 Richard Biener <rguenther@suse.de>
+
+ Backport from mainline
+ 2019-10-02 Richard Biener <rguenther@suse.de>
+
+ PR c++/91606
+ * g++.dg/torture/pr91606.C: New testcase.
+
+ 2019-09-19 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/91812
+ * gcc.dg/torture/pr91812.c: New testcase.
+
2019-10-14 Will Schmidt <will_schmidt@vnet.ibm.com>
Backport from trunk.
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr91606.C
+/* { dg-do run } */
+/* { dg-additional-options "-fstrict-aliasing" } */
+
+#include <cstdlib>
+#include <array>
+#include <type_traits>
+
+template <typename T1, typename T2>
+struct variant
+{
+ constexpr variant(T1 arg)
+ : f1(arg),
+ index(0)
+ {}
+
+ constexpr variant(T2 arg)
+ : f2(arg),
+ index(1)
+ {}
+
+ union
+ {
+ T1 f1;
+ T2 f2;
+ };
+ std::size_t index = 0;
+};
+
+template <typename T1, typename T2>
+constexpr const T1* get_if(const variant<T1, T2>* v)
+{
+ if (v->index != 0)
+ {
+ return nullptr;
+ }
+ return &v->f1;
+}
+
+template <typename T2, typename T1>
+constexpr const T2* get_if(const variant<T1, T2>* v)
+{
+ if (v->index != 1)
+ {
+ return nullptr;
+ }
+ return &v->f2;
+}
+
+template <typename T, size_t N>
+struct my_array
+{
+ constexpr const T* begin() const
+ {
+ return data;
+ }
+
+ constexpr const T* end() const
+ {
+ return data + N;
+ }
+
+ T data[N];
+};
+
+template <typename ...Ts>
+constexpr auto get_array_of_variants(Ts ...ptrs)
+{
+ return std::array<variant<std::decay_t<Ts>...>, sizeof...(Ts)>{ ptrs... };
+}
+
+template <typename T>
+constexpr auto get_member_functions();
+
+template <typename Member, typename Class>
+constexpr int getFuncId(Member (Class::*memFuncPtr))
+{
+ int idx = 0u;
+ for (auto &anyFunc : get_member_functions<Class>())
+ {
+ if (auto *specificFunc = get_if<Member (Class::*)>(&anyFunc))
+ {
+ if (*specificFunc == memFuncPtr)
+ {
+ return idx;
+ }
+ }
+ ++idx;
+ }
+ std::abort();
+}
+
+struct MyStruct
+{
+ void fun1(int /*a*/) {}
+
+ int fun2(char /*b*/, short /*c*/, bool /*d*/) { return 0; }
+
+};
+
+template <>
+constexpr auto get_member_functions<MyStruct>()
+{
+ return get_array_of_variants(&MyStruct::fun1, &MyStruct::fun2);
+}
+
+int main()
+{
+ return getFuncId(&MyStruct::fun1);
+}
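// In brief (a reading of the testcase, not comment text from the patch):
// getFuncId stores pointers to fun1/fun2 into the union members of
// variant and reads them back member-wise through get_if under
// -fstrict-aliasing.  Before the fix above, those member accesses got
// the alias sets of the individual field types, could be wrongly
// disambiguated against the initializing stores, and the lookup of
// &MyStruct::fun1 could miss and reach std::abort.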
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr91812.c
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+/* { dg-options "-fdump-tree-optimized-blocks" } */
+
+unsigned register1;
+unsigned register2;
+
+void busy_wait_for_register (int x)
+{
+ volatile unsigned* ptr;
+ switch(x) {
+ case 0x1111:
+      ptr = &register1;
+ break;
+
+ case 0x2222:
+      ptr = &register2;
+ break;
+
+ default:
+ return;
+ }
+ while (*ptr) {}
+}
+
+/* { dg-final { scan-tree-dump "loop depth 1" "optimized" } } */
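/* What the fixed phiprop transform would otherwise do here, sketched:
   ptr is a PHI of &register1/&register2 and feeds the volatile load in
   "while (*ptr)".  Propagating the load to the PHI arguments yields
   roughly

     tmp = register1;     (resp. register2)
     ...
     while (tmp) {}

   i.e. one volatile load before the loop instead of one per iteration.
   The dg-final scan checks that a loop of depth 1 is still present in
   the optimized dump.  */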
&& (!type
|| types_compatible_p
(TREE_TYPE (gimple_assign_lhs (use_stmt)), type))
- /* We cannot replace a load that may throw or is volatile. */
- && !stmt_can_throw_internal (cfun, use_stmt)))
+ /* We cannot replace a load that may throw or is volatile.
+ For volatiles the transform can change the number of
+ executions if the load is inside a loop but the address
+ computations outside (PR91812). We could relax this
+ if we guard against that appropriately. For loads that can
+ throw we could relax things if the moved loads all are
+ known to not throw. */
+ && !stmt_can_throw_internal (cfun, use_stmt)
+ && !gimple_has_volatile_ops (use_stmt)))
continue;
/* Check if we can move the loads. The def stmt of the virtual use
|| alignment_support_scheme == dr_explicit_realign)
&& !compute_in_loop)
{
- msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
+ msq = vect_setup_realignment (first_stmt_info_for_drptr
+ ? first_stmt_info_for_drptr
+ : first_stmt_info, gsi, &realignment_token,
alignment_support_scheme, NULL_TREE,
&at_loop);
if (alignment_support_scheme == dr_explicit_realign_optimized)
{
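/* Assumed rationale, matching the ChangeLog entry for PR 91790: under
   BB vectorization first_stmt_info need not be the statement whose DR
   the vectorized pointer is based on; when first_stmt_info_for_drptr
   records that statement, realignment has to be set up from its DR,
   otherwise the wrong data reference is used.  */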
for (tree *tem = &BLOCK_VARS (t); *tem; )
{
- if (TREE_CODE (*tem) != VAR_DECL
- || !auto_var_in_fn_p (*tem, DECL_CONTEXT (*tem)))
+ if (TREE_CODE (*tem) != LABEL_DECL
+ && (TREE_CODE (*tem) != VAR_DECL
+ || !auto_var_in_fn_p (*tem, DECL_CONTEXT (*tem))))
{
gcc_assert (TREE_CODE (*tem) != RESULT_DECL
&& TREE_CODE (*tem) != PARM_DECL);
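/* Presumed effect of the change above (PR lto/91968): LABEL_DECLs now
   survive the BLOCK_VARS pruning done during free-lang-data, so labels
   that the function body and debug info still reference do not end up
   as dangling references when the trees are streamed for LTO.  */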