/* Building internal representation for IRA.
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2021 Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "rtl.h"
-#include "tm_p.h"
+#include "backend.h"
#include "target.h"
-#include "regs.h"
-#include "flags.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "diagnostic-core.h"
-#include "params.h"
+#include "rtl.h"
+#include "predict.h"
#include "df.h"
-#include "reload.h"
-#include "sparseset.h"
+#include "insn-config.h"
+#include "regs.h"
+#include "memmodel.h"
+#include "ira.h"
#include "ira-int.h"
-#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
+#include "sparseset.h"
+#include "cfgloop.h"
-static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
+static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
ira_loop_tree_node_t);
/* The root of the loop tree corresponding to the whole function.  */
int ira_loop_tree_height;
/* All nodes representing basic blocks are referred through the
- following array. We can not use basic block member `aux' for this
+ following array. We cannot use basic block member `aux' for this
because it is used for insertion of insns on edges. */
ira_loop_tree_node_t ira_bb_nodes;
/* Map a conflict id to its conflict record. */
ira_object_t *ira_object_id_map;
+/* Array of references to all allocno preferences. The order number
+ of the preference corresponds to the index in the array. */
+ira_pref_t *ira_prefs;
+
+/* Size of the previous array. */
+int ira_prefs_num;
+
/* Array of references to all copies. The order number of the copy
corresponds to the index in the array. Removed copies have NULL
element value. */
bool skip_p;
edge_iterator ei;
edge e;
- vec<edge> edges;
loop_p loop;
ira_bb_nodes
= ((struct ira_loop_tree_node *)
- ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
- last_basic_block_before_change = last_basic_block;
- for (i = 0; i < (unsigned int) last_basic_block; i++)
+ ira_allocate (sizeof (struct ira_loop_tree_node)
+ * last_basic_block_for_fn (cfun)));
+ last_basic_block_before_change = last_basic_block_for_fn (cfun);
+ for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
{
ira_bb_nodes[i].regno_allocno_map = NULL;
memset (ira_bb_nodes[i].reg_pressure, 0,
init_loop_tree_node (ira_loop_nodes, 0);
return;
}
- ira_loop_nodes_count = number_of_loops ();
+ ira_loop_nodes_count = number_of_loops (cfun);
ira_loop_nodes = ((struct ira_loop_tree_node *)
ira_allocate (sizeof (struct ira_loop_tree_node)
* ira_loop_nodes_count));
- FOR_EACH_VEC_SAFE_ELT (get_loops (), i, loop)
+ FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
{
if (loop_outer (loop) != NULL)
{
}
if (skip_p)
continue;
- edges = get_loop_exit_edges (loop);
+ auto_vec<edge> edges = get_loop_exit_edges (loop);
FOR_EACH_VEC_ELT (edges, j, e)
if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
{
skip_p = true;
break;
}
- edges.release ();
if (skip_p)
continue;
}
loop_p loop;
if (current_loops != NULL)
- FOR_EACH_VEC_SAFE_ELT (get_loops (), i, loop)
+ FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
if (ira_loop_nodes[i].regno_allocno_map != NULL
&& ira_loop_tree_root != &ira_loop_nodes[i])
return true;
loop designating the whole function when CFG loops are not
built. */
static void
-add_loop_to_tree (struct loop *loop)
+add_loop_to_tree (class loop *loop)
{
int loop_num;
- struct loop *parent;
+ class loop *parent;
ira_loop_tree_node_t loop_node, parent_node;
- /* We can not use loop node access macros here because of potential
+ /* We cannot use loop node access macros here because of potential
checking and because the nodes are not initialized enough
yet. */
if (loop != NULL && loop_outer (loop) != NULL)
form_loop_tree (void)
{
basic_block bb;
- struct loop *parent;
+ class loop *parent;
ira_loop_tree_node_t bb_node, loop_node;
- /* We can not use loop/bb node access macros because of potential
+ /* We cannot use loop/bb node access macros because of potential
checking and because the nodes are not initialized enough
yet. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb_node = &ira_bb_nodes[bb->index];
bb_node->bb = bb;
ira_assert (current_loops != NULL);
max_regno = max_reg_num ();
- FOR_EACH_VEC_SAFE_ELT (get_loops (), l, loop)
+ FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
if (ira_loop_nodes[l].regno_allocno_map != NULL)
{
ira_free (ira_loop_nodes[l].regno_allocno_map);
\f
/* Pools for allocnos, allocno live ranges and objects. */
-static alloc_pool allocno_pool, live_range_pool, object_pool;
+static object_allocator<live_range> live_range_pool ("live ranges");
+static object_allocator<ira_allocno> allocno_pool ("allocnos");
+static object_allocator<ira_object> object_pool ("objects");
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
static void
initiate_allocnos (void)
{
- live_range_pool
- = create_alloc_pool ("live ranges",
- sizeof (struct live_range), 100);
- allocno_pool
- = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
- object_pool
- = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
allocno_vec.create (max_reg_num () * 2);
ira_allocnos = NULL;
ira_allocnos_num = 0;
ira_create_object (ira_allocno_t a, int subword)
{
enum reg_class aclass = ALLOCNO_CLASS (a);
- ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
+ ira_object_t obj = object_pool.allocate ();
OBJECT_ALLOCNO (obj) = a;
OBJECT_SUBWORD (obj) = subword;
OBJECT_CONFLICT_VEC_P (obj) = false;
OBJECT_CONFLICT_ARRAY (obj) = NULL;
OBJECT_NUM_CONFLICTS (obj) = 0;
- COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
- COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
- IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- reg_class_contents[aclass]);
- IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- reg_class_contents[aclass]);
+ OBJECT_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
+ OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
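+  /* At this point the new object already conflicts with every hard
+     register that is unavailable for allocation (ira_no_alloc_regs) or
+     that lies outside its allocno class; conflict analysis later in the
+     build only adds further registers on top of this base.  */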
OBJECT_MIN (obj) = INT_MAX;
OBJECT_MAX (obj) = -1;
OBJECT_LIVE_RANGES (obj) = NULL;
{
ira_allocno_t a;
- a = (ira_allocno_t) pool_alloc (allocno_pool);
+ a = allocno_pool.allocate ();
ALLOCNO_REGNO (a) = regno;
ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
if (! cap_p)
ALLOCNO_CALL_FREQ (a) = 0;
ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
+ ALLOCNO_CROSSED_CALLS_ABIS (a) = 0;
+ CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
ALLOCNO_NO_STACK_REG_P (a) = false;
ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
ALLOCNO_BAD_SPILL_P (a) = false;
ALLOCNO_ASSIGNED_P (a) = false;
ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
+ ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
+ ALLOCNO_PREFS (a) = NULL;
ALLOCNO_COPIES (a) = NULL;
ALLOCNO_HARD_REG_COSTS (a) = NULL;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
ALLOCNO_CLASS (a) = aclass;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
{
- IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- reg_class_contents[aclass]);
- IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- reg_class_contents[aclass]);
+ OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
}
}
void
ira_create_allocno_objects (ira_allocno_t a)
{
- enum machine_mode mode = ALLOCNO_MODE (a);
+ machine_mode mode = ALLOCNO_MODE (a);
enum reg_class aclass = ALLOCNO_CLASS (a);
int n = ira_reg_class_max_nregs[aclass][mode];
int i;
- if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
+ if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
n = 1;
ALLOCNO_NUM_OBJECTS (a) = n;
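  /* For example, a double-word pseudo in a class of word-sized registers
     gets two objects, one per word, so conflicts can be tracked
     separately for each half; every other case is represented by a
     single object.  */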
ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
if (!total_only)
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
- OBJECT_CONFLICT_HARD_REGS (from_obj));
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
- OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
+ OBJECT_CONFLICT_HARD_REGS (to_obj)
+ |= OBJECT_CONFLICT_HARD_REGS (from_obj);
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj)
+ |= OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj);
}
#ifdef STACK_REGS
if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
/* Update hard register conflict information for all objects associated with
A to include the regs in SET. */
void
-ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
+ior_hard_reg_conflicts (ira_allocno_t a, const_hard_reg_set set)
{
ira_allocno_object_iterator i;
ira_object_t obj;
FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
{
- IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
- IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
+ OBJECT_CONFLICT_HARD_REGS (obj) |= set;
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= set;
}
}
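
/* For instance, this is used near the end of this file to make every
   allocno that crosses a call conflict with the registers such calls
   may clobber (ira_need_caller_save_regs).  */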
parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
+ ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
aclass = ALLOCNO_CLASS (a);
ira_set_allocno_class (cap, aclass);
ira_create_allocno_objects (cap);
ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
+ ALLOCNO_CROSSED_CALLS_ABIS (cap) = ALLOCNO_CROSSED_CALLS_ABIS (a);
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap)
+ = ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Creating cap ");
{
live_range_t p;
- p = (live_range_t) pool_alloc (live_range_pool);
+ p = live_range_pool.allocate ();
p->object = obj;
p->start = start;
p->finish = finish;
{
live_range_t p;
- p = (live_range_t) pool_alloc (live_range_pool);
+ p = live_range_pool.allocate ();
*p = *r;
return p;
}
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
- live_range_t first, last, temp;
+ live_range_t first, last;
if (r1 == NULL)
return r2;
for (first = last = NULL; r1 != NULL && r2 != NULL;)
{
if (r1->start < r2->start)
- {
- temp = r1;
- r1 = r2;
- r2 = temp;
- }
+ std::swap (r1, r2);
if (r1->start <= r2->finish + 1)
{
/* Intersected ranges: merge r1 and r2 into r1. */
r1->start = r2->start;
if (r1->finish < r2->finish)
r1->finish = r2->finish;
- temp = r2;
+ live_range_t temp = r2;
r2 = r2->next;
ira_finish_live_range (temp);
if (r2 == NULL)
void
ira_finish_live_range (live_range_t r)
{
- pool_free (live_range_pool, r);
+ live_range_pool.remove (r);
}
/* Free list of allocno live ranges starting with R. */
ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
ira_free (OBJECT_CONFLICT_ARRAY (obj));
- pool_free (object_pool, obj);
+ object_pool.remove (obj);
}
ira_allocnos[ALLOCNO_NUM (a)] = NULL;
finish_allocno (ira_allocno_t a)
{
ira_free_allocno_costs (a);
- pool_free (allocno_pool, a);
+ allocno_pool.remove (a);
}
/* Free the memory allocated for all allocnos. */
ira_free (ira_regno_allocno_map);
ira_object_id_map_vec.release ();
allocno_vec.release ();
- free_alloc_pool (allocno_pool);
- free_alloc_pool (object_pool);
- free_alloc_pool (live_range_pool);
+ allocno_pool.release ();
+ object_pool.release ();
+ live_range_pool.release ();
+}
+
+\f
+
+/* Pools for allocno preferences. */
+static object_allocator <ira_allocno_pref> pref_pool ("prefs");
+
+/* Vec containing references to all created preferences. It is a
+ container of array ira_prefs. */
+static vec<ira_pref_t> pref_vec;
+
+/* The function initializes data concerning allocno prefs. */
+static void
+initiate_prefs (void)
+{
+ pref_vec.create (get_max_uid ());
+ ira_prefs = NULL;
+ ira_prefs_num = 0;
+}
+
+/* Return pref for A and HARD_REGNO if any. */
+static ira_pref_t
+find_allocno_pref (ira_allocno_t a, int hard_regno)
+{
+ ira_pref_t pref;
+
+ for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
+ if (pref->allocno == a && pref->hard_regno == hard_regno)
+ return pref;
+ return NULL;
+}
+
+/* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
+ira_pref_t
+ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
+{
+ ira_pref_t pref;
+
+ pref = pref_pool.allocate ();
+ pref->num = ira_prefs_num;
+ pref->allocno = a;
+ pref->hard_regno = hard_regno;
+ pref->freq = freq;
+ pref_vec.safe_push (pref);
+ ira_prefs = pref_vec.address ();
+ ira_prefs_num = pref_vec.length ();
+ return pref;
+}
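+
+/* Note that ira_prefs and ira_prefs_num above are simply kept in sync
+   with pref_vec, so the exported array remains valid even after the
+   vector reallocates on a push.  */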
+
+/* Attach a pref PREF to the corresponding allocno. */
+static void
+add_allocno_pref_to_list (ira_pref_t pref)
+{
+ ira_allocno_t a = pref->allocno;
+
+ pref->next_pref = ALLOCNO_PREFS (a);
+ ALLOCNO_PREFS (a) = pref;
+}
+
+/* Create (or update frequency if the pref already exists) the pref of
+ allocno A preferring HARD_REGNO with frequency FREQ. */
+void
+ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
+{
+ ira_pref_t pref;
+
+ if (freq <= 0)
+ return;
+ if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
+ {
+ pref->freq += freq;
+ return;
+ }
+ pref = ira_create_pref (a, hard_regno, freq);
+ ira_assert (a != NULL);
+ add_allocno_pref_to_list (pref);
+}
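+
+/* Prefs are normally recorded during IRA's cost calculation, when an
+   insn copies a pseudo to or from a particular hard register, with FREQ
+   taken from the insn's execution frequency; as the code above shows,
+   repeated copies accumulate into a single preference per
+   (allocno, hard register) pair.  */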
+
+/* Print info about PREF into file F. */
+static void
+print_pref (FILE *f, ira_pref_t pref)
+{
+ fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
+ ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
+ pref->hard_regno, pref->freq);
+}
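+
+/* With the format above, e.g. preference 3 attaching hard register 0 to
+   allocno 5 for pseudo 70 with frequency 1000 prints as
+   "pref3:a5(r70)<-hr0@1000" (numbers purely illustrative).  */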
+
+/* Print info about PREF into stderr. */
+void
+ira_debug_pref (ira_pref_t pref)
+{
+ print_pref (stderr, pref);
+}
+
+/* Print info about all prefs into file F. */
+static void
+print_prefs (FILE *f)
+{
+ ira_pref_t pref;
+ ira_pref_iterator pi;
+
+ FOR_EACH_PREF (pref, pi)
+ print_pref (f, pref);
+}
+
+/* Print info about all prefs into stderr. */
+void
+ira_debug_prefs (void)
+{
+ print_prefs (stderr);
+}
+
+/* Print info about prefs involving allocno A into file F. */
+static void
+print_allocno_prefs (FILE *f, ira_allocno_t a)
+{
+ ira_pref_t pref;
+
+ fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
+ for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
+ fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
+ fprintf (f, "\n");
+}
+
+/* Print info about prefs involving allocno A into stderr. */
+void
+ira_debug_allocno_prefs (ira_allocno_t a)
+{
+ print_allocno_prefs (stderr, a);
+}
+
+/* The function frees memory allocated for PREF. */
+static void
+finish_pref (ira_pref_t pref)
+{
+ ira_prefs[pref->num] = NULL;
+ pref_pool.remove (pref);
+}
+
+/* Remove PREF from the list of allocno prefs and free memory for
+ it. */
+void
+ira_remove_pref (ira_pref_t pref)
+{
+ ira_pref_t cpref, prev;
+
+ if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
+ fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
+ pref->num, pref->hard_regno, pref->freq);
+ for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
+ cpref != NULL;
+ prev = cpref, cpref = cpref->next_pref)
+ if (cpref == pref)
+ break;
+ ira_assert (cpref != NULL);
+ if (prev == NULL)
+ ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
+ else
+ prev->next_pref = pref->next_pref;
+ finish_pref (pref);
+}
+
+/* Remove all prefs of allocno A. */
+void
+ira_remove_allocno_prefs (ira_allocno_t a)
+{
+ ira_pref_t pref, next_pref;
+
+ for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
+ {
+ next_pref = pref->next_pref;
+ finish_pref (pref);
+ }
+ ALLOCNO_PREFS (a) = NULL;
+}
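+
+/* This is also called right before an allocno is freed (see the calls
+   next to finish_allocno later in this file), so a freed allocno never
+   leaves dangling entries behind in ira_prefs.  */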
+
+/* Free memory allocated for all prefs. */
+static void
+finish_prefs (void)
+{
+ ira_pref_t pref;
+ ira_pref_iterator pi;
+
+ FOR_EACH_PREF (pref, pi)
+ finish_pref (pref);
+ pref_vec.release ();
+ pref_pool.release ();
}
\f
/* Pools for copies. */
-static alloc_pool copy_pool;
+static object_allocator<ira_allocno_copy> copy_pool ("copies");
/* Vec containing references to all created copies. It is a
container of array ira_copies. */
static void
initiate_copies (void)
{
- copy_pool
- = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
copy_vec.create (get_max_uid ());
ira_copies = NULL;
ira_copies_num = 0;
/* Return copy connecting A1 and A2 and originating from INSN of
LOOP_TREE_NODE if any. */
static ira_copy_t
-find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
+find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
ira_loop_tree_node_t loop_tree_node)
{
ira_copy_t cp, next_cp;
SECOND, FREQ, CONSTRAINT_P, and INSN. */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
- bool constraint_p, rtx insn,
+ bool constraint_p, rtx_insn *insn,
ira_loop_tree_node_t loop_tree_node)
{
ira_copy_t cp;
- cp = (ira_copy_t) pool_alloc (copy_pool);
+ cp = copy_pool.allocate ();
cp->num = ira_copies_num;
cp->first = first;
cp->second = second;
}
/* Attach a copy CP to allocnos involved into the copy. */
-void
-ira_add_allocno_copy_to_list (ira_copy_t cp)
+static void
+add_allocno_copy_to_list (ira_copy_t cp)
{
ira_allocno_t first = cp->first, second = cp->second;
/* Make a copy CP a canonical copy where the number of the
   first allocno is less than that of the second.  */
-void
-ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
+static void
+swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
- ira_allocno_t temp;
- ira_copy_t temp_cp;
-
if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
return;
- temp = cp->first;
- cp->first = cp->second;
- cp->second = temp;
-
- temp_cp = cp->prev_first_allocno_copy;
- cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
- cp->prev_second_allocno_copy = temp_cp;
-
- temp_cp = cp->next_first_allocno_copy;
- cp->next_first_allocno_copy = cp->next_second_allocno_copy;
- cp->next_second_allocno_copy = temp_cp;
+ std::swap (cp->first, cp->second);
+ std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
+ std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
}
/* Create (or update frequency if the copy already exists) and return
LOOP_TREE_NODE. */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
- bool constraint_p, rtx insn,
+ bool constraint_p, rtx_insn *insn,
ira_loop_tree_node_t loop_tree_node)
{
ira_copy_t cp;
cp = ira_create_copy (first, second, freq, constraint_p, insn,
loop_tree_node);
ira_assert (first != NULL && second != NULL);
- ira_add_allocno_copy_to_list (cp);
- ira_swap_allocno_copy_ends_if_necessary (cp);
+ add_allocno_copy_to_list (cp);
+ swap_allocno_copy_ends_if_necessary (cp);
return cp;
}
static void
finish_copy (ira_copy_t cp)
{
- pool_free (copy_pool, cp);
+ copy_pool.remove (cp);
}
FOR_EACH_COPY (cp, ci)
finish_copy (cp);
copy_vec.release ();
- free_alloc_pool (copy_pool);
+ copy_pool.release ();
}
\f
/* Pools for cost vectors. It is defined only for allocno classes. */
-static alloc_pool cost_vector_pool[N_REG_CLASSES];
+static pool_allocator *cost_vector_pool[N_REG_CLASSES];
/* The function initiates work with hard register cost vectors. It
creates an allocation pool for each allocno class. */
for (i = 0; i < ira_allocno_classes_num; i++)
{
aclass = ira_allocno_classes[i];
- cost_vector_pool[aclass]
- = create_alloc_pool ("cost vectors",
- sizeof (int) * ira_class_hard_regs_num[aclass],
- 100);
+ cost_vector_pool[aclass] = new pool_allocator
+ ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
}
}
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
- return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
+ return (int *) cost_vector_pool[(int) aclass]->allocate ();
}
/* Free a cost vector VEC for ACLASS. */
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
ira_assert (vec != NULL);
- pool_free (cost_vector_pool[(int) aclass], vec);
+ cost_vector_pool[(int) aclass]->remove (vec);
}
/* Finish work with hard register cost vectors. Release allocation
for (i = 0; i < ira_allocno_classes_num; i++)
{
aclass = ira_allocno_classes[i];
- free_alloc_pool (cost_vector_pool[aclass]);
+ delete cost_vector_pool[aclass];
}
}
minimizes the number of chain elements per allocno live range. If the
blocks were visited in a different order, we would still compute a
correct post-ordering but it would be less likely that two nodes
- connected by an edge in the CFG are neighbours in the topsort. */
+ connected by an edge in the CFG are neighbors in the topsort. */
static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
- vec<ira_loop_tree_node_t> loop_preorder)
+ const vec<ira_loop_tree_node_t> &loop_preorder)
{
vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
unsigned int n_loop_preorder;
{
ira_loop_tree_node_t subloop_node;
unsigned int i;
- vec<ira_loop_tree_node_t> dfs_stack;
+ auto_vec<ira_loop_tree_node_t> dfs_stack;
/* This is a bit of a strange abuse of the BB_VISITED flag: We use
the flag to mark blocks we still have to visit to add them to
ira_loop_tree_node_t pred_node;
basic_block pred_bb = e->src;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
}
#undef BB_TO_VISIT
- dfs_stack.release ();
}
gcc_assert (topsort_nodes.length () == n_loop_preorder);
if (bb_p)
{
- vec<ira_loop_tree_node_t>
- loop_preorder = vNULL;
+ auto_vec<ira_loop_tree_node_t> loop_preorder;
unsigned int i;
/* Add all nodes to the set of nodes to visit. The IRA loop tree
(*postorder_func) (subloop_node);
loop_rev_postorder.release ();
}
-
- loop_preorder.release ();
}
for (subloop_node = loop_node->subloops;
/* This recursive function creates allocnos corresponding to
pseudo-registers contained in X. True OUTPUT_P means that X is
- a lvalue. */
+ an lvalue. OUTER corresponds to the parent expression of X. */
static void
-create_insn_allocnos (rtx x, bool output_p)
+create_insn_allocnos (rtx x, rtx outer, bool output_p)
{
int i, j;
const char *fmt;
ira_allocno_t a;
if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
- a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
+ {
+ a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
+ if (outer != NULL && GET_CODE (outer) == SUBREG)
+ {
+ machine_mode wmode = GET_MODE (outer);
+ if (partial_subreg_p (ALLOCNO_WMODE (a), wmode))
+ ALLOCNO_WMODE (a) = wmode;
+ }
+ }
ALLOCNO_NREFS (a)++;
ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
}
else if (code == SET)
{
- create_insn_allocnos (SET_DEST (x), true);
- create_insn_allocnos (SET_SRC (x), false);
+ create_insn_allocnos (SET_DEST (x), NULL, true);
+ create_insn_allocnos (SET_SRC (x), NULL, false);
return;
}
else if (code == CLOBBER)
{
- create_insn_allocnos (XEXP (x, 0), true);
+ create_insn_allocnos (XEXP (x, 0), NULL, true);
return;
}
else if (code == MEM)
{
- create_insn_allocnos (XEXP (x, 0), false);
+ create_insn_allocnos (XEXP (x, 0), NULL, false);
return;
}
else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
{
- create_insn_allocnos (XEXP (x, 0), true);
- create_insn_allocnos (XEXP (x, 0), false);
+ create_insn_allocnos (XEXP (x, 0), NULL, true);
+ create_insn_allocnos (XEXP (x, 0), NULL, false);
return;
}
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- create_insn_allocnos (XEXP (x, i), output_p);
+ create_insn_allocnos (XEXP (x, i), x, output_p);
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
- create_insn_allocnos (XVECEXP (x, i, j), output_p);
+ create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
}
}
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
basic_block bb;
- rtx insn;
+ rtx_insn *insn;
unsigned int i;
bitmap_iterator bi;
ira_assert (bb != NULL);
FOR_BB_INSNS_REVERSE (bb, insn)
if (NONDEBUG_INSN_P (insn))
- create_insn_allocnos (PATTERN (insn), false);
+ create_insn_allocnos (PATTERN (insn), NULL, false);
/* It might be an allocno living through from one subloop to
another. */
EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
int i;
edge_iterator ei;
edge e;
- vec<edge> edges;
ira_assert (current_loops != NULL);
FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
if (e->src != loop_node->loop->latch)
create_loop_allocnos (e);
- edges = get_loop_exit_edges (loop_node->loop);
+ auto_vec<edge> edges = get_loop_exit_edges (loop_node->loop);
FOR_EACH_VEC_ELT (edges, i, e)
create_loop_allocnos (e);
- edges.release ();
}
}
+= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
+ ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
+ |= ALLOCNO_CROSSED_CALLS_ABIS (a);
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
aclass = ALLOCNO_CLASS (a);
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge. We don't
   form a region from such a loop if the target uses stack registers
- because reg-stack.c can not deal with such edges. */
+ because reg-stack.c cannot deal with such edges. */
static bool
-loop_with_complex_edge_p (struct loop *loop)
+loop_with_complex_edge_p (class loop *loop)
{
int i;
edge_iterator ei;
edge e;
- vec<edge> edges;
bool res;
FOR_EACH_EDGE (e, ei, loop->header->preds)
if (e->flags & EDGE_EH)
return true;
- edges = get_loop_exit_edges (loop);
+ auto_vec<edge> edges = get_loop_exit_edges (loop);
res = false;
FOR_EACH_VEC_ELT (edges, i, e)
if (e->flags & EDGE_COMPLEX)
res = true;
break;
}
- edges.release ();
return res;
}
#endif
return -1;
if (! l1->to_remove_p && l2->to_remove_p)
return 1;
- if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
+ if ((diff = l1->loop->header->count.to_frequency (cfun)
+ - l2->loop->header->count.to_frequency (cfun)) != 0)
return diff;
if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
return diff;
hardly helps (for an irregular register file architecture it could
help by choosing a better hard register in the loop but we prefer
faster allocation even in this case). We also remove cheap loops
- if there are more than IRA_MAX_LOOPS_NUM of them. Loop with EH
+ if there are more than param_ira_max_loops_num of them. Loops with EH
exit or enter edges are removed too because the allocation might
require putting pseudo moves on the EH edges (we could still do this
for pseudos with caller saved hard registers in some cases but it
ira_assert (current_loops != NULL);
sorted_loops
= (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
- * number_of_loops ());
- for (n = i = 0; vec_safe_iterate (get_loops (), i, &loop); i++)
+ * number_of_loops (cfun));
+ for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
if (ira_loop_nodes[i].regno_allocno_map != NULL)
{
if (ira_loop_nodes[i].parent == NULL)
);
}
qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
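  /* The sort above places the candidate loops cheapest first (lowest
     header frequency, then shallowest depth), so the loop below marks
     the n - param_ira_max_loops_num cheapest regions for removal.  */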
- for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
+ for (i = 0; i < n - param_ira_max_loops_num; i++)
{
sorted_loops[i]->to_remove_p = true;
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
(ira_dump_file,
" Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
- sorted_loops[i]->loop->header->frequency,
+ sorted_loops[i]->loop->header->count.to_frequency (cfun),
loop_depth (sorted_loops[i]->loop),
low_pressure_loop_node_p (sorted_loops[i]->parent)
&& low_pressure_loop_node_p (sorted_loops[i])
loop_p loop;
ira_assert (current_loops != NULL);
- FOR_EACH_VEC_SAFE_ELT (get_loops (), i, loop)
+ FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
if (ira_loop_nodes[i].regno_allocno_map != NULL)
{
if (ira_loop_nodes[i].parent == NULL)
" Mark loop %d (header %d, freq %d, depth %d) for removal\n",
ira_loop_nodes[i].loop_num,
ira_loop_nodes[i].loop->header->index,
- ira_loop_nodes[i].loop->header->frequency,
+ ira_loop_nodes[i].loop->header->count.to_frequency (cfun),
loop_depth (ira_loop_nodes[i].loop));
}
}
ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
+ ALLOCNO_CROSSED_CALLS_ABIS (a) |= ALLOCNO_CROSSED_CALLS_ABIS (from_a);
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a);
+
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
if (! ALLOCNO_BAD_SPILL_P (from_a))
map to avoid info propagation of subsequent
allocno into this already removed allocno. */
a_node->regno_allocno_map[regno] = NULL;
+ ira_remove_allocno_prefs (a);
finish_allocno (a);
}
}
ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
ALLOCNO_CAP_MEMBER (a) = NULL;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
- COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
- OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
+ OBJECT_CONFLICT_HARD_REGS (obj)
+ = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
}
else
- finish_allocno (a);
+ {
+ ira_remove_allocno_prefs (a);
+ finish_allocno (a);
+ }
}
if (merged_p)
ira_rebuild_start_finish_chains ();
mark_all_loops_for_removal ();
else
mark_loops_for_removal ();
- children_vec.create(last_basic_block + number_of_loops ());
- removed_loop_vec.create(last_basic_block + number_of_loops ());
+ children_vec.create (last_basic_block_for_fn (cfun)
+ + number_of_loops (cfun));
+ removed_loop_vec.create (last_basic_block_for_fn (cfun)
+ + number_of_loops (cfun));
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
children_vec.release ();
if (all_p)
/* At this point true value of allocno attribute bad_spill_p means
that there is an insn where allocno occurs and where the allocno
- can not be used as memory. The function updates the attribute, now
- it can be true only for allocnos which can not be used as memory in
+ cannot be used as memory. The function updates the attribute, now
+ it can be true only for allocnos which cannot be used as memory in
an insn and in whose live ranges there are other allocno deaths.
Spilling allocnos with true value will not improve the code because
it will not make other allocnos colorable and additional reloads
ira_object_t parent_obj;
if (OBJECT_MAX (obj) < 0)
- continue;
+ {
+ /* The object is not used and hence does not live. */
+ ira_assert (OBJECT_LIVE_RANGES (obj) == NULL);
+ OBJECT_MAX (obj) = 0;
+ OBJECT_MIN (obj) = 1;
+ continue;
+ }
ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
/* Accumulation of range info. */
if (ALLOCNO_CAP (a) != NULL)
#ifdef ENABLE_IRA_CHECKING
FOR_EACH_OBJECT (obj, oi)
{
- if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
- && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
+ if ((OBJECT_MIN (obj) >= 0 && OBJECT_MIN (obj) <= ira_max_point)
+ && (OBJECT_MAX (obj) >= 0 && OBJECT_MAX (obj) <= ira_max_point))
continue;
gcc_unreachable ();
}
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
ira_object_id_map[num++] = obj;
}
- qsort (ira_object_id_map, num, sizeof (ira_object_t),
- object_range_compare_func);
+ if (num > 1)
+ qsort (ira_object_id_map, num, sizeof (ira_object_t),
+ object_range_compare_func);
for (i = 0; i < num; i++)
{
ira_object_t obj = ira_object_id_map[i];
+= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
+ ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
+ |= ALLOCNO_CROSSED_CALLS_ABIS (a);
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
+ |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
merged_p = true;
flattening. */
continue;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
- COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
- OBJECT_CONFLICT_HARD_REGS (obj));
+ OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
+ = OBJECT_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
-= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
+ /* Assume that ALLOCNO_CROSSED_CALLS_ABIS and
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS stay the same.
+ We'd need to rebuild the IR to do better. */
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
continue;
aclass = ALLOCNO_CLASS (a);
- sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
{
ira_object_t live_obj = ira_object_id_map[n];
&& live_a != a)
ira_add_conflict (obj, live_obj);
}
+ sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
}
for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Remove a%dr%d\n",
ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
+ ira_remove_allocno_prefs (a);
finish_allocno (a);
continue;
}
ira_assert
(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
&& ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
- ira_add_allocno_copy_to_list (cp);
- ira_swap_allocno_copy_ends_if_necessary (cp);
+ add_allocno_copy_to_list (cp);
+ swap_allocno_copy_ends_if_necessary (cp);
}
rebuild_regno_allocno_maps ();
if (ira_max_point != ira_max_point_before_emit)
df_analyze ();
initiate_cost_vectors ();
initiate_allocnos ();
+ initiate_prefs ();
initiate_copies ();
create_loop_tree_nodes ();
form_loop_tree ();
allocno crossing calls. */
FOR_EACH_ALLOCNO (a, ai)
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
- ior_hard_reg_conflicts (a, &call_used_reg_set);
+ ior_hard_reg_conflicts (a, ira_need_caller_save_regs (a));
}
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
print_copies (ira_dump_file);
+ if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
+ print_prefs (ira_dump_file);
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
{
int n, nr, nr_big;
}
}
fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
- current_loops == NULL ? 1 : number_of_loops (),
- n_basic_blocks, ira_max_point);
+ current_loops == NULL ? 1 : number_of_loops (cfun),
+ n_basic_blocks_for_fn (cfun), ira_max_point);
fprintf (ira_dump_file,
" allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
ira_allocnos_num, nr_big, ira_copies_num, n, nr);
ira_destroy (void)
{
finish_loop_tree_nodes ();
+ finish_prefs ();
finish_copies ();
finish_allocnos ();
finish_cost_vectors ();