+ /* We create an area for a possible literal pool every N (default 5000)
+ frags or so. */
+ xtensa_maybe_create_literal_pool_frag (true, true);
+}
+
/* Scratch instruction buffers used when assembling jump instructions
   into trampoline frags; allocated lazily on first use.  */
static xtensa_insnbuf trampoline_buf = NULL;
static xtensa_insnbuf trampoline_slotbuf = NULL;

/* Scratch instruction buffers for literal pool processing.  */
static xtensa_insnbuf litpool_buf = NULL;
static xtensa_insnbuf litpool_slotbuf = NULL;

/* Bytes reserved in each trampoline frag.  Each jump instruction takes
   three bytes, so this holds up to 1000 trampoline jumps.  */
#define TRAMPOLINE_FRAG_SIZE 3000
+
+static struct trampoline_seg *
+find_trampoline_seg (asection *seg)
+{
+ struct trampoline_seg *ts = trampoline_seg_list.next;
+ static struct trampoline_seg *mr;
+
+ if (mr && mr->seg == seg)
+ return mr;
+
+ for ( ; ts; ts = ts->next)
+ {
+ if (ts->seg == seg)
+ {
+ mr = ts;
+ return ts;
+ }
+ }
+
+ return NULL;
+}
+
+static size_t xg_find_trampoline (const struct trampoline_index *idx,
+ addressT addr)
+{
+ size_t a = 0;
+ size_t b = idx->n_entries;
+
+ while (b - a > 1)
+ {
+ size_t c = (a + b) / 2;
+
+ if (idx->entry[c]->fr_address <= addr)
+ a = c;
+ else
+ b = c;
+ }
+ return a;
+}
+
+static void xg_add_trampoline_to_index (struct trampoline_index *idx,
+ fragS *fragP)
+{
+ if (idx->n_entries == idx->n_max)
+ {
+ idx->n_max = (idx->n_entries + 1) * 2;
+ idx->entry = xrealloc (idx->entry,
+ sizeof (*idx->entry) * idx->n_max);
+ }
+ idx->entry[idx->n_entries] = fragP;
+ ++idx->n_entries;
+}
+
+static void xg_remove_trampoline_from_index (struct trampoline_index *idx,
+ size_t i)
+{
+ gas_assert (i < idx->n_entries);
+ memmove (idx->entry + i, idx->entry + i + 1,
+ (idx->n_entries - i - 1) * sizeof (*idx->entry));
+ --idx->n_entries;
+}
+
+static void xg_add_trampoline_to_seg (struct trampoline_seg *ts,
+ fragS *fragP)
+{
+ xg_add_trampoline_to_index (&ts->index, fragP);
+}
+
static void
xtensa_create_trampoline_frag (bool needs_jump_around)
{
  /* Emit a frag where we can place intermediate jump instructions,
     in case we need to jump farther than 128K bytes.
     Each jump instruction takes three bytes.
     We allocate enough for 1000 trampolines in each frag.
     If that's not enough, oh well.  */

  struct trampoline_seg *ts = find_trampoline_seg (now_seg);
  char *varP;
  fragS *fragP;
  int size = TRAMPOLINE_FRAG_SIZE;

  /* First trampoline frag in this section: create a trampoline_seg
     record and link it at the head of the global list.  */
  if (ts == NULL)
    {
      ts = XCNEW (struct trampoline_seg);
      ts->next = trampoline_seg_list.next;
      trampoline_seg_list.next = ts;
      ts->seg = now_seg;
    }

  /* Close the current frag and start a fresh one so the relaxable
     trampoline area below begins on its own frag boundary.  */
  frag_wane (frag_now);
  frag_new (0);
  xtensa_set_frag_assembly_state (frag_now);
  /* frag_var returns a pointer just past the frag header, so back up
     by SIZEOF_STRUCT_FRAG to recover the frag itself.  */
  varP = frag_var (rs_machine_dependent, size, size, RELAX_TRAMPOLINE, NULL, 0, NULL);
  fragP = (fragS *) (varP - SIZEOF_STRUCT_FRAG);
  /* Lazily allocate the insn buffers used when jump instructions are
     later assembled into trampoline frags.  */
  if (trampoline_buf == NULL)
    {
      trampoline_buf = xtensa_insnbuf_alloc (xtensa_default_isa);
      trampoline_slotbuf = xtensa_insnbuf_alloc (xtensa_default_isa);
    }
  fragP->tc_frag_data.needs_jump_around = needs_jump_around;
  xg_add_trampoline_to_seg (ts, fragP);
}
+
+static bool xg_is_trampoline_frag_full (const fragS *fragP)
+{
+ return fragP->fr_var < 3;
+}
+
+static int xg_order_trampoline_chain_entry (const void *a, const void *b)
+{
+ const struct trampoline_chain_entry *pa = a;
+ const struct trampoline_chain_entry *pb = b;
+
+ if (pa->sym != pb->sym)
+ {
+ valueT aval = S_GET_VALUE (pa->sym);
+ valueT bval = S_GET_VALUE (pb->sym);
+
+ if (aval != bval)
+ return aval < bval ? -1 : 1;
+ }
+ if (pa->offset != pb->offset)
+ return pa->offset < pb->offset ? -1 : 1;
+ return 0;
+}
+
+static void xg_sort_trampoline_chain (struct trampoline_chain *tc)
+{
+ qsort (tc->entry, tc->n_entries, sizeof (*tc->entry),
+ xg_order_trampoline_chain_entry);
+ tc->needs_sorting = false;
+}
+
+/* Find entry index in the given chain with maximal address <= source. */
+static size_t xg_find_chain_entry (struct trampoline_chain *tc,
+ addressT source)
+{
+ size_t a = 0;
+ size_t b = tc->n_entries;
+
+ if (tc->needs_sorting)
+ xg_sort_trampoline_chain (tc);
+
+ while (b - a > 1)
+ {
+ size_t c = (a + b) / 2;
+ struct trampoline_chain_entry *e = tc->entry + c;
+
+ if (S_GET_VALUE(e->sym) + e->offset <= source)
+ a = c;
+ else
+ b = c;
+ }
+ return a;
+}
+
/* Find the best jump target for the source in the given trampoline chain.
   The best jump target is the one that results in the shortest path to the
   final target, it's the location of the jump closest to the final target,
   but within the J_RANGE - J_MARGIN from the source.  */
static struct trampoline_chain_entry *
xg_get_best_chain_entry (struct trampoline_chain *tc, addressT source)
{
  addressT target = S_GET_VALUE (tc->target.sym) + tc->target.offset;
  size_t i = xg_find_chain_entry (tc, source);
  struct trampoline_chain_entry *e = tc->entry + i;
  /* Walk down the chain when the final target is below the source,
     up otherwise.  */
  int step = target < source ? -1 : 1;
  addressT chained_target;
  offsetT off;

  /* xg_find_chain_entry returns the last entry at or below SOURCE;
     when moving forward, start from the first entry above it.  */
  if (target > source &&
      S_GET_VALUE (e->sym) + e->offset <= source &&
      i + 1 < tc->n_entries)
    ++i;

  /* Advance toward the target while the next entry stays within jump
     range of the source.  NOTE(review): with step == -1 and i == 0,
     i + step wraps to SIZE_MAX (i is size_t), making the condition
     false and stopping the walk -- looks intentional, but worth
     confirming.  */
  while (i + step < tc->n_entries)
    {
      struct trampoline_chain_entry *next = tc->entry + i + step;

      chained_target = S_GET_VALUE (next->sym) + next->offset;
      off = source - chained_target;

      if (labs (off) >= J_RANGE - J_MARGIN)
        break;

      i += step;
    }

  e = tc->entry + i;
  chained_target = S_GET_VALUE (e->sym) + e->offset;
  off = source - chained_target;

  /* An entry too close to the source is useless and one out of range
     is unusable: fall back to the final target itself.  */
  if (labs (off) < J_MARGIN ||
      labs (off) >= J_RANGE - J_MARGIN)
    return &tc->target;
  return tc->entry + i;
}
+
/* qsort/bsearch comparator for trampoline chains: order by target
   symbol, then by target offset.  */
static int xg_order_trampoline_chain (const void *a, const void *b)
{
  const struct trampoline_chain *_pa = a;
  const struct trampoline_chain *_pb = b;
  const struct trampoline_chain_entry *pa = &_pa->target;
  const struct trampoline_chain_entry *pb = &_pb->target;
  symbolS *s1 = pa->sym;
  symbolS *s2 = pb->sym;

  if (s1 != s2)
    {
      /* Map each symbol through symbol_symbolS before comparing, so
         that aliases of the same underlying symbolS compare equal
         -- presumably this canonicalizes gas local symbols; confirm
         against symbols.c.  */
      symbolS *tmp = symbol_symbolS (s1);
      if (tmp)
	s1 = tmp;

      tmp = symbol_symbolS (s2);
      if (tmp)
	s2 = tmp;

      /* NOTE(review): distinct symbols are ordered by raw pointer
         value; any consistent total order suffices here since lookups
         only care about equality of targets.  */
      if (s1 != s2)
	return s1 < s2 ? -1 : 1;
    }

  if (pa->offset != pb->offset)
    return pa->offset < pb->offset ? -1 : 1;
  return 0;
}
+
+static struct trampoline_chain *
+xg_get_trampoline_chain (struct trampoline_seg *ts,
+ symbolS *sym,
+ addressT offset)
+{
+ struct trampoline_chain_index *idx = &ts->chain_index;
+ struct trampoline_chain c;
+
+ if (idx->n_entries == 0)
+ return NULL;
+
+ if (idx->needs_sorting)
+ {
+ qsort (idx->entry, idx->n_entries, sizeof (*idx->entry),
+ xg_order_trampoline_chain);
+ idx->needs_sorting = false;
+ }
+ c.target.sym = sym;
+ c.target.offset = offset;
+ return bsearch (&c, idx->entry, idx->n_entries,
+ sizeof (struct trampoline_chain),
+ xg_order_trampoline_chain);
+}
+
+/* Find trampoline chain in the given trampoline segment that is going
+ to the *sym + *offset. If found, replace *sym and *offset with the
+ best jump target in that chain. */
+static struct trampoline_chain *
+xg_find_best_eq_target (struct trampoline_seg *ts,
+ addressT source, symbolS **sym,
+ addressT *offset)
+{
+ struct trampoline_chain *tc = xg_get_trampoline_chain (ts, *sym, *offset);
+
+ if (tc)
+ {
+ struct trampoline_chain_entry *e = xg_get_best_chain_entry (tc, source);
+
+ *sym = e->sym;
+ *offset = e->offset;
+ }
+ return tc;
+}
+
+static void xg_add_location_to_chain (struct trampoline_chain *tc,
+ symbolS *sym, addressT offset)
+{
+ struct trampoline_chain_entry *e;
+
+ if (tc->n_entries == tc->n_max)
+ {
+ tc->n_max = (tc->n_max + 1) * 2;
+ tc->entry = xrealloc (tc->entry, sizeof (*tc->entry) * tc->n_max);
+ }
+ e = tc->entry + tc->n_entries;
+ e->sym = sym;
+ e->offset = offset;
+ ++tc->n_entries;
+ tc->needs_sorting = true;
+}
+
+static struct trampoline_chain *
+xg_create_trampoline_chain (struct trampoline_seg *ts,
+ symbolS *sym, addressT offset)
+{
+ struct trampoline_chain_index *idx = &ts->chain_index;
+ struct trampoline_chain *tc;
+
+ if (idx->n_entries == idx->n_max)
+ {
+ idx->n_max = (idx->n_max + 1) * 2;
+ idx->entry = xrealloc (idx->entry,
+ sizeof (*idx->entry) * idx->n_max);
+ }
+
+ tc = idx->entry + idx->n_entries;
+ tc->target.sym = sym;
+ tc->target.offset = offset;
+ tc->entry = NULL;
+ tc->n_entries = 0;
+ tc->n_max = 0;
+ xg_add_location_to_chain (tc, sym, offset);
+
+ ++idx->n_entries;
+ idx->needs_sorting = true;
+
+ return tc;
+}
+
+void dump_trampolines (void);
+
+void
+dump_trampolines (void)
+{
+ struct trampoline_seg *ts = trampoline_seg_list.next;
+
+ for ( ; ts; ts = ts->next)
+ {
+ size_t i;
+ asection *seg = ts->seg;
+
+ if (seg == NULL)
+ continue;
+ fprintf(stderr, "SECTION %s\n", seg->name);
+
+ for (i = 0; i < ts->index.n_entries; ++i)
+ {
+ fragS *tf = ts->index.entry[i];
+
+ fprintf(stderr, " 0x%08x: fix=%d, jump_around=%s\n",
+ (int)tf->fr_address, (int)tf->fr_fix,
+ tf->tc_frag_data.needs_jump_around ? "T" : "F");
+ }
+ }
+}
+
+static void dump_litpools (void) __attribute__ ((unused));
+
+static void
+dump_litpools (void)
+{
+ struct litpool_seg *lps = litpool_seg_list.next;
+ struct litpool_frag *lpf;
+
+ for ( ; lps ; lps = lps->next )
+ {
+ printf("litpool seg %s\n", lps->seg->name);
+ for ( lpf = lps->frag_list.next; lpf->fragP; lpf = lpf->next )
+ {
+ fragS *litfrag = lpf->fragP->fr_next;
+ int count = 0;
+ while (litfrag && litfrag->fr_subtype != RELAX_LITERAL_POOL_END)
+ {
+ if (litfrag->fr_fix == 4)
+ count++;
+ litfrag = litfrag->fr_next;
+ }
+ printf(" %ld <%d:%d> (%d) [%d]: ",
+ lpf->addr, lpf->priority, lpf->original_priority,
+ lpf->fragP->fr_line, count);
+ /* dump_frag(lpf->fragP); */
+ }
+ }
+}
+
/* Maybe insert a literal pool marker frag at the current location.
   CREATE requests that a pool frag actually be emitted (otherwise only
   the per-section frag counter is maintained); ONLY_IF_NEEDED makes
   creation conditional on the counter having exceeded
   auto_litpool_limit.  No-op unless automatic litpools are in effect.  */

static void
xtensa_maybe_create_literal_pool_frag (bool create, bool only_if_needed)
{
  struct litpool_seg *lps = litpool_seg_list.next;
  fragS *fragP;
  struct litpool_frag *lpf;
  bool needed = false;

  if (use_literal_section || !auto_litpools)
    return;

  /* Find the literal-pool state for the current section.  */
  for ( ; lps ; lps = lps->next )
    {
      if (lps->seg == now_seg)
	break;
    }

  /* First time in this section: create and link its state record.  */
  if (lps == NULL)
    {
      lps = XCNEW (struct litpool_seg);
      lps->next = litpool_seg_list.next;
      litpool_seg_list.next = lps;
      lps->seg = now_seg;
      lps->frag_list.next = &lps->frag_list;
      lps->frag_list.prev = &lps->frag_list;
      /* Put candidate literal pool at the beginning of every section,
	 so that even when section starts with literal load there's a
	 literal pool available.  */
      lps->frag_count = auto_litpool_limit;
    }

  lps->frag_count++;

  if (create)
    {
      if (only_if_needed)
	{
	  /* Never break up a no-transform region with a pool.  */
	  if (past_xtensa_end || !use_transform () ||
	      frag_now->tc_frag_data.is_no_transform)
	    {
	      return;
	    }
	  if (auto_litpool_limit <= 0)
	    {
	      /* Don't create a litpool based only on frag count.  */
	      return;
	    }
	  else if (lps->frag_count > auto_litpool_limit)
	    {
	      needed = true;
	    }
	  else
	    {
	      return;
	    }
	}
      else
	{
	  needed = true;
	}
    }

  if (needed)
    {
      int size = (only_if_needed) ? 3 : 0; /* Space for a "j" insn.  */
      /* Create a potential site for a literal pool.  */
      frag_wane (frag_now);
      frag_new (0);
      xtensa_set_frag_assembly_state (frag_now);
      fragP = frag_now;
      fragP->tc_frag_data.lit_frchain = frchain_now;
      fragP->tc_frag_data.literal_frag = fragP;
      /* A CANDIDATE pool reserves room for a jump around its literals;
	 an unconditional pool (size 0) needs none.  */
      frag_var (rs_machine_dependent, size, size,
		(only_if_needed) ?
		RELAX_LITERAL_POOL_CANDIDATE_BEGIN :
		RELAX_LITERAL_POOL_BEGIN,
		NULL, 0, NULL);
      frag_now->tc_frag_data.lit_seg = now_seg;
      frag_variant (rs_machine_dependent, 0, 0,
		    RELAX_LITERAL_POOL_END, NULL, 0, NULL);
      xtensa_set_frag_assembly_state (frag_now);
    }
  else
    {
      /* RELAX_LITERAL_POOL_BEGIN frag is being created;
	 just record it here.  */
      fragP = frag_now;
    }

  lpf = XNEW (struct litpool_frag);
  /* Insert at tail of circular list.  */
  lpf->addr = 0;
  lps->frag_list.prev->next = lpf;
  lpf->next = &lps->frag_list;
  lpf->prev = lps->frag_list.prev;
  lps->frag_list.prev = lpf;
  lpf->fragP = fragP;
  /* Priority: 3 = candidate pool created on demand, 2 = unconditional
     pool, 1 = bookkeeping entry for an already-existing pool frag.  */
  lpf->priority = (needed) ? (only_if_needed) ? 3 : 2 : 1;
  lpf->original_priority = lpf->priority;
  lpf->literal_count = 0;

  /* Restart the frag count for the next pool decision.  */
  lps->frag_count = 0;
}
+
/* Walk every frag in every subsection and tidy alignment frags:
   - after a zero-size alignment frag, wane any immediately following
     zero-size RELAX_DESIRE_ALIGN_IF_TARGET frags so only one alignment
     request survives;
   - cancel narrowing relaxation on branch-target frags, keeping them
     easier to align;
   - mark RELAX_UNREACHABLE frags in their tc_frag_data.  */

static void
xtensa_cleanup_align_frags (void)
{
  frchainS *frchP;
  asection *s;

  for (s = stdoutput->sections; s; s = s->next)
    for (frchP = seg_info (s)->frchainP; frchP; frchP = frchP->frch_next)
      {
	fragS *fragP;
	/* Walk over all of the fragments in a subsection.  */
	for (fragP = frchP->frch_root; fragP; fragP = fragP->fr_next)
	  {
	    if ((fragP->fr_type == rs_align
		 || fragP->fr_type == rs_align_code
		 || (fragP->fr_type == rs_machine_dependent
		     && (fragP->fr_subtype == RELAX_DESIRE_ALIGN
			 || fragP->fr_subtype == RELAX_DESIRE_ALIGN_IF_TARGET)))
		&& fragP->fr_fix == 0)
	      {
		/* Collapse the run of redundant align-if-target frags
		   that immediately follows this alignment frag.  */
		fragS *next = fragP->fr_next;

		while (next
		       && next->fr_fix == 0
		       && next->fr_type == rs_machine_dependent
		       && next->fr_subtype == RELAX_DESIRE_ALIGN_IF_TARGET)
		  {
		    frag_wane (next);
		    next = next->fr_next;
		  }
	      }
	    /* If we don't widen branch targets, then they
	       will be easier to align.  */
	    if (fragP->tc_frag_data.is_branch_target
		&& fragP->fr_opcode == fragP->fr_literal
		&& fragP->fr_type == rs_machine_dependent
		&& fragP->fr_subtype == RELAX_SLOTS
		&& fragP->tc_frag_data.slot_subtypes[0] == RELAX_NARROW)
	      frag_wane (fragP);
	    if (fragP->fr_type == rs_machine_dependent
		&& fragP->fr_subtype == RELAX_UNREACHABLE)
	      fragP->tc_frag_data.is_unreachable = true;
	  }
      }
}
+
+
+/* Re-process all of the fragments looking to convert all of the
+ RELAX_DESIRE_ALIGN_IF_TARGET fragments. If there is a branch
+ target in the next fragment, convert this to RELAX_DESIRE_ALIGN.
+ Otherwise, convert to a .fill 0. */
+
+static void
+xtensa_fix_target_frags (void)
+{
+ frchainS *frchP;
+ asection *s;
+
+ /* When this routine is called, all of the subsections are still intact
+ so we walk over subsections instead of sections. */
+ for (s = stdoutput->sections; s; s = s->next)
+ for (frchP = seg_info (s)->frchainP; frchP; frchP = frchP->frch_next)
+ {
+ fragS *fragP;
+
+ /* Walk over all of the fragments in a subsection. */
+ for (fragP = frchP->frch_root; fragP; fragP = fragP->fr_next)
+ {
+ if (fragP->fr_type == rs_machine_dependent
+ && fragP->fr_subtype == RELAX_DESIRE_ALIGN_IF_TARGET)
+ {
+ if (next_frag_is_branch_target (fragP))
+ fragP->fr_subtype = RELAX_DESIRE_ALIGN;
+ else
+ frag_wane (fragP);
+ }
+ }
+ }
+}
+
+
+static bool is_narrow_branch_guaranteed_in_range (fragS *, TInsn *);
+
+static void
+xtensa_mark_narrow_branches (void)
+{
+ frchainS *frchP;
+ asection *s;
+
+ for (s = stdoutput->sections; s; s = s->next)
+ for (frchP = seg_info (s)->frchainP; frchP; frchP = frchP->frch_next)
+ {
+ fragS *fragP;
+ /* Walk over all of the fragments in a subsection. */
+ for (fragP = frchP->frch_root; fragP; fragP = fragP->fr_next)
+ {
+ if (fragP->fr_type == rs_machine_dependent
+ && fragP->fr_subtype == RELAX_SLOTS
+ && fragP->tc_frag_data.slot_subtypes[0] == RELAX_IMMED)
+ {
+ vliw_insn vinsn;
+
+ vinsn_from_chars (&vinsn, fragP->fr_opcode);
+ tinsn_immed_from_frag (&vinsn.slots[0], fragP, 0);
+
+ if (vinsn.num_slots == 1
+ && xtensa_opcode_is_branch (xtensa_default_isa,