diff options
author | hubicka <hubicka@138bc75d-0d04-0410-961f-82ee72b054a4> | 2001-09-10 12:23:08 +0000 |
---|---|---|
committer | hubicka <hubicka@138bc75d-0d04-0410-961f-82ee72b054a4> | 2001-09-10 12:23:08 +0000 |
commit | 65f34de51669d0fe37752d46811f848402c274e4 (patch) | |
tree | 1de90ed0fe72193706efd4b77aee818dfb646ee7 /gcc | |
parent | 27d0c333857a441a3629bcda370457da97e49bf1 (diff) | |
download | ppe42-gcc-65f34de51669d0fe37752d46811f848402c274e4.tar.gz ppe42-gcc-65f34de51669d0fe37752d46811f848402c274e4.zip |
* Makefile.in (cfg.o, cfganal.o, cfgloop.o, cfgbuild.o, cfgcleanup.o):
New.
* basic-block.h (flow_obstack, label_value_list,
tail_recursion_label_list): Declare.
(tidy_fallthru_edges): Declare.
(expunge_block, last_loop_beg_note): Delete.
(can_fallthru, flow_nodes_print, flow_edge_list_print): Declare.
* cfg.c: New file
(basic_block_for_insn, label_value_list): Move from flow.c; make global.
(n_basic_blocks, n_edges, basic_block_info, entry_exit_blocks,
init_flow, clear_edges, can_delete_note_p, can_delete_label_p,
flow_delete_insn, flow_delete_insn_chain, create_basic_block,
expunge_block, flow_delete_block, compute_bb_for_insn,
update_bb_for_insn, set_block_for_insn, set_block_for_new_insns,
make_edge, remove_edge, redirect_edge_succ, redirect_edge_succ_nodup,
redirect_edge_pred, split_block, merge_blocks_nomove, block_label,
try_redirect_by_replacing_jump, last_loop_beg_note,
redirect_edge_and_branch, redirect_edge_and_branch_force,
tidy_fallthru_edge, tidy_fallthru_edges, back_edge_of_syntactic_loop_p,
split_edge, insert_insn_on_edge, commit_one_edge_insertion,
commit_edge_insertions, dump_flow_info, debug_flow_info,
dump_edge_info, dump_bb, debug_bb, debug_bb_n, print_rtl_with_bb,
verify_flow_info, purge_dead_edges, purge_all_dead_edges):
Move here from flow.c
* cfganal.c: New file.
(forwarder_block_p, can_fallthru, mark_critical_edges,
mark_dfs_back_edges, need_fake_edge_p, flow_call_edges_add,
find_unreachable_blocks, create_edge_list, free_edge_list,
print_edge_list, verify_edge_list, find_edge_index, flow_nodes_print,
flow_edge_list_print, remove_fake_successors, remove_fake_edges,
add_noreturn_fake_exit_edges, connect_infinite_loops_to_exit,
flow_reverse_top_sort_order_compute, flow_depth_first_order_compute,
flow_dfs_compute_reverse_init, flow_dfs_compute_reverse_add_bb,
flow_dfs_compute_reverse_execute, flow_dfs_compute_reverse_finish):
Move here from flow.c
* cfgbuild.c: New file
(count_basic_blocks, find_label_refs, make_label_edge, make_eh_edge,
make_edges, find_basic_blocks_1, find_basic_blocks,
find_sub_basic_blocks): Move here from flow.c
* cfgcleanup.c: New file.
(try_simplify_condjump, try_forward_edges, tail_recursion_label_p,
merge_blocks_move_predecessor_nojumps,
merge_blocks_move_successor_nojumps, merge_blocks,
flow_find_cross_jump, outgoing_edges_match, try_crossjump_to_edge,
try_crossjump_bb, try_optimize_cfg): Move here from flow.c
(delete_unreachable_blocks, cleanup_cfg): Likewise; return true
if succeeded.
* cfgloop.c: New file
(flow_loops_cfg_dump, flow_loop_nested_p, flow_loop_dump,
flow_loops_dump, flow_loops_free, flow_loop_entry_edges_find,
flow_loop_exit_edges_find, flow_loop_nodes_find,
flow_loop_pre_header_scan, flow_loop_pre_header_find,
flow_loop_tree_node_add, flow_loops_tree_build,
flow_loop_level_compute, flow_loops_level_compute, flow_loop_scan,
flow_loops_find, flow_loops_update, flow_loop_outside_edge_p):
Move here from flow.c
* flow.c: Remove everything moved elsewhere
* output.h (cleanup_cfg): Return bool.
* bb-reorder.c (reorder_block_def): Remove 'index'.
(insert_intra_1): Add argument BB, set block for new note.
(make_reorder_chain): Do not depend on BB indexes.
(make_reorder_chain_1): Do not use BB indexes.
(label_for_bb): Likewise; set BB for new insn.
(emit_jump_to_block_after): Likewise.
(fixup_reorder_chain): Sanity check that all basic blocks
are chained; verify newly created insn chain; remove
unconditional jump simplifying; Do not use BB indexes;
properly initialize count and frequency information;
dump reordered sequence.
(insert_intra_bb_scope_notes): update call of insert_intra_1.
(insert_inter_bb_scope_notes): Set block for new insn.
(reorder_basic_blocks): Dump flow info before reordering.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@45504 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/ChangeLog | 76 | ||||
-rw-r--r-- | gcc/Makefile.in | 15 | ||||
-rw-r--r-- | gcc/basic-block.h | 15 | ||||
-rw-r--r-- | gcc/bb-reorder.c | 152 | ||||
-rw-r--r-- | gcc/cfg.c | 2517 | ||||
-rw-r--r-- | gcc/cfganal.c | 1074 | ||||
-rw-r--r-- | gcc/cfgbuild.c | 791 | ||||
-rw-r--r-- | gcc/cfgcleanup.c | 1248 | ||||
-rw-r--r-- | gcc/cfgloop.c | 854 | ||||
-rw-r--r-- | gcc/flow.c | 6269 | ||||
-rw-r--r-- | gcc/output.h | 2 |
11 files changed, 6669 insertions, 6344 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index b2f641ace7f..98445a2f82a 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,79 @@ +Mon Sep 10 14:21:26 CEST 2001 Jan Hubicka <jh@suse.cz> + + * Makefile.in (cfg.o, cfganal.o, cfgloop.o, cfgbuild.o, cfgcleanup.o): + New. + * basic-block.h (flow_obstack, label_value_list, + tail_recursion_label_list): Declare + (tidy_fallthru_edges): Declare. + (expunge_block, last_loop_beg_note): Delete. + (can_fallthru, flow_nodes_print, flow_edge_list_print): Declare. + * cfg.c: New file + (basic_block_for_insn, label_value_list): Move from flow.c; make global. + (n_basic_blocks, n_edges, basic_block_info, entry_exit_blocks, + init_flow, clear_edges, can_delete_note_p, can_delete_label_p, + flow_delete_insn, flow_delete_insn_chain, create_basic_block, + expunge_block, flow_delete_block, compute_bb_for_insn, + update_bb_for_insn, set_block_for_insn, set_block_for_new_insns, + make_edge, remove_edge, redirect_edge_succ, redirect_edge_succ_nodup, + redirect_edge_pred, split_block, marge_blocks_nomove, block_label, + try_redirect_by_replacing_jump, last_loop_beg_note, + redirect_edge_and_branch, redirect_edge_and_branch_force, + tidy_fallthru_edge, tidy_fallthru_edges, back_edge_of_syntactic_loop_p, + split_edge, insert_insn_on_edge, commit_one_edge_insertion, + commit_edge_insertions, dump_flow_info, debug_flow_info, + dump_edge_info, dump_bb, debug_bb, debug_bb_n, print_rtl_with_bb, + verify_flow_info, purge_dead_edges, purge_all_dead_edges): + Move here from flow.c + * cfganal.c: New file. 
+ (forwarder_block_p, can_fallthru, mark_critical_edges, + mark_dfs_back_edges, need_fake_edge_p, flow_call_edges_add, + find_unreachable_blocks, create_edge_list, free_edge_list, + print_edge_list, verify_edge_list, find_edge_index, flow_nodes_print, + flow_edge_list_print, remove_fake_successors, remove_fake_edges, + add_noreturn_fake_exit_edges, connect_infinite_loops_to_exit, + flow_reverse_top_sort_order_compute, flow_depth_first_order_compute, + flow_dfs_compute_reverse_init, flow_dfs-compute_reverse_add_bb, + flow_dfs-compute_reverse_execute, flow_dfs_compute_reverse_finish); + Move here from flow.c + * cfgbuild.c: New file + (count_basic_blocks, find_label_refs, make_label_edge, make_eh_edge, + make_edges, find_basic_blocks_1, find_basic_blocks, + find_sub_basic_blocks): Move here from flow.c + * cfgcleanup.c: New file. + (try_simplify_condjump, try_forward_edges, tail_recursion_label_p, + merge_blocks_move_predecessor_nojumps, + merge_blocks_move_successor_nojumps, merge_blocks, + flow_find_cross_jump, outgoing_edges_match, try_crossjump_to_edge, + try_crossjump_bb, try_optimize_cfg): Move here from flow.c + (delete_unreachable_blocks, cleanup_cfg): Likewise; return true + if succeeded. + * cfgloop.c: New file + (flow_loops_cfg_dump, flow_loop_nested_p, flow_loop_dump, + flow_loops_dump, flow_loops_free, flow_loop_entry_edges_find, + flow_loop_exit_edges_find, flow_loop_nodes_find, + flow_loop_pre_header_scan, flow_loop_pre_header_find, + flow_loop_tree_node_add, flow_loops_tree_build, + flow_loop_level_compute, flow_loops_level_compute, flow_loop_scan, + flow_loops_find, flow_loops_update, flow_loop_outside_edge_p): + Move here from flow.c + * flow.c: Remove everything moved elsewhere + * output.h (cleanup_cfg): Return bool. + + * bb-reorder.c (reorder_block_def): Remove 'index'. + (insert_intra_1): Add argument BB, set block for new note. + (make_reorder_chain): Do not depdent on BB indexes. + (make_reorder_chain_1): Do not use BB indexes. 
+ (label_for_bb): Likewise; set BB for new insn. + (emit_jump_to_block_after): Likewise. + (fixup_reoder_chain): Sanity check that all basic blocks + are chained; verify newly created insn chain; remove + undocnitional jump simplifying; Do not use BB indexes; + properly initialize count and frequency information; + dump reordered sequence. + (insert_intra_bb_scope_notes): update call of insert_intra_1. + (insert_inter_bb_scope_notes): Set block for new insn. + (reorder_basic_blocks): Dump flow info before reoredering. + Mon Sep 10 06:47:35 2001 Richard Kenner <kenner@vlsi1.ultra.nyu.edu> * alias.c (clear_reg_alias_info): Use K&R format definition. diff --git a/gcc/Makefile.in b/gcc/Makefile.in index 1c4343d9cb1..4c29d261e5b 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -746,7 +746,8 @@ OBJS = \ rtl-error.o sbitmap.o sched-deps.o sched-ebb.o sched-rgn.o sched-vis.o \ sdbout.o sibcall.o simplify-rtx.o splay-tree.o ssa.o ssa-ccp.o \ ssa-dce.o stmt.o stor-layout.o stringpool.o timevar.o toplev.o tree.o \ - unroll.o varasm.o varray.o version.o xcoffout.o \ + unroll.o varasm.o varray.o version.o xcoffout.o cfg.o cfganal.o \ + cfgbuild.o cfgcleanup.o cfgloop.o \ $(GGC) $(out_object_file) $(EXTRA_OBJS) BACKEND = main.o libbackend.a @@ -1487,6 +1488,18 @@ unroll.o : unroll.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) insn-config.h function.h \ flow.o : flow.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) $(TREE_H) flags.h insn-config.h \ $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h output.h toplev.h $(RECOG_H) \ function.h except.h $(EXPR_H) ssa.h $(GGC_H) $(TM_P_H) +cfg.o : cfg.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) flags.h insn-config.h \ + $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h output.h toplev.h $(RECOG_H) \ + function.h except.h $(GGC_H) +cfganal.o : cfganal.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) \ + $(BASIC_BLOCK_H) hard-reg-set.h $(GGC_H) +cfgbuild.o : cfgbuild.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) flags.h insn-config.h \ + $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h output.h toplev.h $(RECOG_H) 
\ + function.h except.h $(GGC_H) +cfgcleanup.o : cfgcleanup.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) \ + $(BASIC_BLOCK_H) hard-reg-set.h output.h flags.h $(RECOG_H) toplev.h $(GGC_H) +cfgloop.o : cfgloop.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) \ + $(BASIC_BLOCK_H) hard-reg-set.h dominance.o : dominance.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) hard-reg-set.h \ $(BASIC_BLOCK_H) combine.o : combine.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) flags.h function.h \ diff --git a/gcc/basic-block.h b/gcc/basic-block.h index b8754948fec..13241e8ffe6 100644 --- a/gcc/basic-block.h +++ b/gcc/basic-block.h @@ -246,6 +246,12 @@ extern varray_type basic_block_info; extern regset regs_live_at_setjmp; +/* Special labels found during CFG build. */ + +extern rtx label_value_list, tail_recursion_label_list; + +extern struct obstack flow_obstack; + /* Indexed by n, gives number of basic block that (REG n) is used in. If the value is REG_BLOCK_GLOBAL (-2), it means (REG n) is used in more than one basic block. @@ -310,6 +316,7 @@ extern int flow_delete_block PARAMS ((basic_block)); extern void merge_blocks_nomove PARAMS ((basic_block, basic_block)); extern void tidy_fallthru_edge PARAMS ((edge, basic_block, basic_block)); +extern void tidy_fallthru_edges PARAMS ((void)); extern void flow_reverse_top_sort_order_compute PARAMS ((int *)); extern int flow_depth_first_order_compute PARAMS ((int *, int *)); extern void dump_edge_info PARAMS ((FILE *, edge, int)); @@ -616,9 +623,7 @@ extern void debug_regset PARAMS ((regset)); extern void allocate_reg_life_data PARAMS ((void)); extern void allocate_bb_life_data PARAMS ((void)); extern void find_unreachable_blocks PARAMS ((void)); -extern void expunge_block PARAMS ((basic_block)); extern void delete_noop_moves PARAMS ((rtx)); -extern rtx last_loop_beg_note PARAMS ((rtx)); extern basic_block redirect_edge_and_branch_force PARAMS ((edge, basic_block)); extern bool redirect_edge_and_branch PARAMS ((edge, basic_block)); extern rtx block_label PARAMS ((basic_block)); @@ 
-626,7 +631,11 @@ extern bool forwarder_block_p PARAMS ((basic_block)); extern bool purge_all_dead_edges PARAMS ((void)); extern bool purge_dead_edges PARAMS ((basic_block)); extern void find_sub_basic_blocks PARAMS ((basic_block)); - +extern bool can_fallthru PARAMS ((basic_block, basic_block)); +extern void flow_nodes_print PARAMS ((const char *, const sbitmap, + FILE *)); +extern void flow_edge_list_print PARAMS ((const char *, const edge *, + int, FILE *)); /* This function is always defined so it can be called from the debugger, and it is declared extern so we don't get warnings about diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c index 72d3902e7fc..3102132eb3e 100644 --- a/gcc/bb-reorder.c +++ b/gcc/bb-reorder.c @@ -165,7 +165,6 @@ typedef struct reorder_block_def rtx eff_end; scope scope; basic_block next; - int index; int visited; } *reorder_block_def; @@ -187,7 +186,7 @@ static void relate_bbs_with_scopes PARAMS ((scope)); static scope make_new_scope PARAMS ((int, rtx)); static void build_scope_forest PARAMS ((scope_forest_info *)); static void remove_scope_notes PARAMS ((void)); -static void insert_intra_1 PARAMS ((scope, rtx *)); +static void insert_intra_1 PARAMS ((scope, rtx *, basic_block)); static void insert_intra_bb_scope_notes PARAMS ((basic_block)); static void insert_inter_bb_scope_notes PARAMS ((basic_block, basic_block)); static void rebuild_scope_notes PARAMS ((scope_forest_info *)); @@ -323,6 +322,7 @@ make_reorder_chain () basic_block last_block = NULL; basic_block prev = NULL; int nbb_m1 = n_basic_blocks - 1; + basic_block next; /* If we've not got epilogue in RTL, we must fallthru to the exit. Force the last block to be at the end. */ @@ -339,7 +339,8 @@ make_reorder_chain () do { int i; - basic_block next = NULL; + + next = NULL; /* Find the next unplaced block. */ /* ??? Get rid of this loop, and track which blocks are not yet @@ -348,27 +349,21 @@ make_reorder_chain () remove from the list as we place. 
The head of that list is what we're looking for here. */ - for (i = 0; i <= nbb_m1; ++i) + for (i = 0; i <= nbb_m1 && !next; ++i) { basic_block bb = BASIC_BLOCK (i); if (! RBI (bb)->visited) - { - next = bb; - break; - } + next = bb; } - if (! next) - abort (); - - prev = make_reorder_chain_1 (next, prev); + if (next) + prev = make_reorder_chain_1 (next, prev); } - while (RBI (prev)->index < nbb_m1); + while (next); /* Terminate the chain. */ if (! HAVE_epilogue) { RBI (prev)->next = last_block; - RBI (last_block)->index = RBI (prev)->index + 1; prev = last_block; } RBI (prev)->next = NULL; @@ -397,19 +392,18 @@ make_reorder_chain_1 (bb, prev) /* Mark this block visited. */ if (prev) { - int new_index; - restart: RBI (prev)->next = bb; - new_index = RBI (prev)->index + 1; - RBI (bb)->index = new_index; if (rtl_dump_file && prev->index + 1 != bb->index) - fprintf (rtl_dump_file, "Reordering block %d (%d) after %d (%d)\n", - bb->index, RBI (bb)->index, prev->index, RBI (prev)->index); + fprintf (rtl_dump_file, "Reordering block %d after %d\n", + bb->index, prev->index); } else - RBI (bb)->index = 0; + { + if (bb->index != 0) + abort (); + } RBI (bb)->visited = 1; prev = bb; @@ -508,13 +502,15 @@ label_for_bb (bb) if (GET_CODE (label) != CODE_LABEL) { if (rtl_dump_file) - fprintf (rtl_dump_file, "Emitting label for block %d (%d)\n", - bb->index, RBI (bb)->index); + fprintf (rtl_dump_file, "Emitting label for block %d\n", + bb->index); label = emit_label_before (gen_label_rtx (), label); if (bb->head == RBI (bb)->eff_head) RBI (bb)->eff_head = label; bb->head = label; + if (basic_block_for_insn) + set_block_for_insn (label, bb); } return label; @@ -540,8 +536,8 @@ emit_jump_to_block_after (bb, after) set_block_for_new_insns (jump, bb); if (rtl_dump_file) - fprintf (rtl_dump_file, "Emitting jump to block %d (%d)\n", - bb->index, RBI (bb)->index); + fprintf (rtl_dump_file, "Emitting jump to block %d\n", + bb->index); } else { @@ -549,6 +545,8 @@ emit_jump_to_block_after 
(bb, after) if (! HAVE_return) abort (); jump = emit_jump_insn_after (gen_return (), after); + if (basic_block_for_insn) + set_block_for_new_insns (jump, bb); if (rtl_dump_file) fprintf (rtl_dump_file, "Emitting return\n"); @@ -567,12 +565,16 @@ static void fixup_reorder_chain () { basic_block bb, last_bb; + int index; + rtx insn; + int old_n_basic_blocks = n_basic_blocks; /* First do the bulk reordering -- rechain the blocks without regard to the needed changes to jumps and labels. */ last_bb = BASIC_BLOCK (0); bb = RBI (last_bb)->next; + index = 1; while (bb) { rtx last_e = RBI (last_bb)->eff_end; @@ -583,19 +585,24 @@ fixup_reorder_chain () last_bb = bb; bb = RBI (bb)->next; + index++; } - { - rtx insn = RBI (last_bb)->eff_end; + if (index != n_basic_blocks) + abort (); - NEXT_INSN (insn) = function_tail_eff_head; - if (function_tail_eff_head) - PREV_INSN (function_tail_eff_head) = insn; + insn = RBI (last_bb)->eff_end; - while (NEXT_INSN (insn)) - insn = NEXT_INSN (insn); - set_last_insn (insn); - } + NEXT_INSN (insn) = function_tail_eff_head; + if (function_tail_eff_head) + PREV_INSN (function_tail_eff_head) = insn; + + while (NEXT_INSN (insn)) + insn = NEXT_INSN (insn); + set_last_insn (insn); +#ifdef ENABLE_CHECKING + verify_insn_chain (); +#endif /* Now add jumps and labels as needed to match the blocks new outgoing edges. */ @@ -621,27 +628,11 @@ fixup_reorder_chain () bb_end_insn = bb->end; if (GET_CODE (bb_end_insn) == JUMP_INSN) { - if (any_uncondjump_p (bb_end_insn)) - { - /* If the destination is still not next, nothing to do. */ - if (RBI (bb)->index + 1 != RBI (e_taken->dest)->index) - continue; - - /* Otherwise, we can remove the jump and cleanup the edge. 
*/ - tidy_fallthru_edge (e_taken, bb, e_taken->dest); - RBI (bb)->eff_end = skip_insns_after_block (bb); - RBI (e_taken->dest)->eff_head = NEXT_INSN (RBI (bb)->eff_end); - - if (rtl_dump_file) - fprintf (rtl_dump_file, "Removing jump in block %d (%d)\n", - bb->index, RBI (bb)->index); - continue; - } - else if (any_condjump_p (bb_end_insn)) + if (any_condjump_p (bb_end_insn)) { /* If the old fallthru is still next, nothing to do. */ - if (RBI (bb)->index + 1 == RBI (e_fall->dest)->index - || (RBI (bb)->index == n_basic_blocks - 1 + if (RBI (bb)->next == e_fall->dest + || (!RBI (bb)->next && e_fall->dest == EXIT_BLOCK_PTR)) continue; @@ -649,7 +640,7 @@ fixup_reorder_chain () such as happens at the very end of a function, then we'll need to add a new unconditional jump. Choose the taken edge based on known or assumed probability. */ - if (RBI (bb)->index + 1 != RBI (e_taken->dest)->index) + if (RBI (bb)->next != e_taken->dest) { rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0); if (note @@ -684,7 +675,7 @@ fixup_reorder_chain () #ifdef CASE_DROPS_THROUGH /* Except for VAX. Since we didn't have predication for the tablejump, the fallthru block should not have moved. */ - if (RBI (bb)->index + 1 == RBI (e_fall->dest)->index) + if (RBI (bb)->next == e_fall->dest) continue; bb_end_insn = skip_insns_after_block (bb); #else @@ -701,9 +692,7 @@ fixup_reorder_chain () continue; /* If the fallthru block is still next, nothing to do. */ - if (RBI (bb)->index + 1 == RBI (e_fall->dest)->index - || (RBI (bb)->index == n_basic_blocks - 1 - && e_fall->dest == EXIT_BLOCK_PTR)) + if (RBI (bb)->next == e_fall->dest) continue; /* We need a new jump insn. 
If the block has only one outgoing @@ -730,12 +719,12 @@ fixup_reorder_chain () create_basic_block (n_basic_blocks - 1, jump_insn, jump_insn, NULL); nb = BASIC_BLOCK (n_basic_blocks - 1); - nb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - nb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); nb->local_set = 0; nb->count = e_fall->count; nb->frequency = EDGE_FREQUENCY (e_fall); + nb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); + nb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); COPY_REG_SET (nb->global_live_at_start, bb->global_live_at_start); COPY_REG_SET (nb->global_live_at_end, bb->global_live_at_start); @@ -743,7 +732,6 @@ fixup_reorder_chain () RBI (nb)->eff_head = nb->head; RBI (nb)->eff_end = barrier_insn; RBI (nb)->scope = RBI (bb)->scope; - RBI (nb)->index = RBI (bb)->index + 1; RBI (nb)->visited = 1; RBI (nb)->next = RBI (bb)->next; RBI (bb)->next = nb; @@ -756,17 +744,26 @@ fixup_reorder_chain () /* Don't process this new block. */ bb = nb; - - /* Fix subsequent reorder block indices to reflect new block. */ - while ((nb = RBI (nb)->next) != NULL) - RBI (nb)->index += 1; } /* Put basic_block_info in the new order. */ - for (bb = BASIC_BLOCK (0); bb ; bb = RBI (bb)->next) + bb = BASIC_BLOCK (0); + index = 0; + + if (rtl_dump_file) + fprintf (rtl_dump_file, "Reordered sequence:\n"); + while (bb) { - bb->index = RBI (bb)->index; - BASIC_BLOCK (bb->index) = bb; + if (rtl_dump_file) + fprintf (rtl_dump_file, " %i %sbb %i freq %i\n", index, + bb->index >= old_n_basic_blocks ? "compensation " : "", + bb->index, + bb->frequency); + bb->index = index; + BASIC_BLOCK (index) = bb; + + bb = RBI (bb)->next; + index++; } } @@ -1142,9 +1139,10 @@ remove_scope_notes () /* Insert scope note pairs for a contained scope tree S after insn IP. 
*/ static void -insert_intra_1 (s, ip) +insert_intra_1 (s, ip, bb) scope s; rtx *ip; + basic_block bb; { scope p; @@ -1152,15 +1150,19 @@ insert_intra_1 (s, ip) { *ip = emit_note_after (NOTE_INSN_BLOCK_BEG, *ip); NOTE_BLOCK (*ip) = NOTE_BLOCK (s->note_beg); + if (basic_block_for_insn) + set_block_for_insn (*ip, bb); } for (p = s->inner; p; p = p->next) - insert_intra_1 (p, ip); + insert_intra_1 (p, ip, bb); if (NOTE_BLOCK (s->note_beg)) { *ip = emit_note_after (NOTE_INSN_BLOCK_END, *ip); NOTE_BLOCK (*ip) = NOTE_BLOCK (s->note_end); + if (basic_block_for_insn) + set_block_for_insn (*ip, bb); } } @@ -1186,7 +1188,7 @@ insert_intra_bb_scope_notes (bb) for (p = s->inner; p; p = p->next) { if (p->bb_beg != NULL && p->bb_beg == p->bb_end && p->bb_beg == bb) - insert_intra_1 (p, &ip); + insert_intra_1 (p, &ip, bb); } } @@ -1254,6 +1256,8 @@ insert_inter_bb_scope_notes (bb1, bb2) { ip = emit_note_after (NOTE_INSN_BLOCK_END, ip); NOTE_BLOCK (ip) = NOTE_BLOCK (s->note_end); + if (basic_block_for_insn) + set_block_for_insn (ip, bb1); } s = s->outer; } @@ -1270,6 +1274,8 @@ insert_inter_bb_scope_notes (bb1, bb2) { ip = emit_note_before (NOTE_INSN_BLOCK_BEG, ip); NOTE_BLOCK (ip) = NOTE_BLOCK (s->note_beg); + if (basic_block_for_insn) + set_block_for_insn (ip, bb2); } s = s->outer; } @@ -1414,6 +1420,10 @@ reorder_basic_blocks () record_effective_endpoints (); make_reorder_chain (); + + if (rtl_dump_file) + dump_flow_info (rtl_dump_file); + fixup_reorder_chain (); #ifdef ENABLE_CHECKING diff --git a/gcc/cfg.c b/gcc/cfg.c new file mode 100644 index 00000000000..3f11a55982f --- /dev/null +++ b/gcc/cfg.c @@ -0,0 +1,2517 @@ +/* Control flow graph manipulation code for GNU compiler. + Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 2001 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to the Free +Software Foundation, 59 Temple Place - Suite 330, Boston, MA +02111-1307, USA. */ + +/* This file contains low level functions to manipulate with CFG and analyze it. + All other modules should not transform the datastructure directly and use + abstraction instead. The file is supposed to be ordered bottom-up. + + Available functionality: + - Initialization/deallocation + init_flow, clear_edges + - CFG aware instruction chain manipulation + flow_delete_insn, flow_delete_insn_chain + - Basic block manipulation + create_basic_block, flow_delete_block, split_block, merge_blocks_nomove + - Infrastructure to determine quickly basic block for instruction. 
+ compute_bb_for_insn, update_bb_for_insn, set_block_for_insn, + set_block_for_new_insns + - Edge manipulation + make_edge, remove_edge + - Low level edge redirection (without updating instruction chain) + redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred + - High level edge redirection (with updating and optimizing instruction + chain) + block_label, redirect_edge_and_branch, + redirect_edge_and_branch_force, tidy_fallthru_edge + - Edge splitting and commiting to edges + split_edge, insert_insn_on_edge, commit_edge_insertions + - Dumpipng and debugging + dump_flow_info, debug_flow_info, dump_edge_info, dump_bb, debug_bb, + debug_bb_n, print_rtl_with_bb + - Consistency checking + verify_flow_info + - CFG updating after constant propagation + purge_dead_edges, purge_all_dead_edges + */ + +#include "config.h" +#include "system.h" +#include "tree.h" +#include "rtl.h" +#include "hard-reg-set.h" +#include "basic-block.h" +#include "regs.h" +#include "flags.h" +#include "output.h" +#include "function.h" +#include "except.h" +#include "toplev.h" + +#include "obstack.h" + +/* The obstack on which the flow graph components are allocated. */ + +struct obstack flow_obstack; +static char *flow_firstobj; + +/* Number of basic blocks in the current function. */ + +int n_basic_blocks; + +/* Number of edges in the current function. */ + +int n_edges; + +/* The basic block array. */ + +varray_type basic_block_info; + +/* The special entry and exit blocks. 
*/ + +struct basic_block_def entry_exit_blocks[2] += {{NULL, /* head */ + NULL, /* end */ + NULL, /* head_tree */ + NULL, /* end_tree */ + NULL, /* pred */ + NULL, /* succ */ + NULL, /* local_set */ + NULL, /* cond_local_set */ + NULL, /* global_live_at_start */ + NULL, /* global_live_at_end */ + NULL, /* aux */ + ENTRY_BLOCK, /* index */ + 0, /* loop_depth */ + 0, /* count */ + 0, /* frequency */ + 0 /* flags */ + }, + { + NULL, /* head */ + NULL, /* end */ + NULL, /* head_tree */ + NULL, /* end_tree */ + NULL, /* pred */ + NULL, /* succ */ + NULL, /* local_set */ + NULL, /* cond_local_set */ + NULL, /* global_live_at_start */ + NULL, /* global_live_at_end */ + NULL, /* aux */ + EXIT_BLOCK, /* index */ + 0, /* loop_depth */ + 0, /* count */ + 0, /* frequency */ + 0 /* flags */ + } +}; + +/* The basic block structure for every insn, indexed by uid. */ + +varray_type basic_block_for_insn; + +/* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */ +/* ??? Should probably be using LABEL_NUSES instead. It would take a + bit of surgery to be able to use or co-opt the routines in jump. */ + +rtx label_value_list; +rtx tail_recursion_label_list; + +void debug_flow_info PARAMS ((void)); +static int can_delete_note_p PARAMS ((rtx)); +static int can_delete_label_p PARAMS ((rtx)); +static void commit_one_edge_insertion PARAMS ((edge)); +static bool try_redirect_by_replacing_jump PARAMS ((edge, basic_block)); +static void expunge_block PARAMS ((basic_block)); +static rtx last_loop_beg_note PARAMS ((rtx)); +static bool back_edge_of_syntactic_loop_p PARAMS ((basic_block, basic_block)); + +/* Called once at intialization time. 
*/ + +void +init_flow () +{ + static int initialized; + + if (!initialized) + { + gcc_obstack_init (&flow_obstack); + flow_firstobj = (char *) obstack_alloc (&flow_obstack, 0); + initialized = 1; + } + else + { + obstack_free (&flow_obstack, flow_firstobj); + flow_firstobj = (char *) obstack_alloc (&flow_obstack, 0); + } +} + +/* Free the memory associated with the edge structures. */ + +void +clear_edges () +{ + int i; + edge n, e; + + for (i = 0; i < n_basic_blocks; ++i) + { + basic_block bb = BASIC_BLOCK (i); + + for (e = bb->succ; e; e = n) + { + n = e->succ_next; + free (e); + } + + bb->succ = 0; + bb->pred = 0; + } + + for (e = ENTRY_BLOCK_PTR->succ; e; e = n) + { + n = e->succ_next; + free (e); + } + + ENTRY_BLOCK_PTR->succ = 0; + EXIT_BLOCK_PTR->pred = 0; + + n_edges = 0; +} + +/* Return true if NOTE is not one of the ones that must be kept paired, + so that we may simply delete them. */ + +static int +can_delete_note_p (note) + rtx note; +{ + return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED + || NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK); +} + +/* True if a given label can be deleted. */ + +static int +can_delete_label_p (label) + rtx label; +{ + rtx x; + + if (LABEL_PRESERVE_P (label)) + return 0; + + for (x = forced_labels; x; x = XEXP (x, 1)) + if (label == XEXP (x, 0)) + return 0; + for (x = label_value_list; x; x = XEXP (x, 1)) + if (label == XEXP (x, 0)) + return 0; + for (x = exception_handler_labels; x; x = XEXP (x, 1)) + if (label == XEXP (x, 0)) + return 0; + + /* User declared labels must be preserved. */ + if (LABEL_NAME (label) != 0) + return 0; + + return 1; +} + +/* Delete INSN by patching it out. Return the next insn. 
*/ + +rtx +flow_delete_insn (insn) + rtx insn; +{ + rtx prev = PREV_INSN (insn); + rtx next = NEXT_INSN (insn); + rtx note; + + PREV_INSN (insn) = NULL_RTX; + NEXT_INSN (insn) = NULL_RTX; + INSN_DELETED_P (insn) = 1; + + if (prev) + NEXT_INSN (prev) = next; + if (next) + PREV_INSN (next) = prev; + else + set_last_insn (prev); + + if (GET_CODE (insn) == CODE_LABEL) + remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels); + + /* If deleting a jump, decrement the use count of the label. Deleting + the label itself should happen in the normal course of block merging. */ + if (GET_CODE (insn) == JUMP_INSN + && JUMP_LABEL (insn) + && GET_CODE (JUMP_LABEL (insn)) == CODE_LABEL) + LABEL_NUSES (JUMP_LABEL (insn))--; + + /* Also if deleting an insn that references a label. */ + else if ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != NULL_RTX + && GET_CODE (XEXP (note, 0)) == CODE_LABEL) + LABEL_NUSES (XEXP (note, 0))--; + + if (GET_CODE (insn) == JUMP_INSN + && (GET_CODE (PATTERN (insn)) == ADDR_VEC + || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) + { + rtx pat = PATTERN (insn); + int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC; + int len = XVECLEN (pat, diff_vec_p); + int i; + + for (i = 0; i < len; i++) + LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))--; + } + + return next; +} + +/* Unlink a chain of insns between START and FINISH, leaving notes + that must be paired. */ + +void +flow_delete_insn_chain (start, finish) + rtx start, finish; +{ + /* Unchain the insns one by one. It would be quicker to delete all + of these with a single unchaining, rather than one at a time, but + we need to keep the NOTE's. */ + + rtx next; + + while (1) + { + next = NEXT_INSN (start); + if (GET_CODE (start) == NOTE && !can_delete_note_p (start)) + ; + else if (GET_CODE (start) == CODE_LABEL + && ! 
can_delete_label_p (start)) + { + const char *name = LABEL_NAME (start); + PUT_CODE (start, NOTE); + NOTE_LINE_NUMBER (start) = NOTE_INSN_DELETED_LABEL; + NOTE_SOURCE_FILE (start) = name; + } + else + next = flow_delete_insn (start); + + if (start == finish) + break; + start = next; + } +} + +/* Create a new basic block consisting of the instructions between + HEAD and END inclusive. Reuses the note and basic block struct + in BB_NOTE, if any. */ + +void +create_basic_block (index, head, end, bb_note) + int index; + rtx head, end, bb_note; +{ + basic_block bb; + + if (bb_note + && ! RTX_INTEGRATED_P (bb_note) + && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL + && bb->aux == NULL) + { + /* If we found an existing note, thread it back onto the chain. */ + + rtx after; + + if (GET_CODE (head) == CODE_LABEL) + after = head; + else + { + after = PREV_INSN (head); + head = bb_note; + } + + if (after != bb_note && NEXT_INSN (after) != bb_note) + reorder_insns (bb_note, bb_note, after); + } + else + { + /* Otherwise we must create a note and a basic block structure. + Since we allow basic block structs in rtl, give the struct + the same lifetime by allocating it off the function obstack + rather than using malloc. */ + + bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*bb)); + memset (bb, 0, sizeof (*bb)); + + if (GET_CODE (head) == CODE_LABEL) + bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head); + else + { + bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head); + head = bb_note; + } + NOTE_BASIC_BLOCK (bb_note) = bb; + } + + /* Always include the bb note in the block. */ + if (NEXT_INSN (end) == bb_note) + end = bb_note; + + bb->head = head; + bb->end = end; + bb->index = index; + BASIC_BLOCK (index) = bb; + + /* Tag the block so that we know it has been used when considering + other basic block notes. */ + bb->aux = bb; +} + +/* Remove block B from the basic block array and compact behind it. 
*/

static void
expunge_block (b)
     basic_block b;
{
  int i, n = n_basic_blocks;

  /* Shift every later block down one slot, keeping indices in sync.  */
  for (i = b->index; i + 1 < n; ++i)
    {
      basic_block x = BASIC_BLOCK (i + 1);
      BASIC_BLOCK (i) = x;
      x->index = i;
    }

  basic_block_info->num_elements--;
  n_basic_blocks--;
}

/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.

   Return nonzero if we deleted an exception handler.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

int
flow_delete_block (b)
     basic_block b;
{
  /* NOTE(review): deleted_handler is never assigned below, so this
     function currently always returns zero.  */
  int deleted_handler = 0;
  rtx insn, end, tmp;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.

     We need to remove the label from the exception_handler_label list
     and remove the associated NOTE_INSN_EH_REGION_BEG and
     NOTE_INSN_EH_REGION_END notes.  */

  insn = b->head;

  never_reached_warning (insn);

  if (GET_CODE (insn) == CODE_LABEL)
    maybe_remove_eh_handler (insn);

  /* Include any jump table following the basic block.  */
  end = b->end;
  if (GET_CODE (end) == JUMP_INSN
      && (tmp = JUMP_LABEL (end)) != NULL_RTX
      && (tmp = NEXT_INSN (tmp)) != NULL_RTX
      && GET_CODE (tmp) == JUMP_INSN
      && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
	  || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
    end = tmp;

  /* Include any barrier that may follow the basic block.  */
  tmp = next_nonnote_insn (end);
  if (tmp && GET_CODE (tmp) == BARRIER)
    end = tmp;

  /* Selectively delete the entire chain.  */
  flow_delete_insn_chain (insn, end);

  /* Remove the edges into and out of this block.  Note that there may
     indeed be edges in, if we are removing an unreachable loop.  */
  {
    edge e, next, *q;

    for (e = b->pred; e; e = next)
      {
	/* Unsplice E from its source block's successor list.  */
	for (q = &e->src->succ; *q != e; q = &(*q)->succ_next)
	  continue;
	*q = e->succ_next;
	next = e->pred_next;
	n_edges--;
	free (e);
      }
    for (e = b->succ; e; e = next)
      {
	/* Unsplice E from its destination block's predecessor list.  */
	for (q = &e->dest->pred; *q != e; q = &(*q)->pred_next)
	  continue;
	*q = e->pred_next;
	next = e->succ_next;
	n_edges--;
	free (e);
      }

    b->pred = NULL;
    b->succ = NULL;
  }

  /* Remove the basic block from the array, and compact behind it.  */
  expunge_block (b);

  return deleted_handler;
}

/* Records the basic block struct in BB_FOR_INSN, for every instruction
   indexed by INSN_UID.  MAX is the size of the array.  */

void
compute_bb_for_insn (max)
     int max;
{
  int i;

  if (basic_block_for_insn)
    VARRAY_FREE (basic_block_for_insn);
  VARRAY_BB_INIT (basic_block_for_insn, max, "basic_block_for_insn");

  for (i = 0; i < n_basic_blocks; ++i)
    {
      basic_block bb = BASIC_BLOCK (i);
      rtx insn, end;

      end = bb->end;
      insn = bb->head;
      while (1)
	{
	  int uid = INSN_UID (insn);
	  /* Insns with uids beyond MAX are silently skipped.  */
	  if (uid < max)
	    VARRAY_BB (basic_block_for_insn, uid) = bb;
	  if (insn == end)
	    break;
	  insn = NEXT_INSN (insn);
	}
    }
}

/* Update insns block within BB.  */

void
update_bb_for_insn (bb)
     basic_block bb;
{
  rtx insn;

  /* Nothing to do if the insn-to-block map was never allocated.  */
  if (! basic_block_for_insn)
    return;

  for (insn = bb->head; ; insn = NEXT_INSN (insn))
    {
      set_block_for_insn (insn, bb);

      if (insn == bb->end)
	break;
    }
}

/* Record INSN's block as BB.  */

void
set_block_for_insn (insn, bb)
     rtx insn;
     basic_block bb;
{
  size_t uid = INSN_UID (insn);
  if (uid >= basic_block_for_insn->num_elements)
    {
      int new_size;

      /* Add one-eighth the size so we don't keep calling xrealloc.  */
      new_size = uid + (uid + 7) / 8;

      VARRAY_GROW (basic_block_for_insn, new_size);
    }
  VARRAY_BB (basic_block_for_insn, uid) = bb;
}

/* When a new insn has been inserted into an existing block, it will
   sometimes emit more than a single insn.
   This routine will set the
   block number for the specified insn, and look backwards in the insn
   chain to see if there are any other uninitialized insns immediately
   previous to this one, and set the block number for them too.  */

void
set_block_for_new_insns (insn, bb)
     rtx insn;
     basic_block bb;
{
  set_block_for_insn (insn, bb);

  /* Scan the previous instructions setting the block number until we find
     an instruction that has the block number set, or we find a note
     of any kind.  */
  for (insn = PREV_INSN (insn); insn != NULL_RTX; insn = PREV_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	break;
      if ((unsigned) INSN_UID (insn) >= basic_block_for_insn->num_elements
	  || BLOCK_FOR_INSN (insn) == 0)
	set_block_for_insn (insn, bb);
      else
	break;
    }
}

/* Add an edge from SRC to DST with flags FLAGS, merging flags into an
   existing edge rather than creating a duplicate.  EDGE_CACHE, if
   non-null, is a per-source-block bitmap of existing edge targets.  */

void
make_edge (edge_cache, src, dst, flags)
     sbitmap *edge_cache;
     basic_block src, dst;
     int flags;
{
  int use_edge_cache;
  edge e;

  /* Don't bother with edge cache for ENTRY or EXIT; there aren't that
     many edges to them, and we didn't allocate memory for it.  */
  use_edge_cache = (edge_cache
		    && src != ENTRY_BLOCK_PTR
		    && dst != EXIT_BLOCK_PTR);

  /* Make sure we don't add duplicate edges.  */
  switch (use_edge_cache)
    {
    default:
      /* Quick test for non-existence of the edge.  */
      if (! TEST_BIT (edge_cache[src->index], dst->index))
	break;

      /* The edge exists; early exit if no work to do.  */
      if (flags == 0)
	return;

      /* FALLTHRU */
    case 0:
      /* No cache, or the cache says the edge exists and FLAGS must be
	 merged: scan the successor list for an existing edge to DST.  */
      for (e = src->succ; e; e = e->succ_next)
	if (e->dest == dst)
	  {
	    e->flags |= flags;
	    return;
	  }
      break;
    }

  e = (edge) xcalloc (1, sizeof (*e));
  n_edges++;

  /* Link the new edge at the head of both adjacency lists.  */
  e->succ_next = src->succ;
  e->pred_next = dst->pred;
  e->src = src;
  e->dest = dst;
  e->flags = flags;

  src->succ = e;
  dst->pred = e;

  if (use_edge_cache)
    SET_BIT (edge_cache[src->index], dst->index);
}

/* This function will remove an edge from the flow graph.
*/

void
remove_edge (e)
     edge e;
{
  edge last_pred = NULL;
  edge last_succ = NULL;
  edge tmp;
  basic_block src, dest;
  src = e->src;
  dest = e->dest;

  /* Find E's predecessor on the source block's successor list.  */
  for (tmp = src->succ; tmp && tmp != e; tmp = tmp->succ_next)
    last_succ = tmp;

  if (!tmp)
    abort ();
  if (last_succ)
    last_succ->succ_next = e->succ_next;
  else
    src->succ = e->succ_next;

  /* Likewise on the destination block's predecessor list.  */
  for (tmp = dest->pred; tmp && tmp != e; tmp = tmp->pred_next)
    last_pred = tmp;

  if (!tmp)
    abort ();
  if (last_pred)
    last_pred->pred_next = e->pred_next;
  else
    dest->pred = e->pred_next;

  n_edges--;
  free (e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (e, new_succ)
     edge e;
     basic_block new_succ;
{
  edge *pe;

  /* Disconnect the edge from the old successor block.  */
  for (pe = &e->dest->pred; *pe != e; pe = &(*pe)->pred_next)
    continue;
  *pe = (*pe)->pred_next;

  /* Reconnect the edge to the new successor block.  */
  e->pred_next = new_succ->pred;
  new_succ->pred = e;
  e->dest = new_succ;
}

/* Like previous but avoid possible duplicate edge.  */

edge
redirect_edge_succ_nodup (e, new_succ)
     edge e;
     basic_block new_succ;
{
  edge s;
  /* Check whether the edge is already present.  */
  for (s = e->src->succ; s; s = s->succ_next)
    if (s->dest == new_succ && s != e)
      break;
  if (s)
    {
      /* Merge E into the existing edge S and discard E.  */
      s->flags |= e->flags;
      s->probability += e->probability;
      s->count += e->count;
      remove_edge (e);
      e = s;
    }
  else
    redirect_edge_succ (e, new_succ);
  return e;
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (e, new_pred)
     edge e;
     basic_block new_pred;
{
  edge *pe;

  /* Disconnect the edge from the old predecessor block.  */
  for (pe = &e->src->succ; *pe != e; pe = &(*pe)->succ_next)
    continue;
  *pe = (*pe)->succ_next;

  /* Reconnect the edge to the new predecessor block.  */
  e->succ_next = new_pred->succ;
  new_pred->succ = e;
  e->src = new_pred;
}

/* Split a block BB after insn INSN creating a new fallthru edge.
   Return the new edge.  Note that to keep other parts of the compiler happy,
   this function renumbers all the basic blocks so that the new
   one has a number one greater than the block split.  */

edge
split_block (bb, insn)
     basic_block bb;
     rtx insn;
{
  basic_block new_bb;
  edge new_edge;
  edge e;
  rtx bb_note;
  int i, j;

  /* There is no point splitting the block after its end.  */
  if (bb->end == insn)
    return 0;

  /* Create the new structures.  */
  new_bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*new_bb));
  new_edge = (edge) xcalloc (1, sizeof (*new_edge));
  n_edges++;

  memset (new_bb, 0, sizeof (*new_bb));

  /* NEW_BB takes everything after INSN; BB keeps the rest.  */
  new_bb->head = NEXT_INSN (insn);
  new_bb->end = bb->end;
  bb->end = insn;

  new_bb->succ = bb->succ;
  bb->succ = new_edge;
  new_bb->pred = new_edge;
  new_bb->count = bb->count;
  new_bb->frequency = bb->frequency;
  new_bb->loop_depth = bb->loop_depth;

  new_edge->src = bb;
  new_edge->dest = new_bb;
  new_edge->flags = EDGE_FALLTHRU;
  new_edge->probability = REG_BR_PROB_BASE;
  new_edge->count = bb->count;

  /* Redirect the src of the successor edges of bb to point to new_bb.  */
  for (e = new_bb->succ; e; e = e->succ_next)
    e->src = new_bb;

  /* Place the new block just after the block being split.  */
  VARRAY_GROW (basic_block_info, ++n_basic_blocks);

  /* Some parts of the compiler expect blocks to be numbered in
     sequential order so insert the new block immediately after the
     block being split..  */
  j = bb->index;
  for (i = n_basic_blocks - 1; i > j + 1; --i)
    {
      basic_block tmp = BASIC_BLOCK (i - 1);
      BASIC_BLOCK (i) = tmp;
      tmp->index = i;
    }

  BASIC_BLOCK (i) = new_bb;
  new_bb->index = i;

  if (GET_CODE (new_bb->head) == CODE_LABEL)
    {
      /* Create the basic block note.  */
      bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK,
				 new_bb->head);
      NOTE_BASIC_BLOCK (bb_note) = new_bb;

      /* If the only thing in this new block was the label, make sure
	 the block note gets included.  */
      if (new_bb->head == new_bb->end)
	new_bb->end = bb_note;
    }
  else
    {
      /* Create the basic block note.  */
      bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK,
				  new_bb->head);
      NOTE_BASIC_BLOCK (bb_note) = new_bb;
      new_bb->head = bb_note;
    }

  update_bb_for_insn (new_bb);

  if (bb->global_live_at_start)
    {
      new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
      new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
      COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end);

      /* We now have to calculate which registers are live at the end
	 of the split basic block and at the start of the new basic
	 block.  Start with those registers that are known to be live
	 at the end of the original basic block and get
	 propagate_block to determine which registers are live.  */
      COPY_REG_SET (new_bb->global_live_at_start, bb->global_live_at_end);
      propagate_block (new_bb, new_bb->global_live_at_start, NULL, NULL, 0);
      COPY_REG_SET (bb->global_live_at_end,
		    new_bb->global_live_at_start);
    }

  return new_edge;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous, hence `nomove'.  */

void
merge_blocks_nomove (a, b)
     basic_block a, b;
{
  edge e;
  rtx b_head, b_end, a_end;
  rtx del_first = NULL_RTX, del_last = NULL_RTX;
  int b_empty = 0;

  /* If there was a CODE_LABEL beginning B, delete it.  */
  b_head = b->head;
  b_end = b->end;
  if (GET_CODE (b_head) == CODE_LABEL)
    {
      /* Detect basic blocks with nothing but a label.  This can happen
	 in particular at the end of a function.  */
      if (b_head == b_end)
	b_empty = 1;
      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
	b_empty = 1;
      if (! del_last)
	del_first = b_head;
      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  a_end = a->end;
  if (GET_CODE (a_end) == JUMP_INSN)
    {
      rtx prev;

      /* Back up over notes preceding the jump so they are deleted
	 along with it.  */
      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
	if (GET_CODE (prev) != NOTE
	    || NOTE_LINE_NUMBER (prev) == NOTE_INSN_BASIC_BLOCK
	    || prev == a->head)
	  break;

      del_first = a_end;

#ifdef HAVE_cc0
      /* If this was a conditional jump, we need to also delete
	 the insn that set cc0.  */
      if (only_sets_cc0_p (prev))
	{
	  rtx tmp = prev;
	  prev = prev_nonnote_insn (prev);
	  if (!prev)
	    prev = a->head;
	  del_first = tmp;
	}
#endif

      a_end = prev;
    }
  else if (GET_CODE (NEXT_INSN (a_end)) == BARRIER)
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  flow_delete_insn_chain (del_first, del_last);

  /* Normally there should only be one successor of A and that is B, but
     partway though the merge of blocks for conditional_execution we'll
     be merging a TEST block with THEN and ELSE successors.  Free the
     whole lot of them and hope the caller knows what they're doing.  */
  while (a->succ)
    remove_edge (a->succ);

  /* Adjust the edges out of B for the new owner.  */
  for (e = b->succ; e; e = e->succ_next)
    e->src = a;
  a->succ = b->succ;

  /* B hasn't quite yet ceased to exist.  Attempt to prevent mishap.  */
  b->pred = b->succ = NULL;

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      if (basic_block_for_insn)
	{
	  BLOCK_FOR_INSN (b_head) = a;
	  while (b_head != b_end)
	    {
	      b_head = NEXT_INSN (b_head);
	      BLOCK_FOR_INSN (b_head) = a;
	    }
	}
      a_end = b_end;
    }
  a->end = a_end;

  expunge_block (b);
}

/* Return label in the head of basic block.  Create one if it doesn't exist.
*/

rtx
block_label (block)
     basic_block block;
{
  /* The exit block has no head insn, hence no label.  */
  if (block == EXIT_BLOCK_PTR)
    return NULL_RTX;
  if (GET_CODE (block->head) != CODE_LABEL)
    {
      block->head = emit_label_before (gen_label_rtx (), block->head);
      if (basic_block_for_insn)
	set_block_for_insn (block->head, block);
    }
  return block->head;
}

/* Attempt to perform edge redirection by replacing possibly complex jump
   instruction by unconditional jump or removing jump completely.
   This can apply only if all edges now point to the same block.

   The parameters and return values are equivalent to
   redirect_edge_and_branch.  */

static bool
try_redirect_by_replacing_jump (e, target)
     edge e;
     basic_block target;
{
  basic_block src = e->src;
  rtx insn = src->end, kill_from;
  edge tmp;
  rtx set;
  int fallthru = 0;

  /* Verify that all targets will be TARGET.  */
  for (tmp = src->succ; tmp; tmp = tmp->succ_next)
    if (tmp->dest != target && tmp != e)
      break;
  if (tmp || !onlyjump_p (insn))
    return false;

  /* Avoid removing branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return false;

  /* In case we zap a conditional jump, we'll need to kill
     the cc0 setter too.  */
  kill_from = insn;
#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, PATTERN (insn)))
    kill_from = PREV_INSN (insn);
#endif

  /* See if we can create the fallthru edge.  */
  if (can_fallthru (src, target))
    {
      src->end = PREV_INSN (kill_from);
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = 1;

      /* Selectively unlink whole insn chain.  */
      flow_delete_insn_chain (kill_from, PREV_INSN (target->head));
    }
  /* If this already is simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
	return false;
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Redirecting jump %i from %i to %i.\n",
		 INSN_UID (insn), e->dest->index, target->index);
      redirect_jump (insn, block_label (target), 0);
    }
  /* Or replace possibly complicated jump insn by simple jump insn.  */
  else
    {
      rtx target_label = block_label (target);
      rtx barrier;

      src->end = emit_jump_insn_before (gen_jump (target_label), kill_from);
      JUMP_LABEL (src->end) = target_label;
      LABEL_NUSES (target_label)++;
      if (basic_block_for_insn)
	set_block_for_new_insns (src->end, src);
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Replacing insn %i by jump %i\n",
		 INSN_UID (insn), INSN_UID (src->end));

      flow_delete_insn_chain (kill_from, insn);

      /* The new unconditional jump needs a following barrier; add one
	 if none survived the deletion above.  */
      barrier = next_nonnote_insn (src->end);
      if (!barrier || GET_CODE (barrier) != BARRIER)
	emit_barrier_after (src->end);
    }

  /* Keep only one edge out and set proper flags.  */
  while (src->succ->succ_next)
    remove_edge (src->succ);
  e = src->succ;
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;
  e->probability = REG_BR_PROB_BASE;
  e->count = src->count;

  /* We don't want a block to end on a line-number note since that has
     the potential of changing the code between -g and not -g.  */
  while (GET_CODE (e->src->end) == NOTE
	 && NOTE_LINE_NUMBER (e->src->end) >= 0)
    {
      rtx prev = PREV_INSN (e->src->end);
      flow_delete_insn (e->src->end);
      e->src->end = prev;
    }

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return true;
}

/* Return last loop_beg note appearing after INSN, before start of next
   basic block.  Return INSN if there are no such notes.

   When emitting a jump to redirect a fallthru edge, it should always
   appear after the LOOP_BEG notes, as the loop optimizer expects a loop
   to either start by a fallthru edge or by a jump following the LOOP_BEG
   note jumping to the loop exit test.
*/

static rtx
last_loop_beg_note (insn)
     rtx insn;
{
  rtx last = insn;

  /* Scan forward through consecutive notes up to, but not including,
     the next basic block note.  */
  insn = NEXT_INSN (insn);
  while (GET_CODE (insn) == NOTE
	 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK)
    {
      if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	last = insn;
      insn = NEXT_INSN (insn);
    }
  return last;
}

/* Attempt to change code to redirect edge E to TARGET.
   Don't do that on expense of adding new instructions or reordering
   basic blocks.

   Function can be also called with edge destination equivalent to the
   TARGET.  Then it should try the simplifications and do nothing if
   none is possible.

   Return true if transformation succeeded.  We still return false in
   case E already pointed to TARGET and we didn't manage to simplify the
   instruction stream.  */

bool
redirect_edge_and_branch (e, target)
     edge e;
     basic_block target;
{
  rtx tmp;
  rtx old_label = e->dest->head;
  basic_block src = e->src;
  rtx insn = src->end;

  /* Complex (abnormal/EH) edges cannot be redirected this way.  */
  if (e->flags & EDGE_COMPLEX)
    return false;

  if (try_redirect_by_replacing_jump (e, target))
    return true;
  /* Do this fast path late, as we want above code to simplify for cases
     where called on single edge leaving basic block containing nontrivial
     jump insn.  */
  else if (e->dest == target)
    return false;

  /* We can only redirect non-fallthru edges of jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return false;
  if (GET_CODE (insn) != JUMP_INSN)
    return false;

  /* Recognize a tablejump and adjust all matching cases.  */
  if ((tmp = JUMP_LABEL (insn)) != NULL_RTX
      && (tmp = NEXT_INSN (tmp)) != NULL_RTX
      && GET_CODE (tmp) == JUMP_INSN
      && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
	  || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
    {
      rtvec vec;
      int j;
      rtx new_label = block_label (target);

      if (GET_CODE (PATTERN (tmp)) == ADDR_VEC)
	vec = XVEC (PATTERN (tmp), 0);
      else
	vec = XVEC (PATTERN (tmp), 1);

      /* Retarget every dispatch-table entry that referenced OLD_LABEL.  */
      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
	if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
	  {
	    RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
	    --LABEL_NUSES (old_label);
	    ++LABEL_NUSES (new_label);
	  }

      /* Handle casesi dispatch insns */
      if ((tmp = single_set (insn)) != NULL
	  && SET_DEST (tmp) == pc_rtx
	  && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	  && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
	  && XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label)
	{
	  XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (VOIDmode,
						       new_label);
	  --LABEL_NUSES (old_label);
	  ++LABEL_NUSES (new_label);
	}
    }
  else
    {
      /* ?? We may play the games with moving the named labels from
	 one basic block to the other in case only one computed_jump is
	 available.  */
      if (computed_jump_p (insn))
	return false;

      /* A return instruction can't be redirected.  */
      if (returnjump_p (insn))
	return false;

      /* If the insn doesn't go where we think, we're confused.  */
      if (JUMP_LABEL (insn) != old_label)
	abort ();
      redirect_jump (insn, block_label (target), 0);
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Edge %i->%i redirected to %i\n",
	     e->src->index, e->dest->index, target->index);
  if (e->dest != target)
    redirect_edge_succ_nodup (e, target);
  return true;
}

/* Redirect edge even at the expense of creating new jump insn or
   basic block.  Return new basic block if created, NULL otherwise.
   Abort if conversion is impossible.
*/

basic_block
redirect_edge_and_branch_force (e, target)
     edge e;
     basic_block target;
{
  basic_block new_bb;
  edge new_edge;
  rtx label;
  rtx bb_note;
  int i, j;

  /* Try the cheap in-place redirections first.  */
  if (redirect_edge_and_branch (e, target))
    return NULL;
  if (e->dest == target)
    return NULL;
  if (e->flags & EDGE_ABNORMAL)
    abort ();
  if (!(e->flags & EDGE_FALLTHRU))
    abort ();

  e->flags &= ~EDGE_FALLTHRU;
  label = block_label (target);
  /* Case of the fallthru block.  */
  if (!e->src->succ->succ_next)
    {
      e->src->end = emit_jump_insn_after (gen_jump (label),
					  last_loop_beg_note (e->src->end));
      JUMP_LABEL (e->src->end) = label;
      LABEL_NUSES (label)++;
      if (basic_block_for_insn)
	set_block_for_new_insns (e->src->end, e->src);
      emit_barrier_after (e->src->end);
      if (rtl_dump_file)
	fprintf (rtl_dump_file,
		 "Emitting jump insn %i to redirect edge %i->%i to %i\n",
		 INSN_UID (e->src->end), e->src->index, e->dest->index,
		 target->index);
      redirect_edge_succ (e, target);
      return NULL;
    }
  /* Redirecting fallthru edge of the conditional needs extra work.  */

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
	     "Emitting jump insn %i in new BB to redirect edge %i->%i to %i\n",
	     INSN_UID (e->src->end), e->src->index, e->dest->index,
	     target->index);

  /* Create the new structures.  */
  new_bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*new_bb));
  new_edge = (edge) xcalloc (1, sizeof (*new_edge));
  n_edges++;

  memset (new_bb, 0, sizeof (*new_bb));

  new_bb->end = new_bb->head = last_loop_beg_note (e->src->end);
  new_bb->succ = NULL;
  new_bb->pred = new_edge;
  new_bb->count = e->count;
  new_bb->frequency = EDGE_FREQUENCY (e);
  new_bb->loop_depth = e->dest->loop_depth;

  new_edge->flags = EDGE_FALLTHRU;
  new_edge->probability = e->probability;
  new_edge->count = e->count;

  if (target->global_live_at_start)
    {
      new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
      new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
      COPY_REG_SET (new_bb->global_live_at_start,
		    target->global_live_at_start);
      COPY_REG_SET (new_bb->global_live_at_end, new_bb->global_live_at_start);
    }

  /* Wire edge in.  */
  new_edge->src = e->src;
  new_edge->dest = new_bb;
  new_edge->succ_next = e->src->succ;
  e->src->succ = new_edge;
  new_edge->pred_next = NULL;

  /* Redirect old edge.  */
  redirect_edge_succ (e, target);
  redirect_edge_pred (e, new_bb);
  e->probability = REG_BR_PROB_BASE;

  /* Place the new block just after the block being split.  */
  VARRAY_GROW (basic_block_info, ++n_basic_blocks);

  /* Some parts of the compiler expect blocks to be numbered in
     sequential order so insert the new block immediately after the
     block being split..  */
  j = new_edge->src->index;
  for (i = n_basic_blocks - 1; i > j + 1; --i)
    {
      basic_block tmp = BASIC_BLOCK (i - 1);
      BASIC_BLOCK (i) = tmp;
      tmp->index = i;
    }

  BASIC_BLOCK (i) = new_bb;
  new_bb->index = i;

  /* Create the basic block note.  */
  bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, new_bb->head);
  NOTE_BASIC_BLOCK (bb_note) = new_bb;
  new_bb->head = bb_note;

  new_bb->end = emit_jump_insn_after (gen_jump (label), new_bb->head);
  JUMP_LABEL (new_bb->end) = label;
  LABEL_NUSES (label)++;
  if (basic_block_for_insn)
    set_block_for_new_insns (new_bb->end, new_bb);
  emit_barrier_after (new_bb->end);
  return new_bb;
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

void
tidy_fallthru_edge (e, b, c)
     edge e;
     basic_block b, c;
{
  rtx q;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastized and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  if (next_real_insn (b->end) != next_real_insn (PREV_INSN (c->head)))
    return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = b->end;
  if (GET_CODE (q) == JUMP_INSN
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
	  || (b->succ == e && e->succ_next == NULL)))
    {
#ifdef HAVE_cc0
      /* If this was a conditional jump, we need to also delete
	 the insn that set cc0.  */
      if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
	q = PREV_INSN (q);
#endif

      if (b->head == q)
	{
	  PUT_CODE (q, NOTE);
	  NOTE_LINE_NUMBER (q) = NOTE_INSN_DELETED;
	  NOTE_SOURCE_FILE (q) = 0;
	}
      else
	{
	  q = PREV_INSN (q);

	  /* We don't want a block to end on a line-number note since that has
	     the potential of changing the code between -g and not -g.  */
	  while (GET_CODE (q) == NOTE && NOTE_LINE_NUMBER (q) >= 0)
	    q = PREV_INSN (q);
	}

      b->end = q;
    }

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (c->head))
    flow_delete_insn_chain (NEXT_INSN (q), PREV_INSN (c->head));

  e->flags |= EDGE_FALLTHRU;
}

/* Fix up edges that now fall through, or rather should now fall through
   but previously required a jump around now deleted blocks.  Simplify
   the search by only examining blocks numerically adjacent, since this
   is how find_basic_blocks created them.  */

void
tidy_fallthru_edges ()
{
  int i;

  for (i = 1; i < n_basic_blocks; ++i)
    {
      basic_block b = BASIC_BLOCK (i - 1);
      basic_block c = BASIC_BLOCK (i);
      edge s;

      /* We care about simple conditional or unconditional jumps with
	 a single successor.

	 If we had a conditional branch to the next instruction when
	 find_basic_blocks was called, then there will only be one
	 out edge for the block which ended with the conditional
	 branch (since we do not create duplicate edges).

	 Furthermore, the edge will be marked as a fallthru because we
	 merge the flags for the duplicate edges.  So we do not want to
	 check that the edge is not a FALLTHRU edge.  */
      if ((s = b->succ) != NULL
	  && ! (s->flags & EDGE_COMPLEX)
	  && s->succ_next == NULL
	  && s->dest == c
	  /* If the jump insn has side effects, we can't tidy the edge.  */
	  && (GET_CODE (b->end) != JUMP_INSN
	      || onlyjump_p (b->end)))
	tidy_fallthru_edge (s, b, c);
    }
}

/* Helper function for split_edge.  Return true in case edge BB2 to BB1
   is back edge of syntactic loop.
*/

static bool
back_edge_of_syntactic_loop_p (bb1, bb2)
     basic_block bb1, bb2;
{
  rtx insn;
  int count = 0;

  /* A back edge must target a block at the same or an earlier index.  */
  if (bb1->index > bb2->index)
    return false;

  if (bb1->index == bb2->index)
    return true;

  /* Track the LOOP_BEG/LOOP_END note nesting between the two blocks;
     stop early if the nesting count ever goes negative.  */
  for (insn = bb1->end; insn != bb2->head && count >= 0;
       insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE)
      {
	if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	  count++;
	if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
	  count--;
      }

  return count >= 0;
}

/* Split a (typically critical) edge.  Return the new block.
   Abort on abnormal edges.

   ??? The code generally expects to be called on critical edges.
   The case of a block ending in an unconditional jump to a
   block with multiple predecessors is not handled optimally.  */

basic_block
split_edge (edge_in)
     edge edge_in;
{
  basic_block old_pred, bb, old_succ;
  edge edge_out;
  rtx bb_note;
  int i, j;

  /* Abnormal edges cannot be split.  */
  if ((edge_in->flags & EDGE_ABNORMAL) != 0)
    abort ();

  old_pred = edge_in->src;
  old_succ = edge_in->dest;

  /* Create the new structures.  */
  bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*bb));
  edge_out = (edge) xcalloc (1, sizeof (*edge_out));
  n_edges++;

  memset (bb, 0, sizeof (*bb));

  /* ??? This info is likely going to be out of date very soon.  */
  if (old_succ->global_live_at_start)
    {
      bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
      bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
      COPY_REG_SET (bb->global_live_at_start, old_succ->global_live_at_start);
      COPY_REG_SET (bb->global_live_at_end, old_succ->global_live_at_start);
    }

  /* Wire them up.  */
  bb->succ = edge_out;
  bb->count = edge_in->count;
  bb->frequency = EDGE_FREQUENCY (edge_in);

  edge_in->flags &= ~EDGE_CRITICAL;

  edge_out->pred_next = old_succ->pred;
  edge_out->succ_next = NULL;
  edge_out->src = bb;
  edge_out->dest = old_succ;
  edge_out->flags = EDGE_FALLTHRU;
  edge_out->probability = REG_BR_PROB_BASE;
  edge_out->count = edge_in->count;

  old_succ->pred = edge_out;

  /* Tricky case -- if there existed a fallthru into the successor
     (and we're not it) we must add a new unconditional jump around
     the new block we're actually interested in.

     Further, if that edge is critical, this means a second new basic
     block must be created to hold it.  In order to simplify correct
     insn placement, do this before we touch the existing basic block
     ordering for the block we were really wanting.  */
  if ((edge_in->flags & EDGE_FALLTHRU) == 0)
    {
      edge e;
      for (e = edge_out->pred_next; e; e = e->pred_next)
	if (e->flags & EDGE_FALLTHRU)
	  break;

      if (e)
	{
	  basic_block jump_block;
	  rtx pos;

	  if ((e->flags & EDGE_CRITICAL) == 0
	      && e->src != ENTRY_BLOCK_PTR)
	    {
	      /* Non critical -- we can simply add a jump to the end
		 of the existing predecessor.  */
	      jump_block = e->src;
	    }
	  else
	    {
	      /* We need a new block to hold the jump.  The simplest
		 way to do the bulk of the work here is to recursively
		 call ourselves.  */
	      jump_block = split_edge (e);
	      e = jump_block->succ;
	    }

	  /* Now add the jump insn ...  */
	  pos = emit_jump_insn_after (gen_jump (old_succ->head),
				      last_loop_beg_note (jump_block->end));
	  jump_block->end = pos;
	  if (basic_block_for_insn)
	    set_block_for_new_insns (pos, jump_block);
	  emit_barrier_after (pos);

	  /* ... let jump know that label is in use, ...  */
	  JUMP_LABEL (pos) = old_succ->head;
	  ++LABEL_NUSES (old_succ->head);

	  /* ... and clear fallthru on the outgoing edge.  */
	  e->flags &= ~EDGE_FALLTHRU;

	  /* Continue splitting the interesting edge.  */
	}
    }

  /* Place the new block just in front of the successor.  */
  VARRAY_GROW (basic_block_info, ++n_basic_blocks);
  if (old_succ == EXIT_BLOCK_PTR)
    j = n_basic_blocks - 1;
  else
    j = old_succ->index;
  for (i = n_basic_blocks - 1; i > j; --i)
    {
      basic_block tmp = BASIC_BLOCK (i - 1);
      BASIC_BLOCK (i) = tmp;
      tmp->index = i;
    }
  BASIC_BLOCK (i) = bb;
  bb->index = i;

  /* Create the basic block note.

     Where we place the note can have a noticeable impact on the generated
     code.  Consider this cfg:

			E
			|
			0
		       / \
		   +->1-->2--->E
		   |  |
		   +--+

     If we need to insert an insn on the edge from block 0 to block 1,
     we want to ensure the instructions we insert are outside of any
     loop notes that physically sit between block 0 and block 1.  Otherwise
     we confuse the loop optimizer into thinking the loop is a phony.  */
  if (old_succ != EXIT_BLOCK_PTR
      && PREV_INSN (old_succ->head)
      && GET_CODE (PREV_INSN (old_succ->head)) == NOTE
      && NOTE_LINE_NUMBER (PREV_INSN (old_succ->head)) == NOTE_INSN_LOOP_BEG
      && !back_edge_of_syntactic_loop_p (old_succ, old_pred))
    bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK,
				PREV_INSN (old_succ->head));
  else if (old_succ != EXIT_BLOCK_PTR)
    bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, old_succ->head);
  else
    bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
  NOTE_BASIC_BLOCK (bb_note) = bb;
  bb->head = bb->end = bb_note;

  /* For non-fallthru edges, we must adjust the predecessor's
     jump instruction to target our new block.  */
  if ((edge_in->flags & EDGE_FALLTHRU) == 0)
    {
      if (!redirect_edge_and_branch (edge_in, bb))
	abort ();
    }
  else
    redirect_edge_succ (edge_in, bb);

  return bb;
}

/* Queue instructions for insertion on an edge between two basic blocks.
   The new instructions and basic blocks (if any) will not appear in the
   CFG until commit_edge_insertions is called.
 */

void
insert_insn_on_edge (pattern, e)
     rtx pattern;
     edge e;
{
  /* We cannot insert instructions on an abnormal critical edge.
     It will be easier to find the culprit if we die now.  */
  if ((e->flags & (EDGE_ABNORMAL|EDGE_CRITICAL))
      == (EDGE_ABNORMAL|EDGE_CRITICAL))
    abort ();

  /* Queued insns accumulate on the edge as one RTL sequence; resume the
     existing sequence if one is already pending.  */
  if (e->insns == NULL_RTX)
    start_sequence ();
  else
    push_to_sequence (e->insns);

  emit_insn (pattern);

  e->insns = get_insns ();
  end_sequence ();
}

/* Update the CFG for the instructions queued on edge E.  */

static void
commit_one_edge_insertion (e)
     edge e;
{
  rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last;
  basic_block bb;

  /* Pull the insns off the edge now since the edge might go away.  */
  insns = e->insns;
  e->insns = NULL_RTX;

  /* Figure out where to put these things.  If the destination has
     one predecessor, insert there.  Except for the exit block.  */
  if (e->dest->pred->pred_next == NULL
      && e->dest != EXIT_BLOCK_PTR)
    {
      bb = e->dest;

      /* Get the location correct wrt a code label, and "nice" wrt
         a basic block note, and before everything else.  */
      tmp = bb->head;
      if (GET_CODE (tmp) == CODE_LABEL)
        tmp = NEXT_INSN (tmp);
      if (NOTE_INSN_BASIC_BLOCK_P (tmp))
        tmp = NEXT_INSN (tmp);
      /* If the block was only a label and a note, insert "before" so the
         block head is updated below; otherwise insert after the note.  */
      if (tmp == bb->head)
        before = tmp;
      else
        after = PREV_INSN (tmp);
    }

  /* If the source has one successor and the edge is not abnormal,
     insert there.  Except for the entry block.  */
  else if ((e->flags & EDGE_ABNORMAL) == 0
           && e->src->succ->succ_next == NULL
           && e->src != ENTRY_BLOCK_PTR)
    {
      bb = e->src;
      /* It is possible to have a non-simple jump here.  Consider a target
         where some forms of unconditional jumps clobber a register.  This
         happens on the fr30 for example.

         We know this block has a single successor, so we can just emit
         the queued insns before the jump.
 */

void
commit_edge_insertions ()
{
  int i;
  basic_block bb;

  /* Make sure the insn-to-block map is up to date before we start
     splitting edges (commit_one_edge_insertion may create blocks).  */
  compute_bb_for_insn (get_max_uid ());

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  /* Walk ENTRY first, then each numbered block.  Iterate by index, not by
     a saved pointer list, because committing an insertion can renumber
     and grow the block array.  */
  i = -1;
  bb = ENTRY_BLOCK_PTR;
  while (1)
    {
      edge e, next;

      for (e = bb->succ; e; e = next)
        {
          /* Fetch the successor early -- E may be removed or replaced.  */
          next = e->succ_next;
          if (e->insns)
            commit_one_edge_insertion (e);
        }

      if (++i >= n_basic_blocks)
        break;
      bb = BASIC_BLOCK (i);
    }
}

/* Dump per-register usage statistics and the basic block graph,
   with edge and liveness information, to FILE.  */

void
dump_flow_info (file)
     FILE *file;
{
  register int i;
  static const char * const reg_class_names[] = REG_CLASS_NAMES;

  fprintf (file, "%d registers.\n", max_regno);
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (REG_N_REFS (i))
      {
        enum reg_class class, altclass;
        fprintf (file, "\nRegister %d used %d times across %d insns",
                 i, REG_N_REFS (i), REG_LIVE_LENGTH (i));
        if (REG_BASIC_BLOCK (i) >= 0)
          fprintf (file, " in block %d", REG_BASIC_BLOCK (i));
        if (REG_N_SETS (i))
          fprintf (file, "; set %d time%s", REG_N_SETS (i),
                   (REG_N_SETS (i) == 1) ? "" : "s");
        if (REG_USERVAR_P (regno_reg_rtx[i]))
          fprintf (file, "; user var");
        if (REG_N_DEATHS (i) != 1)
          fprintf (file, "; dies in %d places", REG_N_DEATHS (i));
        if (REG_N_CALLS_CROSSED (i) == 1)
          fprintf (file, "; crosses 1 call");
        else if (REG_N_CALLS_CROSSED (i))
          fprintf (file, "; crosses %d calls", REG_N_CALLS_CROSSED (i));
        if (PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD)
          fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i));
        /* Report register-class preferences only when they differ from
           the unconstrained defaults.  */
        class = reg_preferred_class (i);
        altclass = reg_alternate_class (i);
        if (class != GENERAL_REGS || altclass != ALL_REGS)
          {
            if (altclass == ALL_REGS || class == ALL_REGS)
              fprintf (file, "; pref %s", reg_class_names[(int) class]);
            else if (altclass == NO_REGS)
              fprintf (file, "; %s or none", reg_class_names[(int) class]);
            else
              fprintf (file, "; pref %s, else %s",
                       reg_class_names[(int) class],
                       reg_class_names[(int) altclass]);
          }
        if (REG_POINTER (regno_reg_rtx[i]))
          fprintf (file, "; pointer");
        fprintf (file, ".\n");
      }

  fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges);
  for (i = 0; i < n_basic_blocks; i++)
    {
      register basic_block bb = BASIC_BLOCK (i);
      register edge e;

      fprintf (file, "\nBasic block %d: first insn %d, last %d, loop_depth %d, count ",
               i, INSN_UID (bb->head), INSN_UID (bb->end), bb->loop_depth);
      fprintf (file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count);
      fprintf (file, ", freq %i.\n", bb->frequency);

      fprintf (file, "Predecessors: ");
      for (e = bb->pred; e; e = e->pred_next)
        dump_edge_info (file, e, 0);

      fprintf (file, "\nSuccessors: ");
      for (e = bb->succ; e; e = e->succ_next)
        dump_edge_info (file, e, 1);

      fprintf (file, "\nRegisters live at start:");
      dump_regset (bb->global_live_at_start, file);

      fprintf (file, "\nRegisters live at end:");
      dump_regset (bb->global_live_at_end, file);

      putc ('\n', file);
    }

  putc ('\n', file);
}

/* Like dump_flow_info, but to stderr; convenient to call from a debugger.  */

void
debug_flow_info ()
{
  dump_flow_info (stderr);
}

void
+dump_edge_info (file, e, do_succ) + FILE *file; + edge e; + int do_succ; +{ + basic_block side = (do_succ ? e->dest : e->src); + + if (side == ENTRY_BLOCK_PTR) + fputs (" ENTRY", file); + else if (side == EXIT_BLOCK_PTR) + fputs (" EXIT", file); + else + fprintf (file, " %d", side->index); + + if (e->probability) + fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE); + + if (e->count) + { + fprintf (file, " count:"); + fprintf (file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) e->count); + } + + if (e->flags) + { + static const char * const bitnames[] = { + "fallthru", "crit", "ab", "abcall", "eh", "fake", "dfs_back" + }; + int comma = 0; + int i, flags = e->flags; + + fputc (' ', file); + fputc ('(', file); + for (i = 0; flags; i++) + if (flags & (1 << i)) + { + flags &= ~(1 << i); + + if (comma) + fputc (',', file); + if (i < (int) ARRAY_SIZE (bitnames)) + fputs (bitnames[i], file); + else + fprintf (file, "%d", i); + comma = 1; + } + fputc (')', file); + } +} + +/* Print out one basic block with live information at start and end. 
*/ + +void +dump_bb (bb, outf) + basic_block bb; + FILE *outf; +{ + rtx insn; + rtx last; + edge e; + + fprintf (outf, ";; Basic block %d, loop depth %d, count ", + bb->index, bb->loop_depth); + fprintf (outf, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count); + putc ('\n', outf); + + fputs (";; Predecessors: ", outf); + for (e = bb->pred; e; e = e->pred_next) + dump_edge_info (outf, e, 0); + putc ('\n', outf); + + fputs (";; Registers live at start:", outf); + dump_regset (bb->global_live_at_start, outf); + putc ('\n', outf); + + for (insn = bb->head, last = NEXT_INSN (bb->end); + insn != last; + insn = NEXT_INSN (insn)) + print_rtl_single (outf, insn); + + fputs (";; Registers live at end:", outf); + dump_regset (bb->global_live_at_end, outf); + putc ('\n', outf); + + fputs (";; Successors: ", outf); + for (e = bb->succ; e; e = e->succ_next) + dump_edge_info (outf, e, 1); + putc ('\n', outf); +} + +void +debug_bb (bb) + basic_block bb; +{ + dump_bb (bb, stderr); +} + +void +debug_bb_n (n) + int n; +{ + dump_bb (BASIC_BLOCK (n), stderr); +} + +/* Like print_rtl, but also print out live information for the start of each + basic block. 
 */

void
print_rtl_with_bb (outf, rtx_first)
     FILE *outf;
     rtx rtx_first;
{
  register rtx tmp_rtx;

  if (rtx_first == 0)
    fprintf (outf, "(nil)\n");
  else
    {
      int i;
      enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
      int max_uid = get_max_uid ();
      /* Per-insn-uid maps: the block (if any) starting / ending at that
         insn, and whether the insn belongs to zero, one or many blocks.  */
      basic_block *start = (basic_block *)
        xcalloc (max_uid, sizeof (basic_block));
      basic_block *end = (basic_block *)
        xcalloc (max_uid, sizeof (basic_block));
      enum bb_state *in_bb_p = (enum bb_state *)
        xcalloc (max_uid, sizeof (enum bb_state));

      for (i = n_basic_blocks - 1; i >= 0; i--)
        {
          basic_block bb = BASIC_BLOCK (i);
          rtx x;

          start[INSN_UID (bb->head)] = bb;
          end[INSN_UID (bb->end)] = bb;
          for (x = bb->head; x != NULL_RTX; x = NEXT_INSN (x))
            {
              enum bb_state state = IN_MULTIPLE_BB;
              if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
                state = IN_ONE_BB;
              in_bb_p[INSN_UID (x)] = state;

              if (x == bb->end)
                break;
            }
        }

      for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
        {
          int did_output;
          basic_block bb;

          if ((bb = start[INSN_UID (tmp_rtx)]) != NULL)
            {
              fprintf (outf, ";; Start of basic block %d, registers live:",
                       bb->index);
              dump_regset (bb->global_live_at_start, outf);
              putc ('\n', outf);
            }

          /* Notes and barriers legitimately live between blocks; anything
             else outside a block is worth flagging in the dump.  */
          if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
              && GET_CODE (tmp_rtx) != NOTE
              && GET_CODE (tmp_rtx) != BARRIER)
            fprintf (outf, ";; Insn is not within a basic block\n");
          else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
            fprintf (outf, ";; Insn is in multiple basic blocks\n");

          did_output = print_rtl_single (outf, tmp_rtx);

          if ((bb = end[INSN_UID (tmp_rtx)]) != NULL)
            {
              fprintf (outf, ";; End of basic block %d, registers live:\n",
                       bb->index);
              dump_regset (bb->global_live_at_end, outf);
              putc ('\n', outf);
            }

          if (did_output)
            putc ('\n', outf);
        }

      free (start);
      free (end);
      free (in_bb_p);
    }

  if (current_function_epilogue_delay_list != 0)
    {
      fprintf (outf, "\n;; Insns in epilogue delay list:\n\n");
      for (tmp_rtx = current_function_epilogue_delay_list; tmp_rtx != 0;
           tmp_rtx = XEXP (tmp_rtx, 1))
        print_rtl_single (outf, XEXP (tmp_rtx, 0));
    }
}

/* Verify the CFG consistency.  This function checks some CFG invariants and
   aborts when something is wrong.  Hope that this function will help to
   convert many optimization passes to keep the CFG consistent.

   Currently it does following checks:

   - test head/end pointers
   - overlapping of basic blocks
   - edge list correctness
   - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
   - tails of basic blocks (ensure that boundary is necessary)
   - scans body of the basic block for JUMP_INSN, CODE_LABEL
     and NOTE_INSN_BASIC_BLOCK
   - check that all insns are in the basic blocks
     (except the switch handling code, barriers and notes)
   - check that all returns are followed by barriers

   In future it can be extended check a lot of other stuff as well
   (reachability of basic blocks, life information, etc. etc.).  */

void
verify_flow_info ()
{
  const int max_uid = get_max_uid ();
  const rtx rtx_first = get_insns ();
  rtx last_head = get_last_insn ();
  basic_block *bb_info, *last_visited;
  size_t *edge_checksum;
  rtx x;
  int i, last_bb_num_seen, num_bb_notes, err = 0;

  /* Indexing for the last_visited/edge_checksum arrays is block index + 2
     so that ENTRY (-2) and EXIT (-1) get slots 0 and 1.  */
  bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
  last_visited = (basic_block *) xcalloc (n_basic_blocks + 2,
                                          sizeof (basic_block));
  edge_checksum = (size_t *) xcalloc (n_basic_blocks + 2, sizeof (size_t));

  for (i = n_basic_blocks - 1; i >= 0; i--)
    {
      basic_block bb = BASIC_BLOCK (i);
      rtx head = bb->head;
      rtx end = bb->end;

      /* Verify the end of the basic block is in the INSN chain.  */
      for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
        if (x == end)
          break;
      if (!x)
        {
          error ("End insn %d for block %d not found in the insn stream.",
                 INSN_UID (end), bb->index);
          err = 1;
        }

      /* Work backwards from the end to the head of the basic block
         to verify the head is in the RTL chain.  */
      for (; x != NULL_RTX; x = PREV_INSN (x))
        {
          /* While walking over the insn chain, verify insns appear
             in only one basic block and initialize the BB_INFO array
             used by other passes.  */
          if (bb_info[INSN_UID (x)] != NULL)
            {
              error ("Insn %d is in multiple basic blocks (%d and %d)",
                     INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
              err = 1;
            }
          bb_info[INSN_UID (x)] = bb;

          if (x == head)
            break;
        }
      if (!x)
        {
          error ("Head insn %d for block %d not found in the insn stream.",
                 INSN_UID (head), bb->index);
          err = 1;
        }

      /* Blocks are walked from last to first, so this also verifies that
         blocks do not overlap in the insn chain.  */
      last_head = x;
    }

  /* Now check the basic blocks (boundaries etc.) */
  for (i = n_basic_blocks - 1; i >= 0; i--)
    {
      basic_block bb = BASIC_BLOCK (i);
      int has_fallthru = 0;
      edge e;

      e = bb->succ;
      while (e)
        {
          if (last_visited [e->dest->index + 2] == bb)
            {
              error ("verify_flow_info: Duplicate edge %i->%i",
                     e->src->index, e->dest->index);
              err = 1;
            }
          last_visited [e->dest->index + 2] = bb;

          if (e->flags & EDGE_FALLTHRU)
            has_fallthru = 1;

          /* A fallthru edge between real blocks must connect adjacent
             blocks with no active insn or barrier in between.  */
          if ((e->flags & EDGE_FALLTHRU)
              && e->src != ENTRY_BLOCK_PTR
              && e->dest != EXIT_BLOCK_PTR)
            {
              rtx insn;
              if (e->src->index + 1 != e->dest->index)
                {
                  error ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
                         e->src->index, e->dest->index);
                  err = 1;
                }
              else
                for (insn = NEXT_INSN (e->src->end); insn != e->dest->head;
                     insn = NEXT_INSN (insn))
                  if (GET_CODE (insn) == BARRIER || INSN_P (insn))
                    {
                      error ("verify_flow_info: Incorrect fallthru %i->%i",
                             e->src->index, e->dest->index);
                      fatal_insn ("Wrong insn in the fallthru edge", insn);
                      err = 1;
                    }
            }
          if (e->src != bb)
            {
              error ("verify_flow_info: Basic block %d succ edge is corrupted",
                     bb->index);
              fprintf (stderr, "Predecessor: ");
              dump_edge_info (stderr, e, 0);
              fprintf (stderr, "\nSuccessor: ");
              dump_edge_info (stderr, e, 1);
              fprintf (stderr, "\n");
              err = 1;
            }
          /* Each edge is added here and subtracted when seen from its
             destination's pred list; a nonzero residue means the succ and
             pred lists disagree.  */
          edge_checksum[e->dest->index + 2] += (size_t) e;
          e = e->succ_next;
        }
      if (!has_fallthru)
        {
          rtx insn = bb->end;

          /* Ensure existence of barrier in BB with no fallthru edges.

             NOTE(review): GET_CODE (insn) is evaluated in the loop
             condition before the !insn test in the body; if no BARRIER
             follows before the chain ends, this looks like it walks off
             the end of the insn chain -- confirm.  */
          for (insn = bb->end; GET_CODE (insn) != BARRIER;
               insn = NEXT_INSN (insn))
            if (!insn
                || (GET_CODE (insn) == NOTE
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK))
              {
                error ("Missing barrier after block %i", bb->index);
                err = 1;
              }
        }

      e = bb->pred;
      while (e)
        {
          if (e->dest != bb)
            {
              error ("Basic block %d pred edge is corrupted", bb->index);
              fputs ("Predecessor: ", stderr);
              dump_edge_info (stderr, e, 0);
              fputs ("\nSuccessor: ", stderr);
              dump_edge_info (stderr, e, 1);
              fputc ('\n', stderr);
              err = 1;
            }
          edge_checksum[e->dest->index + 2] -= (size_t) e;
          e = e->pred_next;
        }

      /* OK pointers are correct.  Now check the header of basic
         block.  It ought to contain optional CODE_LABEL followed
         by NOTE_BASIC_BLOCK.  */
      x = bb->head;
      if (GET_CODE (x) == CODE_LABEL)
        {
          if (bb->end == x)
            {
              error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
                     bb->index);
              err = 1;
            }
          x = NEXT_INSN (x);
        }
      if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
        {
          error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
                 bb->index);
          err = 1;
        }

      if (bb->end == x)
        {
          /* Do checks for empty blocks here */
        }
      else
        {
          x = NEXT_INSN (x);
          while (x)
            {
              if (NOTE_INSN_BASIC_BLOCK_P (x))
                {
                  error ("NOTE_INSN_BASIC_BLOCK %d in the middle of basic block %d",
                         INSN_UID (x), bb->index);
                  err = 1;
                }

              if (x == bb->end)
                break;

              /* Control flow insns may only appear at the very end of
                 a block (handled by the break above).  */
              if (GET_CODE (x) == JUMP_INSN
                  || GET_CODE (x) == CODE_LABEL
                  || GET_CODE (x) == BARRIER)
                {
                  error ("In basic block %d:", bb->index);
                  fatal_insn ("Flow control insn inside a basic block", x);
                }

              x = NEXT_INSN (x);
            }
        }
    }

  /* Complete edge checksumming for ENTRY and EXIT.  */
  {
    edge e;
    for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next)
      edge_checksum[e->dest->index + 2] += (size_t) e;
    for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
      edge_checksum[e->dest->index + 2] -= (size_t) e;
  }

  for (i = -2; i < n_basic_blocks; ++i)
    if (edge_checksum[i + 2])
      {
        error ("Basic block %i edge lists are corrupted", i);
        err = 1;
      }

  /* Finally walk the whole insn chain checking basic block notes and
     insns that live outside any block.  */
  last_bb_num_seen = -1;
  num_bb_notes = 0;
  x = rtx_first;
  while (x)
    {
      if (NOTE_INSN_BASIC_BLOCK_P (x))
        {
          basic_block bb = NOTE_BASIC_BLOCK (x);
          num_bb_notes++;
          if (bb->index != last_bb_num_seen + 1)
            internal_error ("Basic blocks not numbered consecutively.");

          last_bb_num_seen = bb->index;
        }

      if (!bb_info[INSN_UID (x)])
        {
          switch (GET_CODE (x))
            {
            case BARRIER:
            case NOTE:
              break;

            case CODE_LABEL:
              /* An addr_vec is placed outside any basic block.  */
              if (NEXT_INSN (x)
                  && GET_CODE (NEXT_INSN (x)) == JUMP_INSN
                  && (GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_DIFF_VEC
                      || GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_VEC))
                {
                  x = NEXT_INSN (x);
                }

              /* But in any case, non-deletable labels can appear anywhere.  */
              break;

            default:
              fatal_insn ("Insn outside basic block", x);
            }
        }

      if (INSN_P (x)
          && GET_CODE (x) == JUMP_INSN
          && returnjump_p (x) && ! condjump_p (x)
          && ! (NEXT_INSN (x) && GET_CODE (NEXT_INSN (x)) == BARRIER))
            fatal_insn ("Return not followed by barrier", x);

      x = NEXT_INSN (x);
    }

  if (num_bb_notes != n_basic_blocks)
    internal_error
      ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
       num_bb_notes, n_basic_blocks);

  if (err)
    internal_error ("verify_flow_info failed.");

  /* Clean up.  */
  free (bb_info);
  free (last_visited);
  free (edge_checksum);
}


/* Assume that the preceding pass has possibly eliminated jump instructions
   or converted the unconditional jumps.  Eliminate the edges from CFG.
   Return true if any edges are eliminated.  */

bool
purge_dead_edges (bb)
     basic_block bb;
{
  edge e, next;
  rtx insn = bb->end;
  bool purged = false;

  /* NOTE(review): this early return means the JUMP_INSN branch below is
     only ever entered for simplejumps, which appears to make the
     any_condjump_p/returnjump_p handling there unreachable -- confirm
     whether the two tests are in the intended order.  */
  if (GET_CODE (insn) == JUMP_INSN && !simplejump_p (insn))
    return false;
  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx note;
      edge b,f;
      /* We do care only about conditional jumps and simplejumps.  */
      if (!any_condjump_p (insn)
          && !returnjump_p (insn)
          && !simplejump_p (insn))
        return false;
      for (e = bb->succ; e; e = next)
        {
          next = e->succ_next;

          /* Keep only edges the jump can still justify: the fallthru of a
             conditional jump, the branch target, or EXIT for a return.  */
          if ((e->flags & EDGE_FALLTHRU)
              && any_condjump_p (insn))
            continue;
          if (e->dest != EXIT_BLOCK_PTR
              && e->dest->head == JUMP_LABEL (insn))
            continue;
          if (e->dest == EXIT_BLOCK_PTR
              && returnjump_p (insn))
            continue;
          purged = true;
          remove_edge (e);
        }
      if (!bb->succ || !purged)
        return false;
      if (rtl_dump_file)
        fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->index);
      if (!optimize)
        return purged;

      /* Redistribute probabilities.  */
      if (!bb->succ->succ_next)
        {
          bb->succ->probability = REG_BR_PROB_BASE;
          bb->succ->count = bb->count;
        }
      else
        {
          note = find_reg_note (insn, REG_BR_PROB, NULL);
          if (!note)
            return purged;
          b = BRANCH_EDGE (bb);
          f = FALLTHRU_EDGE (bb);
          b->probability = INTVAL (XEXP (note, 0));
          f->probability = REG_BR_PROB_BASE - b->probability;
          b->count = bb->count * b->probability / REG_BR_PROB_BASE;
          f->count = bb->count * f->probability / REG_BR_PROB_BASE;
        }
      return purged;
    }

  /* Cleanup abnormal edges caused by throwing insns that have been
     eliminated.  */
  if (! can_throw_internal (bb->end))
    for (e = bb->succ; e; e = next)
      {
        next = e->succ_next;
        if (e->flags & EDGE_EH)
          {
            remove_edge (e);
            purged = true;
          }
      }

  /* If we don't see a jump insn, we don't know exactly why the block would
     have been broken at this point.  Look for a simple, non-fallthru edge,
     as these are only created by conditional branches.  If we find such an
     edge we know that there used to be a jump here and can then safely
     remove all non-fallthru edges.  */
  for (e = bb->succ; e && (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU));
       e = e->succ_next);
  if (!e)
    return purged;
  for (e = bb->succ; e; e = next)
    {
      next = e->succ_next;
      if (!(e->flags & EDGE_FALLTHRU))
        remove_edge (e), purged = true;
    }
  /* Exactly one (fallthru) edge must remain.  */
  if (!bb->succ || bb->succ->succ_next)
    abort ();
  bb->succ->probability = REG_BR_PROB_BASE;
  bb->succ->count = bb->count;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Purged non-fallthru edges from bb %i\n",
             bb->index);
  return purged;
}

/* Search all basic blocks for potentially dead edges and purge them.

   Return true if some edge has been eliminated.  */

bool
purge_all_dead_edges ()
{
  int i, purged = false;
  for (i = 0; i < n_basic_blocks; i++)
    purged |= purge_dead_edges (BASIC_BLOCK (i));
  return purged;
}
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
new file mode 100644
index 00000000000..96a20b8f3c3
--- /dev/null
+++ b/gcc/cfganal.c
@@ -0,0 +1,1074 @@
/* Control flow graph analysis code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This file contains various simple utilities to analyze the CFG.
 */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "toplev.h"

#include "obstack.h"

/* Store the data structures necessary for depth-first search.  */
struct depth_first_search_dsS {
  /* stack for backtracking during the algorithm */
  basic_block *stack;

  /* number of edges in the stack.  That is, positions 0, ..., sp-1
     have edges.  */
  unsigned int sp;

  /* record of basic blocks already seen by depth-first search */
  sbitmap visited_blocks;
};
typedef struct depth_first_search_dsS *depth_first_search_ds;

static void flow_dfs_compute_reverse_init
  PARAMS ((depth_first_search_ds));
static void flow_dfs_compute_reverse_add_bb
  PARAMS ((depth_first_search_ds, basic_block));
static basic_block flow_dfs_compute_reverse_execute
  PARAMS ((depth_first_search_ds));
static void flow_dfs_compute_reverse_finish
  PARAMS ((depth_first_search_ds));
static void remove_fake_successors PARAMS ((basic_block));
static bool need_fake_edge_p PARAMS ((rtx));

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */
bool
forwarder_block_p (bb)
     basic_block bb;
{
  rtx insn = bb->head;
  if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
      || !bb->succ || bb->succ->succ_next)
    return false;

  /* Every insn before the end must be inactive (labels/notes only).  */
  while (insn != bb->end)
    {
      if (active_insn_p (insn))
        return false;
      insn = NEXT_INSN (insn);
    }
  /* The final insn may only be inactive or a trivial jump.  */
  return (!active_insn_p (insn)
          || (GET_CODE (insn) == JUMP_INSN && onlyjump_p (insn)));
}

/* Return nonzero if we can reach TARGET from SRC by falling through.  */
bool
can_fallthru (src, target)
     basic_block src, target;
{
  rtx insn = src->end;
  rtx insn2 = target->head;

  if (src->index + 1 == target->index && !active_insn_p (insn2))
    insn2 = next_active_insn (insn2);
  /* ??? Later we may add code to move jump tables offline.  */
  return next_active_insn (insn) == insn2;
}

/* Identify critical edges and set the bits appropriately.  */

void
mark_critical_edges ()
{
  int i, n = n_basic_blocks;
  basic_block bb;

  /* We begin with the entry block.  This is not terribly important now,
     but could be if a front end (Fortran) implemented alternate entry
     points.  */
  bb = ENTRY_BLOCK_PTR;
  i = -1;

  while (1)
    {
      edge e;

      /* (1) Critical edges must have a source with multiple successors.  */
      if (bb->succ && bb->succ->succ_next)
        {
          for (e = bb->succ; e; e = e->succ_next)
            {
              /* (2) Critical edges must have a destination with multiple
                 predecessors.  Note that we know there is at least one
                 predecessor -- the edge we followed to get here.  */
              if (e->dest->pred->pred_next)
                e->flags |= EDGE_CRITICAL;
              else
                e->flags &= ~EDGE_CRITICAL;
            }
        }
      else
        {
          for (e = bb->succ; e; e = e->succ_next)
            e->flags &= ~EDGE_CRITICAL;
        }

      if (++i >= n)
        break;
      bb = BASIC_BLOCK (i);
    }
}

/* Mark the back edges in DFS traversal.
   Return non-zero if a loop (natural or otherwise) is present.
   Inspired by Depth_First_Search_PP described in:

     Advanced Compiler Design and Implementation
     Steven Muchnick
     Morgan Kaufmann, 1997

   and heavily borrowed from flow_depth_first_order_compute.  */

bool
mark_dfs_back_edges ()
{
  edge *stack;
  int *pre;
  int *post;
  int sp;
  int prenum = 1;
  int postnum = 1;
  sbitmap visited;
  bool found = false;

  /* Allocate the preorder and postorder number arrays.  */
  pre = (int *) xcalloc (n_basic_blocks, sizeof (int));
  post = (int *) xcalloc (n_basic_blocks, sizeof (int));

  /* Allocate stack for back-tracking up CFG.  */
  stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
  sp = 0;

  /* Allocate bitmap to track nodes that have been visited.  */
  visited = sbitmap_alloc (n_basic_blocks);

  /* None of the nodes in the CFG have been visited yet.  */
  sbitmap_zero (visited);

  /* Push the first edge on to the stack.  */
  stack[sp++] = ENTRY_BLOCK_PTR->succ;

  while (sp)
    {
      edge e;
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      e = stack[sp - 1];
      src = e->src;
      dest = e->dest;
      e->flags &= ~EDGE_DFS_BACK;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          SET_BIT (visited, dest->index);

          pre[dest->index] = prenum++;

          if (dest->succ)
            {
              /* Since the DEST node has been visited for the first
                 time, check its successors.  */
              stack[sp++] = dest->succ;
            }
          else
            post[dest->index] = postnum++;
        }
      else
        {
          /* A back edge targets a node already on the current DFS path:
             it has a preorder number but no postorder number yet.  */
          if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
              && pre[src->index] >= pre[dest->index]
              && post[dest->index] == 0)
            e->flags |= EDGE_DFS_BACK, found = true;

          if (! e->succ_next && src != ENTRY_BLOCK_PTR)
            post[src->index] = postnum++;

          if (e->succ_next)
            stack[sp - 1] = e->succ_next;
          else
            sp--;
        }
    }

  free (pre);
  free (post);
  free (stack);
  sbitmap_free (visited);

  return found;
}

/* Return true if we need to add fake edge to exit.
   Helper function for the flow_call_edges_add.  */

static bool
need_fake_edge_p (insn)
     rtx insn;
{
  if (!INSN_P (insn))
    return false;

  /* A call that might not return needs a fake edge to exit.  */
  if ((GET_CODE (insn) == CALL_INSN
       && !SIBLING_CALL_P (insn)
       && !find_reg_note (insn, REG_NORETURN, NULL)
       && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL)
       && !CONST_OR_PURE_CALL_P (insn)))
    return true;

  /* So does volatile inline assembly.  */
  return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
           && MEM_VOLATILE_P (PATTERN (insn)))
          || (GET_CODE (PATTERN (insn)) == PARALLEL
              && asm_noperands (insn) != -1
              && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
          || GET_CODE (PATTERN (insn)) == ASM_INPUT);
}

/* Add fake edges to the function exit for any non constant and non noreturn
   calls, volatile inline assembly in the bitmap of blocks specified by
   BLOCKS or to the whole CFG if BLOCKS is zero.  Return the number of blocks
   that were split.

   The goal is to expose cases in which entering a basic block does not imply
   that all subsequent instructions must be executed.  */

int
flow_call_edges_add (blocks)
     sbitmap blocks;
{
  int i;
  int blocks_split = 0;
  int bb_num = 0;
  basic_block *bbs;
  bool check_last_block = false;

  /* Map bb indices into basic block pointers since split_block
     will renumber the basic blocks.  */

  bbs = xmalloc (n_basic_blocks * sizeof (*bbs));

  if (! blocks)
    {
      for (i = 0; i < n_basic_blocks; i++)
        bbs[bb_num++] = BASIC_BLOCK (i);
      check_last_block = true;
    }
  else
    {
      EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i,
      {
        bbs[bb_num++] = BASIC_BLOCK (i);
        if (i == n_basic_blocks - 1)
          check_last_block = true;
      });
    }

  /* In the last basic block, before epilogue generation, there will be
     a fallthru edge to EXIT.  Special care is required if the last insn
     of the last basic block is a call because make_edge folds duplicate
     edges, which would result in the fallthru edge also being marked
     fake, which would result in the fallthru edge being removed by
     remove_fake_edges, which would result in an invalid CFG.

     Moreover, we can't elide the outgoing fake edge, since the block
     profiler needs to take this into account in order to solve the minimal
     spanning tree in the case that the call doesn't return.

     Handle this by adding a dummy instruction in a new last basic block.  */
  if (check_last_block
      && need_fake_edge_p (BASIC_BLOCK (n_basic_blocks - 1)->end))
    {
      edge e;
      for (e = BASIC_BLOCK (n_basic_blocks - 1)->succ; e; e = e->succ_next)
        if (e->dest == EXIT_BLOCK_PTR)
          break;
      insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e);
      commit_edge_insertions ();
    }


  /* Now add fake edges to the function exit for any non constant
     calls since there is no way that we can determine if they will
     return or not...  */

  for (i = 0; i < bb_num; i++)
    {
      basic_block bb = bbs[i];
      rtx insn;
      rtx prev_insn;

      /* Walk the block's insns backwards; save PREV_INSN first since
         split_block may detach INSN from the original chain position.  */
      for (insn = bb->end; ; insn = prev_insn)
        {
          prev_insn = PREV_INSN (insn);
          if (need_fake_edge_p (insn))
            {
              edge e;

              /* The above condition should be enough to verify that there
                 is no edge to the exit block in CFG already.  Calling
                 make_edge in such case would make us mark that edge as fake
                 and remove it later.  */
#ifdef ENABLE_CHECKING
              if (insn == bb->end)
                for (e = bb->succ; e; e = e->succ_next)
                  if (e->dest == EXIT_BLOCK_PTR)
                    abort ();
#endif

              /* Note that the following may create a new basic block
                 and renumber the existing basic blocks.  */
              e = split_block (bb, insn);
              if (e)
                blocks_split++;

              make_edge (NULL, bb, EXIT_BLOCK_PTR, EDGE_FAKE);
            }
          if (insn == bb->head)
            break;
        }
    }

  if (blocks_split)
    verify_flow_info ();

  free (bbs);
  return blocks_split;
}
/* Find unreachable blocks.  An unreachable block will have 0 in
   the reachable bit in block->flags.  A non-zero value indicates the
   block is reachable.  */

void
find_unreachable_blocks ()
{
  edge e;
  int i, n;
  basic_block *tos, *worklist;

  n = n_basic_blocks;
  tos = worklist = (basic_block *) xmalloc (sizeof (basic_block) * n);

  /* Clear all the reachability flags.  */

  for (i = 0; i < n; ++i)
    BASIC_BLOCK (i)->flags &= ~BB_REACHABLE;

  /* Add our starting points to the worklist.  Almost always there will
     be only one.  It isn't inconceivable that we might one day directly
     support Fortran alternate entry points.  */

  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
    {
      *tos++ = e->dest;

      /* Mark the block reachable.  */
      e->dest->flags |= BB_REACHABLE;
    }

  /* Iterate: find everything reachable from what we've already seen.  */

  while (tos != worklist)
    {
      basic_block b = *--tos;

      for (e = b->succ; e; e = e->succ_next)
        if (!(e->dest->flags & BB_REACHABLE))
          {
            *tos++ = e->dest;
            e->dest->flags |= BB_REACHABLE;
          }
    }

  free (worklist);
}

/* Functions to access an edge list with a vector representation.
   Enough data is kept such that given an index number, the
   pred and succ that edge represents can be determined, or
   given a pred and a succ, its index number can be returned.
   This allows algorithms which consume a lot of memory to
   represent the normally full matrix of edge (pred,succ) with a
   single indexed vector,  edge (EDGE_INDEX (pred, succ)), with no
   wasted space in the client code due to sparse flow graphs.  */

/* This function initializes the edge list.  Basically the entire
   flowgraph is processed, and all edges are assigned a number,
   and the data structure is filled in.  */

struct edge_list *
create_edge_list ()
{
  struct edge_list *elist;
  edge e;
  int num_edges;
  int x;
  int block_count;

  block_count = n_basic_blocks + 2;   /* Include the entry and exit blocks.  */

  num_edges = 0;

  /* Determine the number of edges in the flow graph by counting successor
     edges on each basic block.  */
  for (x = 0; x < n_basic_blocks; x++)
    {
      basic_block bb = BASIC_BLOCK (x);

      for (e = bb->succ; e; e = e->succ_next)
        num_edges++;
    }
  /* Don't forget successors of the entry block.  */
  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
    num_edges++;

  elist = (struct edge_list *) xmalloc (sizeof (struct edge_list));
  elist->num_blocks = block_count;
  elist->num_edges = num_edges;
  elist->index_to_edge = (edge *) xmalloc (sizeof (edge) * num_edges);

  num_edges = 0;

  /* Follow successors of the entry block, and register these edges.  */
  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
    {
      elist->index_to_edge[num_edges] = e;
      num_edges++;
    }

  for (x = 0; x < n_basic_blocks; x++)
    {
      basic_block bb = BASIC_BLOCK (x);

      /* Follow all successors of blocks, and register these edges.  */
      for (e = bb->succ; e; e = e->succ_next)
        {
          elist->index_to_edge[num_edges] = e;
          num_edges++;
        }
    }
  return elist;
}

/* This function frees memory associated with an edge list.  */

void
free_edge_list (elist)
     struct edge_list *elist;
{
  if (elist)
    {
      free (elist->index_to_edge);
      free (elist);
    }
}

/* This function provides debug output showing an edge list.  */

void
print_edge_list (f, elist)
     FILE *f;
     struct edge_list *elist;
{
  int x;
  fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
           elist->num_blocks - 2, elist->num_edges);

  for (x = 0; x < elist->num_edges; x++)
    {
      fprintf (f, " %-4d - edge(", x);
      if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
        fprintf (f, "entry,");
      else
        fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);

      if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
        fprintf (f, "exit)\n");
      else
        fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
    }
}

/* This function provides an internal consistency check of an edge list,
   verifying that all edges are present, and that there are no
   extra edges.
 */ + +void +verify_edge_list (f, elist) + FILE *f; + struct edge_list *elist; +{ + int x, pred, succ, index; + edge e; + + for (x = 0; x < n_basic_blocks; x++) + { + basic_block bb = BASIC_BLOCK (x); + + for (e = bb->succ; e; e = e->succ_next) + { + pred = e->src->index; + succ = e->dest->index; + index = EDGE_INDEX (elist, e->src, e->dest); + if (index == EDGE_INDEX_NO_EDGE) + { + fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ); + continue; + } + if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) + fprintf (f, "*p* Pred for index %d should be %d not %d\n", + index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); + if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ) + fprintf (f, "*p* Succ for index %d should be %d not %d\n", + index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); + } + } + for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + { + pred = e->src->index; + succ = e->dest->index; + index = EDGE_INDEX (elist, e->src, e->dest); + if (index == EDGE_INDEX_NO_EDGE) + { + fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ); + continue; + } + if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) + fprintf (f, "*p* Pred for index %d should be %d not %d\n", + index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); + if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ) + fprintf (f, "*p* Succ for index %d should be %d not %d\n", + index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); + } + /* We've verified that all the edges are in the list, now let's make sure + there are no spurious edges in the list. 
*/ + + for (pred = 0; pred < n_basic_blocks; pred++) + for (succ = 0; succ < n_basic_blocks; succ++) + { + basic_block p = BASIC_BLOCK (pred); + basic_block s = BASIC_BLOCK (succ); + + int found_edge = 0; + + for (e = p->succ; e; e = e->succ_next) + if (e->dest == s) + { + found_edge = 1; + break; + } + for (e = s->pred; e; e = e->pred_next) + if (e->src == p) + { + found_edge = 1; + break; + } + if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ)) + == EDGE_INDEX_NO_EDGE && found_edge != 0) + fprintf (f, "*** Edge (%d, %d) appears to not have an index\n", + pred, succ); + if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ)) + != EDGE_INDEX_NO_EDGE && found_edge == 0) + fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n", + pred, succ, EDGE_INDEX (elist, BASIC_BLOCK (pred), + BASIC_BLOCK (succ))); + } + for (succ = 0; succ < n_basic_blocks; succ++) + { + basic_block p = ENTRY_BLOCK_PTR; + basic_block s = BASIC_BLOCK (succ); + + int found_edge = 0; + + for (e = p->succ; e; e = e->succ_next) + if (e->dest == s) + { + found_edge = 1; + break; + } + for (e = s->pred; e; e = e->pred_next) + if (e->src == p) + { + found_edge = 1; + break; + } + if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ)) + == EDGE_INDEX_NO_EDGE && found_edge != 0) + fprintf (f, "*** Edge (entry, %d) appears to not have an index\n", + succ); + if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ)) + != EDGE_INDEX_NO_EDGE && found_edge == 0) + fprintf (f, "*** Edge (entry, %d) has index %d, but no edge exists\n", + succ, EDGE_INDEX (elist, ENTRY_BLOCK_PTR, + BASIC_BLOCK (succ))); + } + for (pred = 0; pred < n_basic_blocks; pred++) + { + basic_block p = BASIC_BLOCK (pred); + basic_block s = EXIT_BLOCK_PTR; + + int found_edge = 0; + + for (e = p->succ; e; e = e->succ_next) + if (e->dest == s) + { + found_edge = 1; + break; + } + for (e = s->pred; e; e = e->pred_next) + if (e->src == p) + { + found_edge = 1; + break; + } + if (EDGE_INDEX (elist, 
BASIC_BLOCK (pred), EXIT_BLOCK_PTR) + == EDGE_INDEX_NO_EDGE && found_edge != 0) + fprintf (f, "*** Edge (%d, exit) appears to not have an index\n", + pred); + if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR) + != EDGE_INDEX_NO_EDGE && found_edge == 0) + fprintf (f, "*** Edge (%d, exit) has index %d, but no edge exists\n", + pred, EDGE_INDEX (elist, BASIC_BLOCK (pred), + EXIT_BLOCK_PTR)); + } +} + +/* This routine will determine what, if any, edge there is between + a specified predecessor and successor. */ + +int +find_edge_index (edge_list, pred, succ) + struct edge_list *edge_list; + basic_block pred, succ; +{ + int x; + for (x = 0; x < NUM_EDGES (edge_list); x++) + { + if (INDEX_EDGE_PRED_BB (edge_list, x) == pred + && INDEX_EDGE_SUCC_BB (edge_list, x) == succ) + return x; + } + return (EDGE_INDEX_NO_EDGE); +} + +/* Dump the list of basic blocks in the bitmap NODES. */ + +void +flow_nodes_print (str, nodes, file) + const char *str; + const sbitmap nodes; + FILE *file; +{ + int node; + + if (! nodes) + return; + + fprintf (file, "%s { ", str); + EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, {fprintf (file, "%d ", node);}); + fputs ("}\n", file); +} + +/* Dump the list of edges in the array EDGE_LIST. */ + +void +flow_edge_list_print (str, edge_list, num_edges, file) + const char *str; + const edge *edge_list; + int num_edges; + FILE *file; +{ + int i; + + if (! edge_list) + return; + + fprintf (file, "%s { ", str); + for (i = 0; i < num_edges; i++) + fprintf (file, "%d->%d ", edge_list[i]->src->index, + edge_list[i]->dest->index); + fputs ("}\n", file); +} + + +/* This routine will remove any fake successor edges for a basic block. + When the edge is removed, it is also removed from whatever predecessor + list it is in. 
*/ + +static void +remove_fake_successors (bb) + basic_block bb; +{ + edge e; + for (e = bb->succ; e;) + { + edge tmp = e; + e = e->succ_next; + if ((tmp->flags & EDGE_FAKE) == EDGE_FAKE) + remove_edge (tmp); + } +} + +/* This routine will remove all fake edges from the flow graph. If + we remove all fake successors, it will automatically remove all + fake predecessors. */ + +void +remove_fake_edges () +{ + int x; + + for (x = 0; x < n_basic_blocks; x++) + remove_fake_successors (BASIC_BLOCK (x)); + + /* We've handled all successors except the entry block's. */ + remove_fake_successors (ENTRY_BLOCK_PTR); +} + +/* This function will add a fake edge between any block which has no + successors, and the exit block. Some data flow equations require these + edges to exist. */ + +void +add_noreturn_fake_exit_edges () +{ + int x; + + for (x = 0; x < n_basic_blocks; x++) + if (BASIC_BLOCK (x)->succ == NULL) + make_edge (NULL, BASIC_BLOCK (x), EXIT_BLOCK_PTR, EDGE_FAKE); +} + +/* This function adds a fake edge between any infinite loops to the + exit block. Some optimizations require a path from each node to + the exit node. + + See also Morgan, Figure 3.10, pp. 82-83. + + The current implementation is ugly, not attempting to minimize the + number of inserted fake edges. To reduce the number of fake edges + to insert, add fake edges from _innermost_ loops containing only + nodes not reachable from the exit block. */ + +void +connect_infinite_loops_to_exit () +{ + basic_block unvisited_block; + + /* Perform depth-first search in the reverse graph to find nodes + reachable from the exit block. */ + struct depth_first_search_dsS dfs_ds; + + flow_dfs_compute_reverse_init (&dfs_ds); + flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR); + + /* Repeatedly add fake edges, updating the unreachable nodes. 
*/ + while (1) + { + unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds); + if (!unvisited_block) + break; + make_edge (NULL, unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE); + flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block); + } + + flow_dfs_compute_reverse_finish (&dfs_ds); + + return; +} + +/* Compute reverse top sort order */ +void +flow_reverse_top_sort_order_compute (rts_order) + int *rts_order; +{ + edge *stack; + int sp; + int postnum = 0; + sbitmap visited; + + /* Allocate stack for back-tracking up CFG. */ + stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + sp = 0; + + /* Allocate bitmap to track nodes that have been visited. */ + visited = sbitmap_alloc (n_basic_blocks); + + /* None of the nodes in the CFG have been visited yet. */ + sbitmap_zero (visited); + + /* Push the first edge on to the stack. */ + stack[sp++] = ENTRY_BLOCK_PTR->succ; + + while (sp) + { + edge e; + basic_block src; + basic_block dest; + + /* Look at the edge on the top of the stack. */ + e = stack[sp - 1]; + src = e->src; + dest = e->dest; + + /* Check if the edge destination has been visited yet. */ + if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) + { + /* Mark that we have visited the destination. */ + SET_BIT (visited, dest->index); + + if (dest->succ) + { + /* Since the DEST node has been visited for the first + time, check its successors. */ + stack[sp++] = dest->succ; + } + else + rts_order[postnum++] = dest->index; + } + else + { + if (! e->succ_next && src != ENTRY_BLOCK_PTR) + rts_order[postnum++] = src->index; + + if (e->succ_next) + stack[sp - 1] = e->succ_next; + else + sp--; + } + } + + free (stack); + sbitmap_free (visited); +} + +/* Compute the depth first search order and store in the array + DFS_ORDER if non-zero, marking the nodes visited in VISITED. If + RC_ORDER is non-zero, return the reverse completion number for each + node. Returns the number of nodes visited. 
A depth first search + tries to get as far away from the starting point as quickly as + possible. */ + +int +flow_depth_first_order_compute (dfs_order, rc_order) + int *dfs_order; + int *rc_order; +{ + edge *stack; + int sp; + int dfsnum = 0; + int rcnum = n_basic_blocks - 1; + sbitmap visited; + + /* Allocate stack for back-tracking up CFG. */ + stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + sp = 0; + + /* Allocate bitmap to track nodes that have been visited. */ + visited = sbitmap_alloc (n_basic_blocks); + + /* None of the nodes in the CFG have been visited yet. */ + sbitmap_zero (visited); + + /* Push the first edge on to the stack. */ + stack[sp++] = ENTRY_BLOCK_PTR->succ; + + while (sp) + { + edge e; + basic_block src; + basic_block dest; + + /* Look at the edge on the top of the stack. */ + e = stack[sp - 1]; + src = e->src; + dest = e->dest; + + /* Check if the edge destination has been visited yet. */ + if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) + { + /* Mark that we have visited the destination. */ + SET_BIT (visited, dest->index); + + if (dfs_order) + dfs_order[dfsnum++] = dest->index; + + if (dest->succ) + { + /* Since the DEST node has been visited for the first + time, check its successors. */ + stack[sp++] = dest->succ; + } + else + { + /* There are no successors for the DEST node so assign + its reverse completion number. */ + if (rc_order) + rc_order[rcnum--] = dest->index; + } + } + else + { + if (! e->succ_next && src != ENTRY_BLOCK_PTR) + { + /* There are no more successors for the SRC node + so assign its reverse completion number. */ + if (rc_order) + rc_order[rcnum--] = src->index; + } + + if (e->succ_next) + stack[sp - 1] = e->succ_next; + else + sp--; + } + } + + free (stack); + sbitmap_free (visited); + + /* The number of nodes visited should not be greater than + n_basic_blocks. */ + if (dfsnum > n_basic_blocks) + abort (); + + /* There are some nodes left in the CFG that are unreachable. 
*/ + if (dfsnum < n_basic_blocks) + abort (); + return dfsnum; +} + +/* Compute the depth first search order on the _reverse_ graph and + store in the array DFS_ORDER, marking the nodes visited in VISITED. + Returns the number of nodes visited. + + The computation is split into three pieces: + + flow_dfs_compute_reverse_init () creates the necessary data + structures. + + flow_dfs_compute_reverse_add_bb () adds a basic block to the data + structures. The block will start the search. + + flow_dfs_compute_reverse_execute () continues (or starts) the + search using the block on the top of the stack, stopping when the + stack is empty. + + flow_dfs_compute_reverse_finish () destroys the necessary data + structures. + + Thus, the user will probably call ..._init(), call ..._add_bb() to + add a beginning basic block to the stack, call ..._execute(), + possibly add another bb to the stack and again call ..._execute(), + ..., and finally call _finish(). */ + +/* Initialize the data structures used for depth-first search on the + reverse graph. If INITIALIZE_STACK is nonzero, the exit block is + added to the basic block stack. DATA is the current depth-first + search context. If INITIALIZE_STACK is non-zero, there is an + element on the stack. */ + +static void +flow_dfs_compute_reverse_init (data) + depth_first_search_ds data; +{ + /* Allocate stack for back-tracking up CFG. */ + data->stack = + (basic_block *) xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) + * sizeof (basic_block)); + data->sp = 0; + + /* Allocate bitmap to track nodes that have been visited. */ + data->visited_blocks = sbitmap_alloc (n_basic_blocks - (INVALID_BLOCK + 1)); + + /* None of the nodes in the CFG have been visited yet. */ + sbitmap_zero (data->visited_blocks); + + return; +} + +/* Add the specified basic block to the top of the dfs data + structures. When the search continues, it will start at the + block. 
*/ + +static void +flow_dfs_compute_reverse_add_bb (data, bb) + depth_first_search_ds data; + basic_block bb; +{ + data->stack[data->sp++] = bb; + return; +} + +/* Continue the depth-first search through the reverse graph starting + with the block at the stack's top and ending when the stack is + empty. Visited nodes are marked. Returns an unvisited basic + block, or NULL if there is none available. */ + +static basic_block +flow_dfs_compute_reverse_execute (data) + depth_first_search_ds data; +{ + basic_block bb; + edge e; + int i; + + while (data->sp > 0) + { + bb = data->stack[--data->sp]; + + /* Mark that we have visited this node. */ + if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1))) + { + SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)); + + /* Perform depth-first search on adjacent vertices. */ + for (e = bb->pred; e; e = e->pred_next) + flow_dfs_compute_reverse_add_bb (data, e->src); + } + } + + /* Determine if there are unvisited basic blocks. */ + for (i = n_basic_blocks - (INVALID_BLOCK + 1); --i >= 0;) + if (!TEST_BIT (data->visited_blocks, i)) + return BASIC_BLOCK (i + (INVALID_BLOCK + 1)); + return NULL; +} + +/* Destroy the data structures needed for depth-first search on the + reverse graph. */ + +static void +flow_dfs_compute_reverse_finish (data) + depth_first_search_ds data; +{ + free (data->stack); + sbitmap_free (data->visited_blocks); + return; +} diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c new file mode 100644 index 00000000000..2f62b067ff5 --- /dev/null +++ b/gcc/cfgbuild.c @@ -0,0 +1,791 @@ +/* Control flow graph building code for GNU compiler. + Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 2001 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to the Free +Software Foundation, 59 Temple Place - Suite 330, Boston, MA +02111-1307, USA. */ + +/* find_basic_blocks divides the current function's rtl into basic + blocks and constructs the CFG. The blocks are recorded in the + basic_block_info array; the CFG exists in the edge structures + referenced by the blocks. + + find_basic_blocks also finds any unreachable loops and deletes them. + + Available functionality: + - CFG construction + find_basic_blocks + - Local CFG construction + find_sub_basic_blocks + */ + +#include "config.h" +#include "system.h" +#include "tree.h" +#include "rtl.h" +#include "hard-reg-set.h" +#include "basic-block.h" +#include "regs.h" +#include "flags.h" +#include "output.h" +#include "function.h" +#include "except.h" +#include "toplev.h" +#include "timevar.h" + +#include "obstack.h" +static int count_basic_blocks PARAMS ((rtx)); +static void find_basic_blocks_1 PARAMS ((rtx)); +static rtx find_label_refs PARAMS ((rtx, rtx)); +static void make_edges PARAMS ((rtx, int, int, int)); +static void make_label_edge PARAMS ((sbitmap *, basic_block, + rtx, int)); +static void make_eh_edge PARAMS ((sbitmap *, basic_block, rtx)); + +/* Count the basic blocks of the function. 
*/ + +static int +count_basic_blocks (f) + rtx f; +{ + register rtx insn; + register RTX_CODE prev_code; + register int count = 0; + int saw_abnormal_edge = 0; + + prev_code = JUMP_INSN; + for (insn = f; insn; insn = NEXT_INSN (insn)) + { + enum rtx_code code = GET_CODE (insn); + + if (code == CODE_LABEL + || (GET_RTX_CLASS (code) == 'i' + && (prev_code == JUMP_INSN + || prev_code == BARRIER + || saw_abnormal_edge))) + { + saw_abnormal_edge = 0; + count++; + } + + /* Record whether this insn created an edge. */ + if (code == CALL_INSN) + { + rtx note; + + /* If there is a nonlocal goto label and the specified + region number isn't -1, we have an edge. */ + if (nonlocal_goto_handler_labels + && ((note = find_reg_note (insn, REG_EH_REGION, NULL_RTX)) == 0 + || INTVAL (XEXP (note, 0)) >= 0)) + saw_abnormal_edge = 1; + + else if (can_throw_internal (insn)) + saw_abnormal_edge = 1; + } + else if (flag_non_call_exceptions + && code == INSN + && can_throw_internal (insn)) + saw_abnormal_edge = 1; + + if (code != NOTE) + prev_code = code; + } + + /* The rest of the compiler works a bit smoother when we don't have to + check for the edge case of do-nothing functions with no basic blocks. */ + if (count == 0) + { + emit_insn (gen_rtx_USE (VOIDmode, const0_rtx)); + count = 1; + } + + return count; +} + +/* Scan a list of insns for labels referred to other than by jumps. + This is used to scan the alternatives of a call placeholder. */ +static rtx +find_label_refs (f, lvl) + rtx f; + rtx lvl; +{ + rtx insn; + + for (insn = f; insn; insn = NEXT_INSN (insn)) + if (INSN_P (insn) && GET_CODE (insn) != JUMP_INSN) + { + rtx note; + + /* Make a list of all labels referred to other than by jumps + (which just don't have the REG_LABEL notes). + + Make a special exception for labels followed by an ADDR*VEC, + as this would be a part of the tablejump setup code. + + Make a special exception to registers loaded with label + values just before jump insns that use them. 
*/ + + for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) + if (REG_NOTE_KIND (note) == REG_LABEL) + { + rtx lab = XEXP (note, 0), next; + + if ((next = next_nonnote_insn (lab)) != NULL + && GET_CODE (next) == JUMP_INSN + && (GET_CODE (PATTERN (next)) == ADDR_VEC + || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)) + ; + else if (GET_CODE (lab) == NOTE) + ; + else if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN + && find_reg_note (NEXT_INSN (insn), REG_LABEL, lab)) + ; + else + lvl = alloc_EXPR_LIST (0, XEXP (note, 0), lvl); + } + } + + return lvl; +} + +/* Create an edge between two basic blocks. FLAGS are auxiliary information + about the edge that is accumulated between calls. */ + +/* Create an edge from a basic block to a label. */ + +static void +make_label_edge (edge_cache, src, label, flags) + sbitmap *edge_cache; + basic_block src; + rtx label; + int flags; +{ + if (GET_CODE (label) != CODE_LABEL) + abort (); + + /* If the label was never emitted, this insn is junk, but avoid a + crash trying to refer to BLOCK_FOR_INSN (label). This can happen + as a result of a syntax error and a diagnostic has already been + printed. */ + + if (INSN_UID (label) == 0) + return; + + make_edge (edge_cache, src, BLOCK_FOR_INSN (label), flags); +} + +/* Create the edges generated by INSN in REGION. */ + +static void +make_eh_edge (edge_cache, src, insn) + sbitmap *edge_cache; + basic_block src; + rtx insn; +{ + int is_call = (GET_CODE (insn) == CALL_INSN ? EDGE_ABNORMAL_CALL : 0); + rtx handlers, i; + + handlers = reachable_handlers (insn); + + for (i = handlers; i; i = XEXP (i, 1)) + make_label_edge (edge_cache, src, XEXP (i, 0), + EDGE_ABNORMAL | EDGE_EH | is_call); + + free_INSN_LIST_list (&handlers); +} +/* Identify the edges between basic blocks MIN to MAX. + + NONLOCAL_LABEL_LIST is a list of non-local labels in the function. Blocks + that are otherwise unreachable may be reachable with a non-local goto. 
+ + BB_EH_END is an array indexed by basic block number in which we record + the list of exception regions active at the end of the basic block. */ + +static void +make_edges (label_value_list, min, max, update_p) + rtx label_value_list; + int min, max, update_p; +{ + int i; + sbitmap *edge_cache = NULL; + + /* Assume no computed jump; revise as we create edges. */ + current_function_has_computed_jump = 0; + + /* Heavy use of computed goto in machine-generated code can lead to + nearly fully-connected CFGs. In that case we spend a significant + amount of time searching the edge lists for duplicates. */ + if (forced_labels || label_value_list) + { + edge_cache = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); + sbitmap_vector_zero (edge_cache, n_basic_blocks); + + if (update_p) + for (i = min; i <= max; ++i) + { + edge e; + for (e = BASIC_BLOCK (i)->succ; e ; e = e->succ_next) + if (e->dest != EXIT_BLOCK_PTR) + SET_BIT (edge_cache[i], e->dest->index); + } + } + + /* By nature of the way these get numbered, block 0 is always the entry. */ + make_edge (edge_cache, ENTRY_BLOCK_PTR, BASIC_BLOCK (0), EDGE_FALLTHRU); + + for (i = min; i <= max; ++i) + { + basic_block bb = BASIC_BLOCK (i); + rtx insn, x; + enum rtx_code code; + int force_fallthru = 0; + + if (GET_CODE (bb->head) == CODE_LABEL + && LABEL_ALTERNATE_NAME (bb->head)) + make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0); + + /* Examine the last instruction of the block, and discover the + ways we can leave the block. */ + + insn = bb->end; + code = GET_CODE (insn); + + /* A branch. */ + if (code == JUMP_INSN) + { + rtx tmp; + + /* Recognize exception handling placeholders. */ + if (GET_CODE (PATTERN (insn)) == RESX) + make_eh_edge (edge_cache, bb, insn); + + /* Recognize a non-local goto as a branch outside the + current function. */ + else if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX)) + ; + + /* ??? Recognize a tablejump and do the right thing. 
*/ + else if ((tmp = JUMP_LABEL (insn)) != NULL_RTX + && (tmp = NEXT_INSN (tmp)) != NULL_RTX + && GET_CODE (tmp) == JUMP_INSN + && (GET_CODE (PATTERN (tmp)) == ADDR_VEC + || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)) + { + rtvec vec; + int j; + + if (GET_CODE (PATTERN (tmp)) == ADDR_VEC) + vec = XVEC (PATTERN (tmp), 0); + else + vec = XVEC (PATTERN (tmp), 1); + + for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j) + make_label_edge (edge_cache, bb, + XEXP (RTVEC_ELT (vec, j), 0), 0); + + /* Some targets (eg, ARM) emit a conditional jump that also + contains the out-of-range target. Scan for these and + add an edge if necessary. */ + if ((tmp = single_set (insn)) != NULL + && SET_DEST (tmp) == pc_rtx + && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE + && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF) + make_label_edge (edge_cache, bb, + XEXP (XEXP (SET_SRC (tmp), 2), 0), 0); + +#ifdef CASE_DROPS_THROUGH + /* Silly VAXen. The ADDR_VEC is going to be in the way of + us naturally detecting fallthru into the next block. */ + force_fallthru = 1; +#endif + } + + /* If this is a computed jump, then mark it as reaching + everything on the label_value_list and forced_labels list. */ + else if (computed_jump_p (insn)) + { + current_function_has_computed_jump = 1; + + for (x = label_value_list; x; x = XEXP (x, 1)) + make_label_edge (edge_cache, bb, XEXP (x, 0), EDGE_ABNORMAL); + + for (x = forced_labels; x; x = XEXP (x, 1)) + make_label_edge (edge_cache, bb, XEXP (x, 0), EDGE_ABNORMAL); + } + + /* Returns create an exit out. */ + else if (returnjump_p (insn)) + make_edge (edge_cache, bb, EXIT_BLOCK_PTR, 0); + + /* Otherwise, we have a plain conditional or unconditional jump. */ + else + { + if (! JUMP_LABEL (insn)) + abort (); + make_label_edge (edge_cache, bb, JUMP_LABEL (insn), 0); + } + } + + /* If this is a sibling call insn, then this is in effect a + combined call and return, and so we need an edge to the + exit block. 
No need to worry about EH edges, since we + wouldn't have created the sibling call in the first place. */ + + if (code == CALL_INSN && SIBLING_CALL_P (insn)) + make_edge (edge_cache, bb, EXIT_BLOCK_PTR, + EDGE_ABNORMAL | EDGE_ABNORMAL_CALL); + + /* If this is a CALL_INSN, then mark it as reaching the active EH + handler for this CALL_INSN. If we're handling non-call + exceptions then any insn can reach any of the active handlers. + + Also mark the CALL_INSN as reaching any nonlocal goto handler. */ + + else if (code == CALL_INSN || flag_non_call_exceptions) + { + /* Add any appropriate EH edges. */ + make_eh_edge (edge_cache, bb, insn); + + if (code == CALL_INSN && nonlocal_goto_handler_labels) + { + /* ??? This could be made smarter: in some cases it's possible + to tell that certain calls will not do a nonlocal goto. + + For example, if the nested functions that do the nonlocal + gotos do not have their addresses taken, then only calls to + those functions or to other nested functions that use them + could possibly do nonlocal gotos. */ + /* We do know that a REG_EH_REGION note with a value less + than 0 is guaranteed not to perform a non-local goto. */ + rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); + if (!note || INTVAL (XEXP (note, 0)) >= 0) + for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1)) + make_label_edge (edge_cache, bb, XEXP (x, 0), + EDGE_ABNORMAL | EDGE_ABNORMAL_CALL); + } + } + + /* Find out if we can drop through to the next block. 
*/ + insn = next_nonnote_insn (insn); + if (!insn || (i + 1 == n_basic_blocks && force_fallthru)) + make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU); + else if (i + 1 < n_basic_blocks) + { + rtx tmp = BLOCK_HEAD (i + 1); + if (GET_CODE (tmp) == NOTE) + tmp = next_nonnote_insn (tmp); + if (force_fallthru || insn == tmp) + make_edge (edge_cache, bb, BASIC_BLOCK (i + 1), EDGE_FALLTHRU); + } + } + + if (edge_cache) + sbitmap_vector_free (edge_cache); +} + +/* Find all basic blocks of the function whose first insn is F. + + Collect and return a list of labels whose addresses are taken. This + will be used in make_edges for use with computed gotos. */ + +static void +find_basic_blocks_1 (f) + rtx f; +{ + register rtx insn, next; + int i = 0; + rtx bb_note = NULL_RTX; + rtx lvl = NULL_RTX; + rtx trll = NULL_RTX; + rtx head = NULL_RTX; + rtx end = NULL_RTX; + + /* We process the instructions in a slightly different way than we did + previously. This is so that we see a NOTE_BASIC_BLOCK after we have + closed out the previous block, so that it gets attached at the proper + place. Since this form should be equivalent to the previous, + count_basic_blocks continues to use the old form as a check. */ + + for (insn = f; insn; insn = next) + { + enum rtx_code code = GET_CODE (insn); + + next = NEXT_INSN (insn); + + switch (code) + { + case NOTE: + { + int kind = NOTE_LINE_NUMBER (insn); + + /* Look for basic block notes with which to keep the + basic_block_info pointers stable. Unthread the note now; + we'll put it back at the right place in create_basic_block. + Or not at all if we've already found a note in this block. */ + if (kind == NOTE_INSN_BASIC_BLOCK) + { + if (bb_note == NULL_RTX) + bb_note = insn; + else + next = flow_delete_insn (insn); + } + break; + } + + case CODE_LABEL: + /* A basic block starts at a label. If we've closed one off due + to a barrier or some such, no need to do it again. 
*/ + if (head != NULL_RTX) + { + create_basic_block (i++, head, end, bb_note); + bb_note = NULL_RTX; + } + + head = end = insn; + break; + + case JUMP_INSN: + /* A basic block ends at a jump. */ + if (head == NULL_RTX) + head = insn; + else + { + /* ??? Make a special check for table jumps. The way this + happens is truly and amazingly gross. We are about to + create a basic block that contains just a code label and + an addr*vec jump insn. Worse, an addr_diff_vec creates + its own natural loop. + + Prevent this bit of brain damage, pasting things together + correctly in make_edges. + + The correct solution involves emitting the table directly + on the tablejump instruction as a note, or JUMP_LABEL. */ + + if (GET_CODE (PATTERN (insn)) == ADDR_VEC + || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) + { + head = end = NULL; + n_basic_blocks--; + break; + } + } + end = insn; + goto new_bb_inclusive; + + case BARRIER: + /* A basic block ends at a barrier. It may be that an unconditional + jump already closed the basic block -- no need to do it again. */ + if (head == NULL_RTX) + break; + goto new_bb_exclusive; + + case CALL_INSN: + { + /* Record whether this call created an edge. */ + rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); + int region = (note ? INTVAL (XEXP (note, 0)) : 0); + + if (GET_CODE (PATTERN (insn)) == CALL_PLACEHOLDER) + { + /* Scan each of the alternatives for label refs. */ + lvl = find_label_refs (XEXP (PATTERN (insn), 0), lvl); + lvl = find_label_refs (XEXP (PATTERN (insn), 1), lvl); + lvl = find_label_refs (XEXP (PATTERN (insn), 2), lvl); + /* Record its tail recursion label, if any. */ + if (XEXP (PATTERN (insn), 3) != NULL_RTX) + trll = alloc_EXPR_LIST (0, XEXP (PATTERN (insn), 3), trll); + } + + /* A basic block ends at a call that can either throw or + do a non-local goto. 
*/ + if ((nonlocal_goto_handler_labels && region >= 0) + || can_throw_internal (insn)) + { + new_bb_inclusive: + if (head == NULL_RTX) + head = insn; + end = insn; + + new_bb_exclusive: + create_basic_block (i++, head, end, bb_note); + head = end = NULL_RTX; + bb_note = NULL_RTX; + break; + } + } + /* Fall through. */ + + case INSN: + /* Non-call exceptions generate new blocks just like calls. */ + if (flag_non_call_exceptions && can_throw_internal (insn)) + goto new_bb_inclusive; + + if (head == NULL_RTX) + head = insn; + end = insn; + break; + + default: + abort (); + } + + if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) + { + rtx note; + + /* Make a list of all labels referred to other than by jumps. + + Make a special exception for labels followed by an ADDR*VEC, + as this would be a part of the tablejump setup code. + + Make a special exception to registers loaded with label + values just before jump insns that use them. */ + + for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) + if (REG_NOTE_KIND (note) == REG_LABEL) + { + rtx lab = XEXP (note, 0), next; + + if ((next = next_nonnote_insn (lab)) != NULL + && GET_CODE (next) == JUMP_INSN + && (GET_CODE (PATTERN (next)) == ADDR_VEC + || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)) + ; + else if (GET_CODE (lab) == NOTE) + ; + else if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN + && find_reg_note (NEXT_INSN (insn), REG_LABEL, lab)) + ; + else + lvl = alloc_EXPR_LIST (0, XEXP (note, 0), lvl); + } + } + } + + if (head != NULL_RTX) + create_basic_block (i++, head, end, bb_note); + else if (bb_note) + flow_delete_insn (bb_note); + + if (i != n_basic_blocks) + abort (); + + label_value_list = lvl; + tail_recursion_label_list = trll; +} + + +/* Find basic blocks of the current function. + F is the first insn of the function and NREGS the number of register + numbers in use. 
 */

/* Rebuild the CFG of the current function from scratch.
   F is the first insn of the function; NREGS and FILE are unused
   (kept for interface compatibility with older callers).  */

void
find_basic_blocks (f, nregs, file)
     rtx f;
     int nregs ATTRIBUTE_UNUSED;
     FILE *file ATTRIBUTE_UNUSED;
{
  int max_uid;
  timevar_push (TV_CFG);

  /* Flush out existing data.  */
  if (basic_block_info != NULL)
    {
      int i;

      clear_edges ();

      /* Clear bb->aux on all extant basic blocks.  We'll use this as a
         tag for reuse during create_basic_block, just in case some pass
         copies around basic block notes improperly.  */
      for (i = 0; i < n_basic_blocks; ++i)
        BASIC_BLOCK (i)->aux = NULL;

      VARRAY_FREE (basic_block_info);
    }

  n_basic_blocks = count_basic_blocks (f);

  /* Size the basic block table.  The actual structures will be allocated
     by find_basic_blocks_1, since we want to keep the structure pointers
     stable across calls to find_basic_blocks.  */
  /* ??? This whole issue would be much simpler if we called find_basic_blocks
     exactly once, and thereafter we don't have a single long chain of
     instructions at all until close to the end of compilation when we
     actually lay them out.  */

  VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info");

  find_basic_blocks_1 (f);

  /* Record the block to which an insn belongs.  */
  /* ??? This should be done another way, by which (perhaps) a label is
     tagged directly with the basic block that it starts.  It is used for
     more than that currently, but IMO that is the only valid use.  */

  max_uid = get_max_uid ();
#ifdef AUTO_INC_DEC
  /* Leave space for insns life_analysis makes in some cases for auto-inc.
     These cases are rare, so we don't need too much space.  */
  max_uid += max_uid / 10;
#endif

  compute_bb_for_insn (max_uid);

  /* Discover the edges of our cfg.  */
  make_edges (label_value_list, 0, n_basic_blocks - 1, 0);

  /* Do very simple cleanup now, for the benefit of code that runs between
     here and cleanup_cfg, e.g. thread_prologue_and_epilogue_insns.  */
  tidy_fallthru_edges ();

  mark_critical_edges ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif
  timevar_pop (TV_CFG);
}

/* Assume that someone emitted code with control flow instructions to the
   basic block BB.  Update the data structure: split BB at every new label
   or jump found inside it, and rebuild the edges and profile data for the
   resulting blocks.  */

void
find_sub_basic_blocks (bb)
     basic_block bb;
{
  rtx insn = bb->head;
  rtx end = bb->end;
  rtx jump_insn = NULL_RTX;
  edge falltru = 0;
  basic_block first_bb = bb;
  int i;

  /* A block consisting of a single insn cannot need splitting.  */
  if (insn == bb->end)
    return;

  if (GET_CODE (insn) == CODE_LABEL)
    insn = NEXT_INSN (insn);

  /* Scan insn chain and try to find new basic block boundaries.  */
  while (1)
    {
      enum rtx_code code = GET_CODE (insn);
      switch (code)
        {
        case BARRIER:
          /* A barrier is only valid right after a jump we've recorded.  */
          if (!jump_insn)
            abort ();
          break;
        /* On code label, split current basic block.  */
        case CODE_LABEL:
          falltru = split_block (bb, PREV_INSN (insn));
          if (jump_insn)
            bb->end = jump_insn;
          bb = falltru->dest;
          remove_edge (falltru);
          jump_insn = 0;
          if (LABEL_ALTERNATE_NAME (insn))
            make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0);
          break;
        case INSN:
        case JUMP_INSN:
          /* In case we've previously split insn on the JUMP_INSN, move the
             block header to proper place.  */
          if (jump_insn)
            {
              falltru = split_block (bb, PREV_INSN (insn));
              bb->end = jump_insn;
              bb = falltru->dest;
              remove_edge (falltru);
              jump_insn = 0;
            }
          /* We need some special care for those expressions.  */
          if (GET_CODE (insn) == JUMP_INSN)
            {
              /* Jump tables are not expected inside an existing block.  */
              if (GET_CODE (PATTERN (insn)) == ADDR_VEC
                  || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
                abort();
              jump_insn = insn;
            }
          break;
        default:
          break;
        }
      if (insn == end)
        break;
      insn = NEXT_INSN (insn);
    }

  /* In case expander replaced normal insn by sequence terminating by
     return and barrier, or possibly other sequence not behaving like
     ordinary jump, we need to take care and move basic block boundary.  */
  if (jump_insn && GET_CODE (bb->end) != JUMP_INSN)
    bb->end = jump_insn;

  /* We've possibly replaced the conditional jump by conditional jump
     followed by cleanup at fallthru edge, so the outgoing edges may
     be dead.  */
  purge_dead_edges (bb);

  /* Now re-scan and wire in all edges.  This expects simple (conditional)
     jumps at the end of each new basic block.  */
  make_edges (NULL, first_bb->index, bb->index, 1);

  /* Update branch probabilities.  Expect only (un)conditional jumps
     to be created with only the forward edges.  */
  for (i = first_bb->index; i <= bb->index; i++)
    {
      edge e,f;
      basic_block b = BASIC_BLOCK (i);
      if (b != first_bb)
        {
          /* Recompute count/frequency of each new block from its
             incoming edges.  */
          b->count = 0;
          b->frequency = 0;
          for (e = b->pred; e; e=e->pred_next)
            {
              b->count += e->count;
              b->frequency += EDGE_FREQUENCY (e);
            }
        }
      /* Exactly two successors: distribute the REG_BR_PROB note between
         the branch and fallthru edges.  */
      if (b->succ && b->succ->succ_next && !b->succ->succ_next->succ_next)
        {
          rtx note = find_reg_note (b->end, REG_BR_PROB, NULL);
          int probability;

          if (!note)
            continue;
          probability = INTVAL (XEXP (find_reg_note (b->end,
                                                     REG_BR_PROB,
                                                     NULL), 0));
          e = BRANCH_EDGE (b);
          e->probability = probability;
          e->count = ((b->count * probability + REG_BR_PROB_BASE / 2)
                      / REG_BR_PROB_BASE);
          f = FALLTHRU_EDGE (b);
          f->probability = REG_BR_PROB_BASE - probability;
          f->count = b->count - e->count;
        }
      /* Single successor: it receives the whole count.  */
      if (b->succ && !b->succ->succ_next)
        {
          e = b->succ;
          e->probability = REG_BR_PROB_BASE;
          e->count = b->count;
        }
    }
}
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
new file mode 100644
index 00000000000..00eb80adc7c
--- /dev/null
+++ b/gcc/cfgcleanup.c
@@ -0,0 +1,1248 @@
/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This file contains the optimizer of the control flow.  The main entrypoint
   is cleanup_cfg.  The following optimizations are performed:

   - Unreachable blocks removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"

#include "obstack.h"

/* Forward declarations for the local cleanup helpers below.  */
static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
static bool try_crossjump_bb PARAMS ((int, basic_block));
static bool outgoing_edges_match PARAMS ((basic_block, basic_block));
static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
                                         rtx *, rtx *));

static bool delete_unreachable_blocks PARAMS ((void));
static int tail_recursion_label_p PARAMS ((rtx));
static int merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
                                                          basic_block));
static int merge_blocks_move_successor_nojumps PARAMS ((basic_block,
                                                        basic_block));
static int merge_blocks PARAMS ((edge,basic_block,basic_block,
                                 int));
static bool try_optimize_cfg PARAMS ((int));
static bool try_simplify_condjump PARAMS ((basic_block));
static bool try_forward_edges PARAMS ((int, basic_block));

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (cbranch_block)
     basic_block cbranch_block;
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = cbranch_block->end;
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->index == n_basic_blocks - 1
      || !forwarder_block_p (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  Prevent jump.c from deleting
     "unreachable" instructions.  */
  LABEL_NUSES (JUMP_LABEL (cbranch_insn))++;
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 1))
    {
      /* Inversion failed; undo the artificial label-use bump.  */
      LABEL_NUSES (JUMP_LABEL (cbranch_insn))--;
      return false;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (jump_block->end));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  flow_delete_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);

  return true;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.
 */

static bool
try_forward_edges (mode, b)
     basic_block b;
     int mode;
{
  bool changed = false;
  edge e, next;

  for (e = b->succ; e ; e = next)
    {
      basic_block target, first;
      int counter;

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed to forward fallthru
         edge to the same place as the branch edge of conditional branch
         and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
        continue;

      target = first = e->dest;
      counter = 0;

      /* Look for the real destination of the jump.
         Avoid infinite loop in the infinite empty loop by counting
         up to n_basic_blocks.  */
      while (forwarder_block_p (target)
             && target->succ->dest != EXIT_BLOCK_PTR
             && counter < n_basic_blocks)
        {
          /* Bypass trivial infinite loops.  */
          if (target == target->succ->dest)
            counter = n_basic_blocks;

          /* Avoid killing of loop pre-headers, as it is the place loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of block and CODE_LABEL of the loop, for non forwarders
             it must appear before the JUMP_INSN.  */
          if (mode & CLEANUP_PRE_LOOP)
            {
              rtx insn = (target->succ->flags & EDGE_FALLTHRU
                          ? target->head : prev_nonnote_insn (target->end));

              if (GET_CODE (insn) != NOTE)
                insn = NEXT_INSN (insn);

              /* Scan forward for a LOOP_BEG note before the next real
                 insn or label.  */
              for (;insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (GET_CODE (insn) == NOTE
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (GET_CODE (insn) == NOTE)
                break;
            }
          target = target->succ->dest, counter++;
        }

      if (counter >= n_basic_blocks)
        {
          if (rtl_dump_file)
            fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;

          if (redirect_edge_and_branch (e, target))
            {
              /* We successfully forwarded the edge.  Now update profile
                 data: for each edge we traversed in the chain, remove
                 the original edge's execution count.  */
              int edge_frequency = ((edge_probability * b->frequency
                                     + REG_BR_PROB_BASE / 2)
                                    / REG_BR_PROB_BASE);

              do
                {
                  first->count -= edge_count;
                  first->succ->count -= edge_count;
                  first->frequency -= edge_frequency;
                  first = first->succ->dest;
                }
              while (first != target);

              changed = true;
            }
          else
            {
              if (rtl_dump_file)
                fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
            }
        }
    }

  return changed;
}

/* Return nonzero if LABEL appears on tail_recursion_label_list, i.e. it
   is the label of a tail-recursive call placeholder.  */

static int
tail_recursion_label_p (label)
     rtx label;
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return 1;

  return 0;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static int
merge_blocks_move_predecessor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;
  int index;

  barrier = next_nonnote_insn (a->end);
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  flow_delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  squeeze_notes (&a->head, &a->end);

  /* Scramble the insn chain.  */
  if (a->end != PREV_INSN (b->head))
    reorder_insns (a->head, a->end, PREV_INSN (b->head));

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
               a->index, b->index);
    }

  /* Swap the records for the two blocks around.  Although we are deleting B,
     A is now where B was and we want to compact the BB array from where
     A used to be.  */
  BASIC_BLOCK (a->index) = b;
  BASIC_BLOCK (b->index) = a;
  index = a->index;
  a->index = b->index;
  b->index = index;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);

  return 1;
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static int
merge_blocks_move_successor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;

  barrier = NEXT_INSN (b->end);

  /* Recognize a jump table following block B.  */
  if (barrier
      && GET_CODE (barrier) == CODE_LABEL
      && NEXT_INSN (barrier)
      && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
      && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
          || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
    {
      /* Keep the jump table with the block by extending B past it.  */
      b->end = NEXT_INSN (barrier);
      barrier = NEXT_INSN (b->end);
    }

  /* There had better have been a barrier there.  Delete it.  */
  if (barrier && GET_CODE (barrier) == BARRIER)
    flow_delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  squeeze_notes (&b->head, &b->end);

  /* Scramble the insn chain.  */
  reorder_insns (b->head, b->end, a->end);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
               b->index, a->index);
    }

  return 1;
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return true iff the attempt succeeded.  */

static int
merge_blocks (e, b, c, mode)
     edge e;
     basic_block b, c;
     int mode;
{
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if (GET_CODE (c->head) == CODE_LABEL
      && tail_recursion_label_p (c->head))
    return 0;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      merge_blocks_nomove (b, c);

      if (rtl_dump_file)
        {
          fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
                   b->index, c->index);
        }

      return 1;
    }
  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, c_fallthru_edge;
      int c_has_outgoing_fallthru;
      int b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (forwarder_block_p (b) || forwarder_block_p (c))
        return 0;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;
      c_has_outgoing_fallthru = (tmp_edge != NULL);
      c_fallthru_edge = tmp_edge;

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;
      b_has_incoming_fallthru = (tmp_edge != NULL);

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */
      if (! b_has_incoming_fallthru)
        return merge_blocks_move_predecessor_nojumps (b, c);

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        return merge_blocks_move_successor_nojumps (b, c);

      /* Otherwise, we'll need to insert an extra jump, and possibly
         a new block to contain it.  We can't redirect to EXIT_BLOCK_PTR,
         as we don't have explicit return instructions before epilogues
         are generated, so give up on that case.  */

      if (c_fallthru_edge->dest != EXIT_BLOCK_PTR
          && merge_blocks_move_successor_nojumps (b, c))
        {
          basic_block target = c_fallthru_edge->dest;
          rtx barrier;
          basic_block new;

          /* This is a dirty hack to avoid code duplication.

             Set edge to point to wrong basic block, so
             redirect_edge_and_branch_force will do the trick
             and rewire edge back to the original location.  */
          redirect_edge_succ (c_fallthru_edge, ENTRY_BLOCK_PTR);
          new = redirect_edge_and_branch_force (c_fallthru_edge, target);

          /* We've just created barrier, but another barrier is
             already present in the stream.  Avoid the duplicate.  */
          barrier = next_nonnote_insn (new ? new->end : b->end);
          if (GET_CODE (barrier) != BARRIER)
            abort ();
          flow_delete_insn (barrier);

          return 1;
        }

      return 0;
    }
  return 0;
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that are equivalent.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (mode, bb1, bb2, f1, f2)
     int mode ATTRIBUTE_UNUSED;
     basic_block bb1, bb2;
     rtx *f1, *f2;
{
  rtx i1, i2, p1, p2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = bb1->end;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    i1 = PREV_INSN (i1);
  i2 = bb2->end;
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    i2 = PREV_INSN (i2);

  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  /* Walk both blocks backwards in lock step, counting matching insns.  */
  while (true)
    {
      /* Ignore notes.  */
      while ((GET_CODE (i1) == NOTE && i1 != bb1->head))
        i1 = PREV_INSN (i1);
      while ((GET_CODE (i2) == NOTE && i2 != bb2->head))
        i2 = PREV_INSN (i2);

      if (i1 == bb1->head || i2 == bb2->head)
        break;

      /* Verify that I1 and I2 are equivalent.  */

      if (GET_CODE (i1) != GET_CODE (i2))
        break;

      p1 = PATTERN (i1);
      p2 = PATTERN (i2);

      /* If this is a CALL_INSN, compare register usage information.
         If we don't check this on stack register machines, the two
         CALL_INSNs might be merged leaving reg-stack.c with mismatching
         numbers of stack registers in the same basic block.
         If we don't check this on machines with delay slots, a delay slot may
         be filled that clobbers a parameter expected by the subroutine.

         ??? We take the simple route for now and assume that if they're
         equal, they were constructed identically.  */

      if (GET_CODE (i1) == CALL_INSN
          && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                            CALL_INSN_FUNCTION_USAGE (i2)))
        break;

#ifdef STACK_REGS
      /* If cross_jump_death_matters is not 0, the insn's mode
         indicates whether or not the insn contains any stack-like
         regs.  */

      if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
        {
          /* If register stack conversion has already been done, then
             death notes must also be compared before it is certain that
             the two instruction streams match.  */

          rtx note;
          HARD_REG_SET i1_regset, i2_regset;

          CLEAR_HARD_REG_SET (i1_regset);
          CLEAR_HARD_REG_SET (i2_regset);

          for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
            if (REG_NOTE_KIND (note) == REG_DEAD
                && STACK_REG_P (XEXP (note, 0)))
              SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

          for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
            if (REG_NOTE_KIND (note) == REG_DEAD
                && STACK_REG_P (XEXP (note, 0)))
              SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

          GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

          break;

        done:
          ;
        }
#endif

      if (GET_CODE (p1) != GET_CODE (p2))
        break;

      if (! rtx_renumbered_equal_p (p1, p2))
        {
          /* The following code helps take care of G++ cleanups.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && equiv2
              /* If the equivalences are not to a constant, they may
                 reference pseudos that no longer exist, so we can't
                 use them.  */
              && CONSTANT_P (XEXP (equiv1, 0))
              && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              rtx s1 = single_set (i1);
              rtx s2 = single_set (i2);
              if (s1 != 0 && s2 != 0
                  && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
                {
                  validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
                  validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
                  if (! rtx_renumbered_equal_p (p1, p2))
                    cancel_changes (0);
                  else if (apply_change_group ())
                    goto win;
                }
            }
          break;
        }

    win:
      /* Don't begin a cross-jump with a USE or CLOBBER insn.  */
      if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER)
        {
          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }
      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  if (ninsns)
    {
      /* Don't allow the insn after a compare to be shared by
         cross-jumping unless the compare is also shared.  */
      if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
        last1 = afterlast1, last2 = afterlast2, ninsns--;
    }
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == NOTE)
        last1 = PREV_INSN (last1);
      if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
        last1 = PREV_INSN (last1);
      while (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == NOTE)
        last2 = PREV_INSN (last2);
      if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before end of the basic block, the semantic remains unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (bb1, bb2)
     basic_block bb1;
     basic_block bb2;
{
  /* If BB1 has only one successor, we must be looking at an unconditional
     jump.  Which, by the assumption above, means that we only need to check
     that BB2 has one successor.  */
  if (bb1->succ && !bb1->succ->succ_next)
    return (bb2->succ && !bb2->succ->succ_next);

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (bb1->end))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
          || !bb2->succ->succ_next
          || bb1->succ->succ_next->succ_next
          || !any_condjump_p (bb2->end))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (forwarder_block_p (f1->dest))
        f1 = f1->dest->succ;
      if (forwarder_block_p (f2->dest))
        f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (forwarder_block_p (f1->dest)
          || forwarder_block_p (f2->dest)
          || forwarder_block_p (b1->dest)
          || forwarder_block_p (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (bb1->end);
      set2 = pc_set (bb2->end);
      /* If exactly one of the branches is written with the condition
         inverted (label in the else-arm of the if_then_else), flip the
         sense of the comparison we require.  */
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, bb2->end);
      else
        code2 = GET_CODE (cond2);
      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      /* ??? We should use bb->frequency to allow merging in infrequently
         executed blocks, but at the moment it is not available when
         cleanup_cfg is run.  */
      if (match && !optimize_size)
        {
          rtx note1, note2;
          int prob1, prob2;
          note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
          note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);

          if (note1 && note2)
            {
              prob1 = INTVAL (XEXP (note1, 0));
              prob2 = INTVAL (XEXP (note2, 0));
              if (reverse)
                prob2 = REG_BR_PROB_BASE - prob2;

              /* Fail if the difference in probabilities is
                 greater than 5%.  */
              if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
                return false;
            }
          else if (note1 || note2)
            return false;
        }

      if (rtl_dump_file && match)
        fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* ??? We can handle computed jumps too.  This may be important for
     inlined functions containing switch statements.  Also jumps w/o
     fallthru edges can be handled by simply matching whole insn.  */
  return false;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.
 */

static bool
try_crossjump_to_edge (mode, e1, e2)
     int mode;
     edge e1, e2;
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to;
  rtx newpos1, newpos2;
  edge s;
  rtx last;
  rtx label;
  rtx note;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && forwarder_block_p (src1))
    {
      e1 = src1->pred;
      src1 = e1->src;
    }
  if (src2->pred
      && !src2->pred->pred_next
      && forwarder_block_p (src2))
    {
      e2 = src2->pred;
      src2 = e2->src;
    }

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block in a chain would confuse us
     later...  */
  if (forwarder_block_p (e1->dest)
      && forwarder_block_p (e1->dest->succ->dest))
    return false;
  if (forwarder_block_p (e2->dest)
      && forwarder_block_p (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Likewise with complex edges.
     ??? We should be able to handle most complex edges later with some
     care.  */
  if (e1->flags & EDGE_COMPLEX)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

  /* Avoid splitting if possible.  */
  if (newpos2 == src2->head)
    redirect_to = src2;
  else
    {
      if (rtl_dump_file)
        fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (forwarder_block_p (d))
        d = d->succ->dest;
      /* Find the matching outgoing edge S2 of SRC1 (by ultimate
         destination, looking through forwarders).  */
      for (s2 = src1->succ; ; s2 = s2->succ_next)
        {
          basic_block d2 = s2->dest;
          if (forwarder_block_p (d2))
            d2 = d2->succ->dest;
          if (d == d2)
            break;
        }
      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into infinite loop.  */
      if (forwarder_block_p (s->dest))
        {
          s->dest->succ->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }
      if (forwarder_block_p (s2->dest))
        {
          s2->dest->succ->count -= s2->count;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
        }
      /* Merge the probabilities, weighting by block frequency when
         available (both zero means no profile data to weight by).  */
      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability =
          ((s->probability * redirect_to->frequency +
            s2->probability * src1->frequency)
           / (redirect_to->frequency + src1->frequency));
    }

  note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
  if (note)
    XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);
  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);
  last = src1->end;

  /* Emit the jump insn.  */
  label = block_label (redirect_to);
  src1->end = emit_jump_insn_before (gen_jump (label), newpos1);
  JUMP_LABEL (src1->end) = label;
  LABEL_NUSES (label)++;
  if (basic_block_for_insn)
    set_block_for_new_insns (src1->end, src1);

  /* Delete the now unreachable instructions.  */
  flow_delete_insn_chain (newpos1, last);

  /* Make sure there is a barrier after the new jump.  */
  last = next_nonnote_insn (src1->end);
  if (!last || GET_CODE (last) != BARRIER)
    emit_barrier_after (src1->end);

  /* Update CFG.  */
  while (src1->succ)
    remove_edge (src1->succ);
  make_edge (NULL, src1, redirect_to, 0);
  src1->succ->probability = REG_BR_PROB_BASE;
  src1->succ->count = src1->count;

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.  */

static bool
try_crossjump_bb (mode, bb)
     int mode;
     basic_block bb;
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
    if (fallthru->flags & EDGE_FALLTHRU)
      break;

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* Elide complex edges now, as neither try_crossjump_to_edge
         nor outgoing_edges_match can handle them.  */
      if (e->flags & EDGE_COMPLEX)
        continue;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              /* The predecessor list changed; restart the scan.  */
              nexte = bb->pred;
              continue;
            }
        }

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
      if (e->src->succ != e)
        continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
        {
          nexte2 = e2->pred_next;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* Again, neither try_crossjump_to_edge nor outgoing_edges_match
             can handle complex edges.  */
          if (e2->flags & EDGE_COMPLEX)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              /* The predecessor list changed; restart the outer scan.  */
              nexte = bb->pred;
              break;
            }
        }
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.
*/ + +static bool +try_optimize_cfg (mode) + int mode; +{ + int i; + bool changed_overall = false; + bool changed; + int iterations = 0; + + /* Attempt to merge blocks as made possible by edge removal. If a block + has only one successor, and the successor has only one predecessor, + they may be combined. */ + + do + { + changed = false; + iterations++; + + if (rtl_dump_file) + fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n", + iterations); + + for (i = 0; i < n_basic_blocks;) + { + basic_block c, b = BASIC_BLOCK (i); + edge s; + bool changed_here = false; + + /* Delete trivially dead basic blocks. */ + while (b->pred == NULL) + { + c = BASIC_BLOCK (b->index - 1); + if (rtl_dump_file) + fprintf (rtl_dump_file, "Deleting block %i.\n", b->index); + flow_delete_block (b); + changed = true; + b = c; + } + + /* Remove code labels no longer used. Don't do this before + CALL_PLACEHOLDER is removed, as some branches may be hidden + within. */ + if (b->pred->pred_next == NULL + && (b->pred->flags & EDGE_FALLTHRU) + && !(b->pred->flags & EDGE_COMPLEX) + && GET_CODE (b->head) == CODE_LABEL + && (!(mode & CLEANUP_PRE_SIBCALL) + || !tail_recursion_label_p (b->head)) + /* If previous block ends with condjump jumping to next BB, + we can't delete the label. */ + && (b->pred->src == ENTRY_BLOCK_PTR + || !reg_mentioned_p (b->head, b->pred->src->end))) + { + rtx label = b->head; + b->head = NEXT_INSN (b->head); + flow_delete_insn_chain (label, label); + if (rtl_dump_file) + fprintf (rtl_dump_file, "Deleted label in block %i.\n", + b->index); + } + + /* If we fall through an empty block, we can remove it. */ + if (b->pred->pred_next == NULL + && (b->pred->flags & EDGE_FALLTHRU) + && GET_CODE (b->head) != CODE_LABEL + && forwarder_block_p (b) + /* Note that forwarder_block_p true ensures that there + is a successor for this block. 
*/ + && (b->succ->flags & EDGE_FALLTHRU) + && n_basic_blocks > 1) + { + if (rtl_dump_file) + fprintf (rtl_dump_file, "Deleting fallthru block %i.\n", + b->index); + c = BASIC_BLOCK (b->index ? b->index - 1 : 1); + redirect_edge_succ_nodup (b->pred, b->succ->dest); + flow_delete_block (b); + changed = true; + b = c; + } + + /* Merge blocks. Loop because chains of blocks might be + combineable. */ + while ((s = b->succ) != NULL + && s->succ_next == NULL + && !(s->flags & EDGE_COMPLEX) + && (c = s->dest) != EXIT_BLOCK_PTR + && c->pred->pred_next == NULL + /* If the jump insn has side effects, + we can't kill the edge. */ + && (GET_CODE (b->end) != JUMP_INSN + || onlyjump_p (b->end)) + && merge_blocks (s, b, c, mode)) + changed_here = true; + + /* Simplify branch over branch. */ + if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b)) + changed_here = true; + + /* If B has a single outgoing edge, but uses a non-trivial jump + instruction without side-effects, we can either delete the + jump entirely, or replace it with a simple unconditional jump. + Use redirect_edge_and_branch to do the dirty work. */ + if (b->succ + && ! b->succ->succ_next + && b->succ->dest != EXIT_BLOCK_PTR + && onlyjump_p (b->end) + && redirect_edge_and_branch (b->succ, b->succ->dest)) + changed_here = true; + + /* Simplify branch to branch. */ + if (try_forward_edges (mode, b)) + changed_here = true; + + /* Look for shared code between blocks. */ + if ((mode & CLEANUP_CROSSJUMP) + && try_crossjump_bb (mode, b)) + changed_here = true; + + /* Don't get confused by the index shift caused by deleting + blocks. */ + if (!changed_here) + i = b->index + 1; + else + changed = true; + } + + if ((mode & CLEANUP_CROSSJUMP) + && try_crossjump_bb (mode, EXIT_BLOCK_PTR)) + changed = true; + +#ifdef ENABLE_CHECKING + if (changed) + verify_flow_info (); +#endif + + changed_overall |= changed; + } + while (changed); + return changed_overall; +} + +/* Delete all unreachable basic blocks. 
*/ +static bool +delete_unreachable_blocks () +{ + int i; + bool changed = false; + + find_unreachable_blocks (); + + /* Delete all unreachable basic blocks. Count down so that we + don't interfere with the block renumbering that happens in + flow_delete_block. */ + + for (i = n_basic_blocks - 1; i >= 0; --i) + { + basic_block b = BASIC_BLOCK (i); + + if (!(b->flags & BB_REACHABLE)) + flow_delete_block (b), changed = true; + } + + if (changed) + tidy_fallthru_edges (); + return changed; +} + + +/* Tidy the CFG by deleting unreachable code and whatnot. */ + +bool +cleanup_cfg (mode) + int mode; +{ + int i; + bool changed = false; + + timevar_push (TV_CLEANUP_CFG); + changed = delete_unreachable_blocks (); + if (try_optimize_cfg (mode)) + delete_unreachable_blocks (), changed = true; + + if (changed) + mark_critical_edges (); + + /* Kill the data we won't maintain. */ + free_EXPR_LIST_list (&label_value_list); + free_EXPR_LIST_list (&tail_recursion_label_list); + timevar_pop (TV_CLEANUP_CFG); + + /* Clear bb->aux on all basic blocks. */ + for (i = 0; i < n_basic_blocks; ++i) + BASIC_BLOCK (i)->aux = NULL; + return changed; +} diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c new file mode 100644 index 00000000000..d8b5b4d46fb --- /dev/null +++ b/gcc/cfgloop.c @@ -0,0 +1,854 @@ +/* Natural loop discovery code for GNU compiler. + Copyright (C) 2000, 2001 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. 
If not, write to the Free +Software Foundation, 59 Temple Place - Suite 330, Boston, MA +02111-1307, USA. */ + +#include "config.h" +#include "system.h" +#include "rtl.h" +#include "hard-reg-set.h" +#include "basic-block.h" + +static void flow_loops_cfg_dump PARAMS ((const struct loops *, + FILE *)); +static int flow_loop_nested_p PARAMS ((struct loop *, + struct loop *)); +static int flow_loop_entry_edges_find PARAMS ((basic_block, const sbitmap, + edge **)); +static int flow_loop_exit_edges_find PARAMS ((const sbitmap, edge **)); +static int flow_loop_nodes_find PARAMS ((basic_block, basic_block, sbitmap)); +static void flow_loop_pre_header_scan PARAMS ((struct loop *)); +static basic_block flow_loop_pre_header_find PARAMS ((basic_block, + const sbitmap *)); +static void flow_loop_tree_node_add PARAMS ((struct loop *, struct loop *)); +static void flow_loops_tree_build PARAMS ((struct loops *)); +static int flow_loop_level_compute PARAMS ((struct loop *, int)); +static int flow_loops_level_compute PARAMS ((struct loops *)); + +/* Dump loop related CFG information. */ + +static void +flow_loops_cfg_dump (loops, file) + const struct loops *loops; + FILE *file; +{ + int i; + + if (! loops->num || ! file || ! loops->cfg.dom) + return; + + for (i = 0; i < n_basic_blocks; i++) + { + edge succ; + + fprintf (file, ";; %d succs { ", i); + for (succ = BASIC_BLOCK (i)->succ; succ; succ = succ->succ_next) + fprintf (file, "%d ", succ->dest->index); + flow_nodes_print ("} dom", loops->cfg.dom[i], file); + } + + /* Dump the DFS node order. */ + if (loops->cfg.dfs_order) + { + fputs (";; DFS order: ", file); + for (i = 0; i < n_basic_blocks; i++) + fprintf (file, "%d ", loops->cfg.dfs_order[i]); + fputs ("\n", file); + } + /* Dump the reverse completion node order. 
*/ + if (loops->cfg.rc_order) + { + fputs (";; RC order: ", file); + for (i = 0; i < n_basic_blocks; i++) + fprintf (file, "%d ", loops->cfg.rc_order[i]); + fputs ("\n", file); + } +} + +/* Return non-zero if the nodes of LOOP are a subset of OUTER. */ + +static int +flow_loop_nested_p (outer, loop) + struct loop *outer; + struct loop *loop; +{ + return sbitmap_a_subset_b_p (loop->nodes, outer->nodes); +} + +/* Dump the loop information specified by LOOP to the stream FILE + using auxiliary dump callback function LOOP_DUMP_AUX if non null. */ + +void +flow_loop_dump (loop, file, loop_dump_aux, verbose) + const struct loop *loop; + FILE *file; + void (*loop_dump_aux) PARAMS((const struct loop *, FILE *, int)); + int verbose; +{ + if (! loop || ! loop->header) + return; + + if (loop->first->head && loop->last->end) + fprintf (file, ";;\n;; Loop %d (%d to %d):%s%s\n", + loop->num, INSN_UID (loop->first->head), + INSN_UID (loop->last->end), + loop->shared ? " shared" : "", + loop->invalid ? " invalid" : ""); + else + fprintf (file, ";;\n;; Loop %d:%s%s\n", loop->num, + loop->shared ? " shared" : "", + loop->invalid ? " invalid" : ""); + + fprintf (file, ";; header %d, latch %d, pre-header %d, first %d, last %d\n", + loop->header->index, loop->latch->index, + loop->pre_header ? loop->pre_header->index : -1, + loop->first->index, loop->last->index); + fprintf (file, ";; depth %d, level %d, outer %ld\n", + loop->depth, loop->level, + (long) (loop->outer ? 
loop->outer->num : -1)); + + if (loop->pre_header_edges) + flow_edge_list_print (";; pre-header edges", loop->pre_header_edges, + loop->num_pre_header_edges, file); + flow_edge_list_print (";; entry edges", loop->entry_edges, + loop->num_entries, file); + fprintf (file, ";; %d", loop->num_nodes); + flow_nodes_print (" nodes", loop->nodes, file); + flow_edge_list_print (";; exit edges", loop->exit_edges, + loop->num_exits, file); + if (loop->exits_doms) + flow_nodes_print (";; exit doms", loop->exits_doms, file); + if (loop_dump_aux) + loop_dump_aux (loop, file, verbose); +} + +/* Dump the loop information specified by LOOPS to the stream FILE, + using auxiliary dump callback function LOOP_DUMP_AUX if non null. */ + +void +flow_loops_dump (loops, file, loop_dump_aux, verbose) + const struct loops *loops; + FILE *file; + void (*loop_dump_aux) PARAMS((const struct loop *, FILE *, int)); + int verbose; +{ + int i; + int num_loops; + + num_loops = loops->num; + if (! num_loops || ! file) + return; + + fprintf (file, ";; %d loops found, %d levels\n", + num_loops, loops->levels); + + for (i = 0; i < num_loops; i++) + { + struct loop *loop = &loops->array[i]; + + flow_loop_dump (loop, file, loop_dump_aux, verbose); + + if (loop->shared) + { + int j; + + for (j = 0; j < i; j++) + { + struct loop *oloop = &loops->array[j]; + + if (loop->header == oloop->header) + { + int disjoint; + int smaller; + + smaller = loop->num_nodes < oloop->num_nodes; + + /* If the union of LOOP and OLOOP is different than + the larger of LOOP and OLOOP then LOOP and OLOOP + must be disjoint. */ + disjoint = ! flow_loop_nested_p (smaller ? loop : oloop, + smaller ? oloop : loop); + fprintf (file, + ";; loop header %d shared by loops %d, %d %s\n", + loop->header->index, i, j, + disjoint ? "disjoint" : "nested"); + } + } + } + } + + if (verbose) + flow_loops_cfg_dump (loops, file); +} + +/* Free all the memory allocated for LOOPS. 
*/ + +void +flow_loops_free (loops) + struct loops *loops; +{ + if (loops->array) + { + int i; + + if (! loops->num) + abort (); + + /* Free the loop descriptors. */ + for (i = 0; i < loops->num; i++) + { + struct loop *loop = &loops->array[i]; + + if (loop->pre_header_edges) + free (loop->pre_header_edges); + if (loop->nodes) + sbitmap_free (loop->nodes); + if (loop->entry_edges) + free (loop->entry_edges); + if (loop->exit_edges) + free (loop->exit_edges); + if (loop->exits_doms) + sbitmap_free (loop->exits_doms); + } + free (loops->array); + loops->array = NULL; + + if (loops->cfg.dom) + sbitmap_vector_free (loops->cfg.dom); + if (loops->cfg.dfs_order) + free (loops->cfg.dfs_order); + + if (loops->shared_headers) + sbitmap_free (loops->shared_headers); + } +} + +/* Find the entry edges into the loop with header HEADER and nodes + NODES and store in ENTRY_EDGES array. Return the number of entry + edges from the loop. */ + +static int +flow_loop_entry_edges_find (header, nodes, entry_edges) + basic_block header; + const sbitmap nodes; + edge **entry_edges; +{ + edge e; + int num_entries; + + *entry_edges = NULL; + + num_entries = 0; + for (e = header->pred; e; e = e->pred_next) + { + basic_block src = e->src; + + if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index)) + num_entries++; + } + + if (! num_entries) + abort (); + + *entry_edges = (edge *) xmalloc (num_entries * sizeof (edge *)); + + num_entries = 0; + for (e = header->pred; e; e = e->pred_next) + { + basic_block src = e->src; + + if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index)) + (*entry_edges)[num_entries++] = e; + } + + return num_entries; +} + +/* Find the exit edges from the loop using the bitmap of loop nodes + NODES and store in EXIT_EDGES array. Return the number of + exit edges from the loop. 
*/ + +static int +flow_loop_exit_edges_find (nodes, exit_edges) + const sbitmap nodes; + edge **exit_edges; +{ + edge e; + int node; + int num_exits; + + *exit_edges = NULL; + + /* Check all nodes within the loop to see if there are any + successors not in the loop. Note that a node may have multiple + exiting edges ????? A node can have one jumping edge and one fallthru + edge so only one of these can exit the loop. */ + num_exits = 0; + EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, { + for (e = BASIC_BLOCK (node)->succ; e; e = e->succ_next) + { + basic_block dest = e->dest; + + if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index)) + num_exits++; + } + }); + + if (! num_exits) + return 0; + + *exit_edges = (edge *) xmalloc (num_exits * sizeof (edge *)); + + /* Store all exiting edges into an array. */ + num_exits = 0; + EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, { + for (e = BASIC_BLOCK (node)->succ; e; e = e->succ_next) + { + basic_block dest = e->dest; + + if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index)) + (*exit_edges)[num_exits++] = e; + } + }); + + return num_exits; +} + +/* Find the nodes contained within the loop with header HEADER and + latch LATCH and store in NODES. Return the number of nodes within + the loop. */ + +static int +flow_loop_nodes_find (header, latch, nodes) + basic_block header; + basic_block latch; + sbitmap nodes; +{ + basic_block *stack; + int sp; + int num_nodes = 0; + + stack = (basic_block *) xmalloc (n_basic_blocks * sizeof (basic_block)); + sp = 0; + + /* Start with only the loop header in the set of loop nodes. */ + sbitmap_zero (nodes); + SET_BIT (nodes, header->index); + num_nodes++; + header->loop_depth++; + + /* Push the loop latch on to the stack. */ + if (! 
TEST_BIT (nodes, latch->index)) + { + SET_BIT (nodes, latch->index); + latch->loop_depth++; + num_nodes++; + stack[sp++] = latch; + } + + while (sp) + { + basic_block node; + edge e; + + node = stack[--sp]; + for (e = node->pred; e; e = e->pred_next) + { + basic_block ancestor = e->src; + + /* If each ancestor not marked as part of loop, add to set of + loop nodes and push on to stack. */ + if (ancestor != ENTRY_BLOCK_PTR + && ! TEST_BIT (nodes, ancestor->index)) + { + SET_BIT (nodes, ancestor->index); + ancestor->loop_depth++; + num_nodes++; + stack[sp++] = ancestor; + } + } + } + free (stack); + return num_nodes; +} + +/* Find the root node of the loop pre-header extended basic block and + the edges along the trace from the root node to the loop header. */ + +static void +flow_loop_pre_header_scan (loop) + struct loop *loop; +{ + int num = 0; + basic_block ebb; + + loop->num_pre_header_edges = 0; + + if (loop->num_entries != 1) + return; + + ebb = loop->entry_edges[0]->src; + + if (ebb != ENTRY_BLOCK_PTR) + { + edge e; + + /* Count number of edges along trace from loop header to + root of pre-header extended basic block. Usually this is + only one or two edges. */ + num++; + while (ebb->pred->src != ENTRY_BLOCK_PTR && ! ebb->pred->pred_next) + { + ebb = ebb->pred->src; + num++; + } + + loop->pre_header_edges = (edge *) xmalloc (num * sizeof (edge *)); + loop->num_pre_header_edges = num; + + /* Store edges in order that they are followed. The source + of the first edge is the root node of the pre-header extended + basic block and the destination of the last last edge is + the loop header. */ + for (e = loop->entry_edges[0]; num; e = e->src->pred) + { + loop->pre_header_edges[--num] = e; + } + } +} + +/* Return the block for the pre-header of the loop with header + HEADER where DOM specifies the dominator information. Return NULL if + there is no pre-header. 
*/ + +static basic_block +flow_loop_pre_header_find (header, dom) + basic_block header; + const sbitmap *dom; +{ + basic_block pre_header; + edge e; + + /* If block p is a predecessor of the header and is the only block + that the header does not dominate, then it is the pre-header. */ + pre_header = NULL; + for (e = header->pred; e; e = e->pred_next) + { + basic_block node = e->src; + + if (node != ENTRY_BLOCK_PTR + && ! TEST_BIT (dom[node->index], header->index)) + { + if (pre_header == NULL) + pre_header = node; + else + { + /* There are multiple edges into the header from outside + the loop so there is no pre-header block. */ + pre_header = NULL; + break; + } + } + } + return pre_header; +} + +/* Add LOOP to the loop hierarchy tree where PREVLOOP was the loop + previously added. The insertion algorithm assumes that the loops + are added in the order found by a depth first search of the CFG. */ + +static void +flow_loop_tree_node_add (prevloop, loop) + struct loop *prevloop; + struct loop *loop; +{ + + if (flow_loop_nested_p (prevloop, loop)) + { + prevloop->inner = loop; + loop->outer = prevloop; + return; + } + + while (prevloop->outer) + { + if (flow_loop_nested_p (prevloop->outer, loop)) + { + prevloop->next = loop; + loop->outer = prevloop->outer; + return; + } + prevloop = prevloop->outer; + } + + prevloop->next = loop; + loop->outer = NULL; +} + +/* Build the loop hierarchy tree for LOOPS. */ + +static void +flow_loops_tree_build (loops) + struct loops *loops; +{ + int i; + int num_loops; + + num_loops = loops->num; + if (! num_loops) + return; + + /* Root the loop hierarchy tree with the first loop found. + Since we used a depth first search this should be the + outermost loop. */ + loops->tree_root = &loops->array[0]; + loops->tree_root->outer = loops->tree_root->inner = loops->tree_root->next = NULL; + + /* Add the remaining loops to the tree. 
*/ + for (i = 1; i < num_loops; i++) + flow_loop_tree_node_add (&loops->array[i - 1], &loops->array[i]); +} + +/* Helper function to compute loop nesting depth and enclosed loop level + for the natural loop specified by LOOP at the loop depth DEPTH. + Returns the loop level. */ + +static int +flow_loop_level_compute (loop, depth) + struct loop *loop; + int depth; +{ + struct loop *inner; + int level = 1; + + if (! loop) + return 0; + + /* Traverse loop tree assigning depth and computing level as the + maximum level of all the inner loops of this loop. The loop + level is equivalent to the height of the loop in the loop tree + and corresponds to the number of enclosed loop levels (including + itself). */ + for (inner = loop->inner; inner; inner = inner->next) + { + int ilevel; + + ilevel = flow_loop_level_compute (inner, depth + 1) + 1; + + if (ilevel > level) + level = ilevel; + } + loop->level = level; + loop->depth = depth; + return level; +} + +/* Compute the loop nesting depth and enclosed loop level for the loop + hierarchy tree specfied by LOOPS. Return the maximum enclosed loop + level. */ + +static int +flow_loops_level_compute (loops) + struct loops *loops; +{ + struct loop *loop; + int level; + int levels = 0; + + /* Traverse all the outer level loops. */ + for (loop = loops->tree_root; loop; loop = loop->next) + { + level = flow_loop_level_compute (loop, 1); + if (level > levels) + levels = level; + } + return levels; +} + +/* Scan a single natural loop specified by LOOP collecting information + about it specified by FLAGS. */ + +int +flow_loop_scan (loops, loop, flags) + struct loops *loops; + struct loop *loop; + int flags; +{ + /* Determine prerequisites. */ + if ((flags & LOOP_EXITS_DOMS) && ! loop->exit_edges) + flags |= LOOP_EXIT_EDGES; + + if (flags & LOOP_ENTRY_EDGES) + { + /* Find edges which enter the loop header. + Note that the entry edges should only + enter the header of a natural loop. 
*/ + loop->num_entries + = flow_loop_entry_edges_find (loop->header, + loop->nodes, + &loop->entry_edges); + } + + if (flags & LOOP_EXIT_EDGES) + { + /* Find edges which exit the loop. */ + loop->num_exits + = flow_loop_exit_edges_find (loop->nodes, + &loop->exit_edges); + } + + if (flags & LOOP_EXITS_DOMS) + { + int j; + + /* Determine which loop nodes dominate all the exits + of the loop. */ + loop->exits_doms = sbitmap_alloc (n_basic_blocks); + sbitmap_copy (loop->exits_doms, loop->nodes); + for (j = 0; j < loop->num_exits; j++) + sbitmap_a_and_b (loop->exits_doms, loop->exits_doms, + loops->cfg.dom[loop->exit_edges[j]->src->index]); + + /* The header of a natural loop must dominate + all exits. */ + if (! TEST_BIT (loop->exits_doms, loop->header->index)) + abort (); + } + + if (flags & LOOP_PRE_HEADER) + { + /* Look to see if the loop has a pre-header node. */ + loop->pre_header + = flow_loop_pre_header_find (loop->header, loops->cfg.dom); + + /* Find the blocks within the extended basic block of + the loop pre-header. */ + flow_loop_pre_header_scan (loop); + } + return 1; +} + +/* Find all the natural loops in the function and save in LOOPS structure + and recalculate loop_depth information in basic block structures. + FLAGS controls which loop information is collected. + Return the number of natural loops found. */ + +int +flow_loops_find (loops, flags) + struct loops *loops; + int flags; +{ + int i; + int b; + int num_loops; + edge e; + sbitmap headers; + sbitmap *dom; + int *dfs_order; + int *rc_order; + + /* This function cannot be repeatedly called with different + flags to build up the loop information. The loop tree + must always be built if this function is called. */ + if (! (flags & LOOP_TREE)) + abort (); + + memset (loops, 0, sizeof (*loops)); + + /* Taking care of this degenerate case makes the rest of + this code simpler. */ + if (n_basic_blocks == 0) + return 0; + + dfs_order = NULL; + rc_order = NULL; + + /* Compute the dominators. 
*/ + dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); + calculate_dominance_info (NULL, dom, CDI_DOMINATORS); + + /* Count the number of loop edges (back edges). This should be the + same as the number of natural loops. */ + + num_loops = 0; + for (b = 0; b < n_basic_blocks; b++) + { + basic_block header; + + header = BASIC_BLOCK (b); + header->loop_depth = 0; + + for (e = header->pred; e; e = e->pred_next) + { + basic_block latch = e->src; + + /* Look for back edges where a predecessor is dominated + by this block. A natural loop has a single entry + node (header) that dominates all the nodes in the + loop. It also has single back edge to the header + from a latch node. Note that multiple natural loops + may share the same header. */ + if (b != header->index) + abort (); + + if (latch != ENTRY_BLOCK_PTR && TEST_BIT (dom[latch->index], b)) + num_loops++; + } + } + + if (num_loops) + { + /* Compute depth first search order of the CFG so that outer + natural loops will be found before inner natural loops. */ + dfs_order = (int *) xmalloc (n_basic_blocks * sizeof (int)); + rc_order = (int *) xmalloc (n_basic_blocks * sizeof (int)); + flow_depth_first_order_compute (dfs_order, rc_order); + + /* Save CFG derived information to avoid recomputing it. */ + loops->cfg.dom = dom; + loops->cfg.dfs_order = dfs_order; + loops->cfg.rc_order = rc_order; + + /* Allocate loop structures. */ + loops->array + = (struct loop *) xcalloc (num_loops, sizeof (struct loop)); + + headers = sbitmap_alloc (n_basic_blocks); + sbitmap_zero (headers); + + loops->shared_headers = sbitmap_alloc (n_basic_blocks); + sbitmap_zero (loops->shared_headers); + + /* Find and record information about all the natural loops + in the CFG. */ + num_loops = 0; + for (b = 0; b < n_basic_blocks; b++) + { + basic_block header; + + /* Search the nodes of the CFG in reverse completion order + so that we can find outer loops first. 
*/ + header = BASIC_BLOCK (rc_order[b]); + + /* Look for all the possible latch blocks for this header. */ + for (e = header->pred; e; e = e->pred_next) + { + basic_block latch = e->src; + + /* Look for back edges where a predecessor is dominated + by this block. A natural loop has a single entry + node (header) that dominates all the nodes in the + loop. It also has single back edge to the header + from a latch node. Note that multiple natural loops + may share the same header. */ + if (latch != ENTRY_BLOCK_PTR + && TEST_BIT (dom[latch->index], header->index)) + { + struct loop *loop; + + loop = loops->array + num_loops; + + loop->header = header; + loop->latch = latch; + loop->num = num_loops; + + num_loops++; + } + } + } + + for (i = 0; i < num_loops; i++) + { + struct loop *loop = &loops->array[i]; + + /* Keep track of blocks that are loop headers so + that we can tell which loops should be merged. */ + if (TEST_BIT (headers, loop->header->index)) + SET_BIT (loops->shared_headers, loop->header->index); + SET_BIT (headers, loop->header->index); + + /* Find nodes contained within the loop. */ + loop->nodes = sbitmap_alloc (n_basic_blocks); + loop->num_nodes + = flow_loop_nodes_find (loop->header, loop->latch, loop->nodes); + + /* Compute first and last blocks within the loop. + These are often the same as the loop header and + loop latch respectively, but this is not always + the case. */ + loop->first + = BASIC_BLOCK (sbitmap_first_set_bit (loop->nodes)); + loop->last + = BASIC_BLOCK (sbitmap_last_set_bit (loop->nodes)); + + flow_loop_scan (loops, loop, flags); + } + + /* Natural loops with shared headers may either be disjoint or + nested. Disjoint loops with shared headers cannot be inner + loops and should be merged. For now just mark loops that share + headers. 
*/ + for (i = 0; i < num_loops; i++) + if (TEST_BIT (loops->shared_headers, loops->array[i].header->index)) + loops->array[i].shared = 1; + + sbitmap_free (headers); + } + else + { + sbitmap_vector_free (dom); + } + + loops->num = num_loops; + + /* Build the loop hierarchy tree. */ + flow_loops_tree_build (loops); + + /* Assign the loop nesting depth and enclosed loop level for each + loop. */ + loops->levels = flow_loops_level_compute (loops); + + return num_loops; +} + +/* Update the information regarding the loops in the CFG + specified by LOOPS. */ +int +flow_loops_update (loops, flags) + struct loops *loops; + int flags; +{ + /* One day we may want to update the current loop data. For now + throw away the old stuff and rebuild what we need. */ + if (loops->array) + flow_loops_free (loops); + + return flow_loops_find (loops, flags); +} + +/* Return non-zero if edge E enters header of LOOP from outside of LOOP. */ + +int +flow_loop_outside_edge_p (loop, e) + const struct loop *loop; + edge e; +{ + if (e->dest != loop->header) + abort (); + return (e->src == ENTRY_BLOCK_PTR) || ! TEST_BIT (loop->nodes, e->src->index); +} diff --git a/gcc/flow.c b/gcc/flow.c index 1cbb0f8f82a..b5a6e3775ac 100644 --- a/gcc/flow.c +++ b/gcc/flow.c @@ -174,63 +174,6 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #endif #endif -/* The obstack on which the flow graph components are allocated. */ - -struct obstack flow_obstack; -static char *flow_firstobj; - -/* Number of basic blocks in the current function. */ - -int n_basic_blocks; - -/* Number of edges in the current function. */ - -int n_edges; - -/* The basic block array. */ - -varray_type basic_block_info; - -/* The special entry and exit blocks. 
*/ - -struct basic_block_def entry_exit_blocks[2] -= {{NULL, /* head */ - NULL, /* end */ - NULL, /* head_tree */ - NULL, /* end_tree */ - NULL, /* pred */ - NULL, /* succ */ - NULL, /* local_set */ - NULL, /* cond_local_set */ - NULL, /* global_live_at_start */ - NULL, /* global_live_at_end */ - NULL, /* aux */ - ENTRY_BLOCK, /* index */ - 0, /* loop_depth */ - 0, /* count */ - 0, /* frequency */ - 0 /* flags */ - }, - { - NULL, /* head */ - NULL, /* end */ - NULL, /* head_tree */ - NULL, /* end_tree */ - NULL, /* pred */ - NULL, /* succ */ - NULL, /* local_set */ - NULL, /* cond_local_set */ - NULL, /* global_live_at_start */ - NULL, /* global_live_at_end */ - NULL, /* aux */ - EXIT_BLOCK, /* index */ - 0, /* loop_depth */ - 0, /* count */ - 0, /* frequency */ - 0 /* flags */ - } -}; - /* Nonzero if the second flow pass has completed. */ int flow2_completed; @@ -268,17 +211,6 @@ int (*lang_missing_noreturn_ok_p) PARAMS ((tree)); static HARD_REG_SET elim_reg_set; -/* The basic block structure for every insn, indexed by uid. */ - -varray_type basic_block_for_insn; - -/* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */ -/* ??? Should probably be using LABEL_NUSES instead. It would take a - bit of surgery to be able to use or co-opt the routines in jump. */ - -static rtx label_value_list; -static rtx tail_recursion_label_list; - /* Holds information for tracking conditional register life information. */ struct reg_cond_life_info { @@ -348,57 +280,12 @@ struct propagate_block_info new elements on the floor. */ #define MAX_MEM_SET_LIST_LEN 100 -/* Store the data structures necessary for depth-first search. */ -struct depth_first_search_dsS { - /* stack for backtracking during the algorithm */ - basic_block *stack; - - /* number of edges in the stack. That is, positions 0, ..., sp-1 - have edges. 
*/ - unsigned int sp; - - /* record of basic blocks already seen by depth-first search */ - sbitmap visited_blocks; -}; -typedef struct depth_first_search_dsS *depth_first_search_ds; - /* Have print_rtl_and_abort give the same information that fancy_abort does. */ #define print_rtl_and_abort() \ print_rtl_and_abort_fcn (__FILE__, __LINE__, __FUNCTION__) /* Forward declarations */ -static bool try_crossjump_to_edge PARAMS ((int, edge, edge)); -static bool try_crossjump_bb PARAMS ((int, basic_block)); -static bool outgoing_edges_match PARAMS ((basic_block, basic_block)); -static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block, - rtx *, rtx *)); -static int count_basic_blocks PARAMS ((rtx)); -static void find_basic_blocks_1 PARAMS ((rtx)); -static rtx find_label_refs PARAMS ((rtx, rtx)); -static void make_edges PARAMS ((rtx, int, int, int)); -static void make_label_edge PARAMS ((sbitmap *, basic_block, - rtx, int)); -static void make_eh_edge PARAMS ((sbitmap *, basic_block, rtx)); - -static void commit_one_edge_insertion PARAMS ((edge)); - -static void delete_unreachable_blocks PARAMS ((void)); -static int can_delete_note_p PARAMS ((rtx)); -static int can_delete_label_p PARAMS ((rtx)); -static int tail_recursion_label_p PARAMS ((rtx)); -static int merge_blocks_move_predecessor_nojumps PARAMS ((basic_block, - basic_block)); -static int merge_blocks_move_successor_nojumps PARAMS ((basic_block, - basic_block)); -static int merge_blocks PARAMS ((edge,basic_block,basic_block, - int)); -static bool try_optimize_cfg PARAMS ((int)); -static bool can_fallthru PARAMS ((basic_block, basic_block)); -static bool try_redirect_by_replacing_jump PARAMS ((edge, basic_block)); -static bool try_simplify_condjump PARAMS ((basic_block)); -static bool try_forward_edges PARAMS ((int, basic_block)); -static void tidy_fallthru_edges PARAMS ((void)); static int verify_wide_reg_1 PARAMS ((rtx *, void *)); static void verify_wide_reg PARAMS ((int, rtx, rtx)); static void 
verify_local_live_at_start PARAMS ((regset, basic_block)); @@ -456,109 +343,8 @@ static void invalidate_mems_from_autoinc PARAMS ((struct propagate_block_info *, rtx)); static void invalidate_mems_from_set PARAMS ((struct propagate_block_info *, rtx)); -static void remove_fake_successors PARAMS ((basic_block)); -static void flow_nodes_print PARAMS ((const char *, const sbitmap, - FILE *)); -static void flow_edge_list_print PARAMS ((const char *, const edge *, - int, FILE *)); -static void flow_loops_cfg_dump PARAMS ((const struct loops *, - FILE *)); -static int flow_loop_nested_p PARAMS ((struct loop *, - struct loop *)); -static int flow_loop_entry_edges_find PARAMS ((basic_block, const sbitmap, - edge **)); -static int flow_loop_exit_edges_find PARAMS ((const sbitmap, edge **)); -static int flow_loop_nodes_find PARAMS ((basic_block, basic_block, sbitmap)); -static void flow_dfs_compute_reverse_init - PARAMS ((depth_first_search_ds)); -static void flow_dfs_compute_reverse_add_bb - PARAMS ((depth_first_search_ds, basic_block)); -static basic_block flow_dfs_compute_reverse_execute - PARAMS ((depth_first_search_ds)); -static void flow_dfs_compute_reverse_finish - PARAMS ((depth_first_search_ds)); -static void flow_loop_pre_header_scan PARAMS ((struct loop *)); -static basic_block flow_loop_pre_header_find PARAMS ((basic_block, - const sbitmap *)); -static void flow_loop_tree_node_add PARAMS ((struct loop *, struct loop *)); -static void flow_loops_tree_build PARAMS ((struct loops *)); -static int flow_loop_level_compute PARAMS ((struct loop *, int)); -static int flow_loops_level_compute PARAMS ((struct loops *)); static void delete_dead_jumptables PARAMS ((void)); -static bool back_edge_of_syntactic_loop_p PARAMS ((basic_block, basic_block)); -static bool need_fake_edge_p PARAMS ((rtx)); -/* Find basic blocks of the current function. - F is the first insn of the function and NREGS the number of register - numbers in use. 
*/ - -void -find_basic_blocks (f, nregs, file) - rtx f; - int nregs ATTRIBUTE_UNUSED; - FILE *file ATTRIBUTE_UNUSED; -{ - int max_uid; - timevar_push (TV_CFG); - - /* Flush out existing data. */ - if (basic_block_info != NULL) - { - int i; - - clear_edges (); - - /* Clear bb->aux on all extant basic blocks. We'll use this as a - tag for reuse during create_basic_block, just in case some pass - copies around basic block notes improperly. */ - for (i = 0; i < n_basic_blocks; ++i) - BASIC_BLOCK (i)->aux = NULL; - - VARRAY_FREE (basic_block_info); - } - - n_basic_blocks = count_basic_blocks (f); - - /* Size the basic block table. The actual structures will be allocated - by find_basic_blocks_1, since we want to keep the structure pointers - stable across calls to find_basic_blocks. */ - /* ??? This whole issue would be much simpler if we called find_basic_blocks - exactly once, and thereafter we don't have a single long chain of - instructions at all until close to the end of compilation when we - actually lay them out. */ - - VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info"); - - find_basic_blocks_1 (f); - - /* Record the block to which an insn belongs. */ - /* ??? This should be done another way, by which (perhaps) a label is - tagged directly with the basic block that it starts. It is used for - more than that currently, but IMO that is the only valid use. */ - - max_uid = get_max_uid (); -#ifdef AUTO_INC_DEC - /* Leave space for insns life_analysis makes in some cases for auto-inc. - These cases are rare, so we don't need too much space. */ - max_uid += max_uid / 10; -#endif - - compute_bb_for_insn (max_uid); - - /* Discover the edges of our cfg. */ - make_edges (label_value_list, 0, n_basic_blocks - 1, 0); - - /* Do very simple cleanup now, for the benefit of code that runs between - here and cleanup_cfg, e.g. thread_prologue_and_epilogue_insns. 
*/ - tidy_fallthru_edges (); - - mark_critical_edges (); - -#ifdef ENABLE_CHECKING - verify_flow_info (); -#endif - timevar_pop (TV_CFG); -} void check_function_return_warnings () @@ -598,521 +384,6 @@ check_function_return_warnings () } } } - -/* Count the basic blocks of the function. */ - -static int -count_basic_blocks (f) - rtx f; -{ - register rtx insn; - register RTX_CODE prev_code; - register int count = 0; - int saw_abnormal_edge = 0; - - prev_code = JUMP_INSN; - for (insn = f; insn; insn = NEXT_INSN (insn)) - { - enum rtx_code code = GET_CODE (insn); - - if (code == CODE_LABEL - || (GET_RTX_CLASS (code) == 'i' - && (prev_code == JUMP_INSN - || prev_code == BARRIER - || saw_abnormal_edge))) - { - saw_abnormal_edge = 0; - count++; - } - - /* Record whether this insn created an edge. */ - if (code == CALL_INSN) - { - rtx note; - - /* If there is a nonlocal goto label and the specified - region number isn't -1, we have an edge. */ - if (nonlocal_goto_handler_labels - && ((note = find_reg_note (insn, REG_EH_REGION, NULL_RTX)) == 0 - || INTVAL (XEXP (note, 0)) >= 0)) - saw_abnormal_edge = 1; - - else if (can_throw_internal (insn)) - saw_abnormal_edge = 1; - } - else if (flag_non_call_exceptions - && code == INSN - && can_throw_internal (insn)) - saw_abnormal_edge = 1; - - if (code != NOTE) - prev_code = code; - } - - /* The rest of the compiler works a bit smoother when we don't have to - check for the edge case of do-nothing functions with no basic blocks. */ - if (count == 0) - { - emit_insn (gen_rtx_USE (VOIDmode, const0_rtx)); - count = 1; - } - - return count; -} - -/* Scan a list of insns for labels referred to other than by jumps. - This is used to scan the alternatives of a call placeholder. 
*/ -static rtx -find_label_refs (f, lvl) - rtx f; - rtx lvl; -{ - rtx insn; - - for (insn = f; insn; insn = NEXT_INSN (insn)) - if (INSN_P (insn) && GET_CODE (insn) != JUMP_INSN) - { - rtx note; - - /* Make a list of all labels referred to other than by jumps - (which just don't have the REG_LABEL notes). - - Make a special exception for labels followed by an ADDR*VEC, - as this would be a part of the tablejump setup code. - - Make a special exception to registers loaded with label - values just before jump insns that use them. */ - - for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) - if (REG_NOTE_KIND (note) == REG_LABEL) - { - rtx lab = XEXP (note, 0), next; - - if ((next = next_nonnote_insn (lab)) != NULL - && GET_CODE (next) == JUMP_INSN - && (GET_CODE (PATTERN (next)) == ADDR_VEC - || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)) - ; - else if (GET_CODE (lab) == NOTE) - ; - else if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN - && find_reg_note (NEXT_INSN (insn), REG_LABEL, lab)) - ; - else - lvl = alloc_EXPR_LIST (0, XEXP (note, 0), lvl); - } - } - - return lvl; -} - -/* Assume that someone emitted code with control flow instructions to the - basic block. Update the data structure. */ -void -find_sub_basic_blocks (bb) - basic_block bb; -{ - rtx insn = bb->head; - rtx end = bb->end; - rtx jump_insn = NULL_RTX; - edge falltru = 0; - basic_block first_bb = bb; - int i; - - if (insn == bb->end) - return; - - if (GET_CODE (insn) == CODE_LABEL) - insn = NEXT_INSN (insn); - - /* Scan insn chain and try to find new basic block boundaries. */ - while (1) - { - enum rtx_code code = GET_CODE (insn); - switch (code) - { - case BARRIER: - if (!jump_insn) - abort (); - break; - /* On code label, split current basic block. 
*/ - case CODE_LABEL: - falltru = split_block (bb, PREV_INSN (insn)); - if (jump_insn) - bb->end = jump_insn; - bb = falltru->dest; - remove_edge (falltru); - jump_insn = 0; - if (LABEL_ALTERNATE_NAME (insn)) - make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0); - break; - case INSN: - case JUMP_INSN: - /* In case we've previously split insn on the JUMP_INSN, move the - block header to proper place. */ - if (jump_insn) - { - falltru = split_block (bb, PREV_INSN (insn)); - bb->end = jump_insn; - bb = falltru->dest; - remove_edge (falltru); - jump_insn = 0; - } - /* We need some special care for those expressions. */ - if (GET_CODE (insn) == JUMP_INSN) - { - if (GET_CODE (PATTERN (insn)) == ADDR_VEC - || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) - abort(); - jump_insn = insn; - } - break; - default: - break; - } - if (insn == end) - break; - insn = NEXT_INSN (insn); - } - - /* In case expander replaced normal insn by sequence terminating by - return and barrier, or possibly other sequence not behaving like - ordinary jump, we need to take care and move basic block boundary. */ - if (jump_insn && GET_CODE (bb->end) != JUMP_INSN) - bb->end = jump_insn; - - /* We've possibly replaced the conditional jump by conditional jump - followed by cleanup at fallthru edge, so the outgoing edges may - be dead. */ - purge_dead_edges (bb); - - /* Now re-scan and wire in all edges. This expect simple (conditional) - jumps at the end of each new basic blocks. */ - make_edges (NULL, first_bb->index, bb->index, 1); - - /* Update branch probabilities. Expect only (un)conditional jumps - to be created with only the forward edges. 
*/ - for (i = first_bb->index; i <= bb->index; i++) - { - edge e,f; - basic_block b = BASIC_BLOCK (i); - if (b != first_bb) - { - b->count = 0; - b->frequency = 0; - for (e = b->pred; e; e=e->pred_next) - { - b->count += e->count; - b->frequency += EDGE_FREQUENCY (e); - } - } - if (b->succ && b->succ->succ_next && !b->succ->succ_next->succ_next) - { - rtx note = find_reg_note (b->end, REG_BR_PROB, NULL); - int probability; - - if (!note) - continue; - probability = INTVAL (XEXP (find_reg_note (b->end, - REG_BR_PROB, - NULL), 0)); - e = BRANCH_EDGE (b); - e->probability = probability; - e->count = ((b->count * probability + REG_BR_PROB_BASE / 2) - / REG_BR_PROB_BASE); - f = FALLTHRU_EDGE (b); - f->probability = REG_BR_PROB_BASE - probability; - f->count = b->count - e->count; - } - if (b->succ && !b->succ->succ_next) - { - e = b->succ; - e->probability = REG_BR_PROB_BASE; - e->count = b->count; - } - } -} - -/* Find all basic blocks of the function whose first insn is F. - - Collect and return a list of labels whose addresses are taken. This - will be used in make_edges for use with computed gotos. */ - -static void -find_basic_blocks_1 (f) - rtx f; -{ - register rtx insn, next; - int i = 0; - rtx bb_note = NULL_RTX; - rtx lvl = NULL_RTX; - rtx trll = NULL_RTX; - rtx head = NULL_RTX; - rtx end = NULL_RTX; - - /* We process the instructions in a slightly different way than we did - previously. This is so that we see a NOTE_BASIC_BLOCK after we have - closed out the previous block, so that it gets attached at the proper - place. Since this form should be equivalent to the previous, - count_basic_blocks continues to use the old form as a check. */ - - for (insn = f; insn; insn = next) - { - enum rtx_code code = GET_CODE (insn); - - next = NEXT_INSN (insn); - - switch (code) - { - case NOTE: - { - int kind = NOTE_LINE_NUMBER (insn); - - /* Look for basic block notes with which to keep the - basic_block_info pointers stable. 
Unthread the note now; - we'll put it back at the right place in create_basic_block. - Or not at all if we've already found a note in this block. */ - if (kind == NOTE_INSN_BASIC_BLOCK) - { - if (bb_note == NULL_RTX) - bb_note = insn; - else - next = flow_delete_insn (insn); - } - break; - } - - case CODE_LABEL: - /* A basic block starts at a label. If we've closed one off due - to a barrier or some such, no need to do it again. */ - if (head != NULL_RTX) - { - create_basic_block (i++, head, end, bb_note); - bb_note = NULL_RTX; - } - - head = end = insn; - break; - - case JUMP_INSN: - /* A basic block ends at a jump. */ - if (head == NULL_RTX) - head = insn; - else - { - /* ??? Make a special check for table jumps. The way this - happens is truly and amazingly gross. We are about to - create a basic block that contains just a code label and - an addr*vec jump insn. Worse, an addr_diff_vec creates - its own natural loop. - - Prevent this bit of brain damage, pasting things together - correctly in make_edges. - - The correct solution involves emitting the table directly - on the tablejump instruction as a note, or JUMP_LABEL. */ - - if (GET_CODE (PATTERN (insn)) == ADDR_VEC - || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) - { - head = end = NULL; - n_basic_blocks--; - break; - } - } - end = insn; - goto new_bb_inclusive; - - case BARRIER: - /* A basic block ends at a barrier. It may be that an unconditional - jump already closed the basic block -- no need to do it again. */ - if (head == NULL_RTX) - break; - goto new_bb_exclusive; - - case CALL_INSN: - { - /* Record whether this call created an edge. */ - rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); - int region = (note ? INTVAL (XEXP (note, 0)) : 0); - - if (GET_CODE (PATTERN (insn)) == CALL_PLACEHOLDER) - { - /* Scan each of the alternatives for label refs. 
*/ - lvl = find_label_refs (XEXP (PATTERN (insn), 0), lvl); - lvl = find_label_refs (XEXP (PATTERN (insn), 1), lvl); - lvl = find_label_refs (XEXP (PATTERN (insn), 2), lvl); - /* Record its tail recursion label, if any. */ - if (XEXP (PATTERN (insn), 3) != NULL_RTX) - trll = alloc_EXPR_LIST (0, XEXP (PATTERN (insn), 3), trll); - } - - /* A basic block ends at a call that can either throw or - do a non-local goto. */ - if ((nonlocal_goto_handler_labels && region >= 0) - || can_throw_internal (insn)) - { - new_bb_inclusive: - if (head == NULL_RTX) - head = insn; - end = insn; - - new_bb_exclusive: - create_basic_block (i++, head, end, bb_note); - head = end = NULL_RTX; - bb_note = NULL_RTX; - break; - } - } - /* Fall through. */ - - case INSN: - /* Non-call exceptions generate new blocks just like calls. */ - if (flag_non_call_exceptions && can_throw_internal (insn)) - goto new_bb_inclusive; - - if (head == NULL_RTX) - head = insn; - end = insn; - break; - - default: - abort (); - } - - if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) - { - rtx note; - - /* Make a list of all labels referred to other than by jumps. - - Make a special exception for labels followed by an ADDR*VEC, - as this would be a part of the tablejump setup code. - - Make a special exception to registers loaded with label - values just before jump insns that use them. 
*/ - - for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) - if (REG_NOTE_KIND (note) == REG_LABEL) - { - rtx lab = XEXP (note, 0), next; - - if ((next = next_nonnote_insn (lab)) != NULL - && GET_CODE (next) == JUMP_INSN - && (GET_CODE (PATTERN (next)) == ADDR_VEC - || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)) - ; - else if (GET_CODE (lab) == NOTE) - ; - else if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN - && find_reg_note (NEXT_INSN (insn), REG_LABEL, lab)) - ; - else - lvl = alloc_EXPR_LIST (0, XEXP (note, 0), lvl); - } - } - } - - if (head != NULL_RTX) - create_basic_block (i++, head, end, bb_note); - else if (bb_note) - flow_delete_insn (bb_note); - - if (i != n_basic_blocks) - abort (); - - label_value_list = lvl; - tail_recursion_label_list = trll; -} - -/* Tidy the CFG by deleting unreachable code and whatnot. */ - -void -cleanup_cfg (mode) - int mode; -{ - int i; - - timevar_push (TV_CLEANUP_CFG); - delete_unreachable_blocks (); - if (try_optimize_cfg (mode)) - delete_unreachable_blocks (); - mark_critical_edges (); - - /* Kill the data we won't maintain. */ - free_EXPR_LIST_list (&label_value_list); - free_EXPR_LIST_list (&tail_recursion_label_list); - timevar_pop (TV_CLEANUP_CFG); - - /* Clear bb->aux on all basic blocks. */ - for (i = 0; i < n_basic_blocks; ++i) - BASIC_BLOCK (i)->aux = NULL; -} - -/* Create a new basic block consisting of the instructions between - HEAD and END inclusive. Reuses the note and basic block struct - in BB_NOTE, if any. */ - -void -create_basic_block (index, head, end, bb_note) - int index; - rtx head, end, bb_note; -{ - basic_block bb; - - if (bb_note - && ! RTX_INTEGRATED_P (bb_note) - && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL - && bb->aux == NULL) - { - /* If we found an existing note, thread it back onto the chain. 
*/ - - rtx after; - - if (GET_CODE (head) == CODE_LABEL) - after = head; - else - { - after = PREV_INSN (head); - head = bb_note; - } - - if (after != bb_note && NEXT_INSN (after) != bb_note) - reorder_insns (bb_note, bb_note, after); - } - else - { - /* Otherwise we must create a note and a basic block structure. - Since we allow basic block structs in rtl, give the struct - the same lifetime by allocating it off the function obstack - rather than using malloc. */ - - bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*bb)); - memset (bb, 0, sizeof (*bb)); - - if (GET_CODE (head) == CODE_LABEL) - bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head); - else - { - bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head); - head = bb_note; - } - NOTE_BASIC_BLOCK (bb_note) = bb; - } - - /* Always include the bb note in the block. */ - if (NEXT_INSN (end) == bb_note) - end = bb_note; - - bb->head = head; - bb->end = end; - bb->index = index; - BASIC_BLOCK (index) = bb; - - /* Tag the block so that we know it has been used when considering - other basic block notes. */ - bb->aux = bb; -} /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK note associated with the BLOCK. */ @@ -1135,3155 +406,6 @@ first_insn_after_basic_block_note (block) return NEXT_INSN (insn); } - -/* Records the basic block struct in BB_FOR_INSN, for every instruction - indexed by INSN_UID. MAX is the size of the array. */ - -void -compute_bb_for_insn (max) - int max; -{ - int i; - - if (basic_block_for_insn) - VARRAY_FREE (basic_block_for_insn); - VARRAY_BB_INIT (basic_block_for_insn, max, "basic_block_for_insn"); - - for (i = 0; i < n_basic_blocks; ++i) - { - basic_block bb = BASIC_BLOCK (i); - rtx insn, end; - - end = bb->end; - insn = bb->head; - while (1) - { - int uid = INSN_UID (insn); - if (uid < max) - VARRAY_BB (basic_block_for_insn, uid) = bb; - if (insn == end) - break; - insn = NEXT_INSN (insn); - } - } -} - -/* Free the memory associated with the edge structures. 
*/ - -void -clear_edges () -{ - int i; - edge n, e; - - for (i = 0; i < n_basic_blocks; ++i) - { - basic_block bb = BASIC_BLOCK (i); - - for (e = bb->succ; e; e = n) - { - n = e->succ_next; - free (e); - } - - bb->succ = 0; - bb->pred = 0; - } - - for (e = ENTRY_BLOCK_PTR->succ; e; e = n) - { - n = e->succ_next; - free (e); - } - - ENTRY_BLOCK_PTR->succ = 0; - EXIT_BLOCK_PTR->pred = 0; - - n_edges = 0; -} - -/* Identify the edges between basic blocks MIN to MAX. - - NONLOCAL_LABEL_LIST is a list of non-local labels in the function. Blocks - that are otherwise unreachable may be reachable with a non-local goto. - - BB_EH_END is an array indexed by basic block number in which we record - the list of exception regions active at the end of the basic block. */ - -static void -make_edges (label_value_list, min, max, update_p) - rtx label_value_list; - int min, max, update_p; -{ - int i; - sbitmap *edge_cache = NULL; - - /* Assume no computed jump; revise as we create edges. */ - current_function_has_computed_jump = 0; - - /* Heavy use of computed goto in machine-generated code can lead to - nearly fully-connected CFGs. In that case we spend a significant - amount of time searching the edge lists for duplicates. */ - if (forced_labels || label_value_list) - { - edge_cache = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); - sbitmap_vector_zero (edge_cache, n_basic_blocks); - - if (update_p) - for (i = min; i <= max; ++i) - { - edge e; - for (e = BASIC_BLOCK (i)->succ; e ; e = e->succ_next) - if (e->dest != EXIT_BLOCK_PTR) - SET_BIT (edge_cache[i], e->dest->index); - } - } - - /* By nature of the way these get numbered, block 0 is always the entry. 
*/ - make_edge (edge_cache, ENTRY_BLOCK_PTR, BASIC_BLOCK (0), EDGE_FALLTHRU); - - for (i = min; i <= max; ++i) - { - basic_block bb = BASIC_BLOCK (i); - rtx insn, x; - enum rtx_code code; - int force_fallthru = 0; - - if (GET_CODE (bb->head) == CODE_LABEL - && LABEL_ALTERNATE_NAME (bb->head)) - make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0); - - /* Examine the last instruction of the block, and discover the - ways we can leave the block. */ - - insn = bb->end; - code = GET_CODE (insn); - - /* A branch. */ - if (code == JUMP_INSN) - { - rtx tmp; - - /* Recognize exception handling placeholders. */ - if (GET_CODE (PATTERN (insn)) == RESX) - make_eh_edge (edge_cache, bb, insn); - - /* Recognize a non-local goto as a branch outside the - current function. */ - else if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX)) - ; - - /* ??? Recognize a tablejump and do the right thing. */ - else if ((tmp = JUMP_LABEL (insn)) != NULL_RTX - && (tmp = NEXT_INSN (tmp)) != NULL_RTX - && GET_CODE (tmp) == JUMP_INSN - && (GET_CODE (PATTERN (tmp)) == ADDR_VEC - || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)) - { - rtvec vec; - int j; - - if (GET_CODE (PATTERN (tmp)) == ADDR_VEC) - vec = XVEC (PATTERN (tmp), 0); - else - vec = XVEC (PATTERN (tmp), 1); - - for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j) - make_label_edge (edge_cache, bb, - XEXP (RTVEC_ELT (vec, j), 0), 0); - - /* Some targets (eg, ARM) emit a conditional jump that also - contains the out-of-range target. Scan for these and - add an edge if necessary. */ - if ((tmp = single_set (insn)) != NULL - && SET_DEST (tmp) == pc_rtx - && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE - && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF) - make_label_edge (edge_cache, bb, - XEXP (XEXP (SET_SRC (tmp), 2), 0), 0); - -#ifdef CASE_DROPS_THROUGH - /* Silly VAXen. The ADDR_VEC is going to be in the way of - us naturally detecting fallthru into the next block. 
*/ - force_fallthru = 1; -#endif - } - - /* If this is a computed jump, then mark it as reaching - everything on the label_value_list and forced_labels list. */ - else if (computed_jump_p (insn)) - { - current_function_has_computed_jump = 1; - - for (x = label_value_list; x; x = XEXP (x, 1)) - make_label_edge (edge_cache, bb, XEXP (x, 0), EDGE_ABNORMAL); - - for (x = forced_labels; x; x = XEXP (x, 1)) - make_label_edge (edge_cache, bb, XEXP (x, 0), EDGE_ABNORMAL); - } - - /* Returns create an exit out. */ - else if (returnjump_p (insn)) - make_edge (edge_cache, bb, EXIT_BLOCK_PTR, 0); - - /* Otherwise, we have a plain conditional or unconditional jump. */ - else - { - if (! JUMP_LABEL (insn)) - abort (); - make_label_edge (edge_cache, bb, JUMP_LABEL (insn), 0); - } - } - - /* If this is a sibling call insn, then this is in effect a - combined call and return, and so we need an edge to the - exit block. No need to worry about EH edges, since we - wouldn't have created the sibling call in the first place. */ - - if (code == CALL_INSN && SIBLING_CALL_P (insn)) - make_edge (edge_cache, bb, EXIT_BLOCK_PTR, - EDGE_ABNORMAL | EDGE_ABNORMAL_CALL); - - /* If this is a CALL_INSN, then mark it as reaching the active EH - handler for this CALL_INSN. If we're handling non-call - exceptions then any insn can reach any of the active handlers. - - Also mark the CALL_INSN as reaching any nonlocal goto handler. */ - - else if (code == CALL_INSN || flag_non_call_exceptions) - { - /* Add any appropriate EH edges. */ - make_eh_edge (edge_cache, bb, insn); - - if (code == CALL_INSN && nonlocal_goto_handler_labels) - { - /* ??? This could be made smarter: in some cases it's possible - to tell that certain calls will not do a nonlocal goto. - - For example, if the nested functions that do the nonlocal - gotos do not have their addresses taken, then only calls to - those functions or to other nested functions that use them - could possibly do nonlocal gotos. 
*/ - /* We do know that a REG_EH_REGION note with a value less - than 0 is guaranteed not to perform a non-local goto. */ - rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); - if (!note || INTVAL (XEXP (note, 0)) >= 0) - for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1)) - make_label_edge (edge_cache, bb, XEXP (x, 0), - EDGE_ABNORMAL | EDGE_ABNORMAL_CALL); - } - } - - /* Find out if we can drop through to the next block. */ - insn = next_nonnote_insn (insn); - if (!insn || (i + 1 == n_basic_blocks && force_fallthru)) - make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU); - else if (i + 1 < n_basic_blocks) - { - rtx tmp = BLOCK_HEAD (i + 1); - if (GET_CODE (tmp) == NOTE) - tmp = next_nonnote_insn (tmp); - if (force_fallthru || insn == tmp) - make_edge (edge_cache, bb, BASIC_BLOCK (i + 1), EDGE_FALLTHRU); - } - } - - if (edge_cache) - sbitmap_vector_free (edge_cache); -} - -/* Create an edge between two basic blocks. FLAGS are auxiliary information - about the edge that is accumulated between calls. */ - -void -make_edge (edge_cache, src, dst, flags) - sbitmap *edge_cache; - basic_block src, dst; - int flags; -{ - int use_edge_cache; - edge e; - - /* Don't bother with edge cache for ENTRY or EXIT; there aren't that - many edges to them, and we didn't allocate memory for it. */ - use_edge_cache = (edge_cache - && src != ENTRY_BLOCK_PTR - && dst != EXIT_BLOCK_PTR); - - /* Make sure we don't add duplicate edges. */ - switch (use_edge_cache) - { - default: - /* Quick test for non-existance of the edge. */ - if (! TEST_BIT (edge_cache[src->index], dst->index)) - break; - - /* The edge exists; early exit if no work to do. 
*/ - if (flags == 0) - return; - - /* FALLTHRU */ - case 0: - for (e = src->succ; e; e = e->succ_next) - if (e->dest == dst) - { - e->flags |= flags; - return; - } - break; - } - - e = (edge) xcalloc (1, sizeof (*e)); - n_edges++; - - e->succ_next = src->succ; - e->pred_next = dst->pred; - e->src = src; - e->dest = dst; - e->flags = flags; - - src->succ = e; - dst->pred = e; - - if (use_edge_cache) - SET_BIT (edge_cache[src->index], dst->index); -} - -/* Create an edge from a basic block to a label. */ - -static void -make_label_edge (edge_cache, src, label, flags) - sbitmap *edge_cache; - basic_block src; - rtx label; - int flags; -{ - if (GET_CODE (label) != CODE_LABEL) - abort (); - - /* If the label was never emitted, this insn is junk, but avoid a - crash trying to refer to BLOCK_FOR_INSN (label). This can happen - as a result of a syntax error and a diagnostic has already been - printed. */ - - if (INSN_UID (label) == 0) - return; - - make_edge (edge_cache, src, BLOCK_FOR_INSN (label), flags); -} - -/* Create the edges generated by INSN in REGION. */ - -static void -make_eh_edge (edge_cache, src, insn) - sbitmap *edge_cache; - basic_block src; - rtx insn; -{ - int is_call = (GET_CODE (insn) == CALL_INSN ? EDGE_ABNORMAL_CALL : 0); - rtx handlers, i; - - handlers = reachable_handlers (insn); - - for (i = handlers; i; i = XEXP (i, 1)) - make_label_edge (edge_cache, src, XEXP (i, 0), - EDGE_ABNORMAL | EDGE_EH | is_call); - - free_INSN_LIST_list (&handlers); -} - -/* Identify critical edges and set the bits appropriately. */ - -void -mark_critical_edges () -{ - int i, n = n_basic_blocks; - basic_block bb; - - /* We begin with the entry block. This is not terribly important now, - but could be if a front end (Fortran) implemented alternate entry - points. */ - bb = ENTRY_BLOCK_PTR; - i = -1; - - while (1) - { - edge e; - - /* (1) Critical edges must have a source with multiple successors. 
*/ - if (bb->succ && bb->succ->succ_next) - { - for (e = bb->succ; e; e = e->succ_next) - { - /* (2) Critical edges must have a destination with multiple - predecessors. Note that we know there is at least one - predecessor -- the edge we followed to get here. */ - if (e->dest->pred->pred_next) - e->flags |= EDGE_CRITICAL; - else - e->flags &= ~EDGE_CRITICAL; - } - } - else - { - for (e = bb->succ; e; e = e->succ_next) - e->flags &= ~EDGE_CRITICAL; - } - - if (++i >= n) - break; - bb = BASIC_BLOCK (i); - } -} - -/* Mark the back edges in DFS traversal. - Return non-zero if a loop (natural or otherwise) is present. - Inspired by Depth_First_Search_PP described in: - - Advanced Compiler Design and Implementation - Steven Muchnick - Morgan Kaufmann, 1997 - - and heavily borrowed from flow_depth_first_order_compute. */ - -bool -mark_dfs_back_edges () -{ - edge *stack; - int *pre; - int *post; - int sp; - int prenum = 1; - int postnum = 1; - sbitmap visited; - bool found = false; - - /* Allocate the preorder and postorder number arrays. */ - pre = (int *) xcalloc (n_basic_blocks, sizeof (int)); - post = (int *) xcalloc (n_basic_blocks, sizeof (int)); - - /* Allocate stack for back-tracking up CFG. */ - stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); - sp = 0; - - /* Allocate bitmap to track nodes that have been visited. */ - visited = sbitmap_alloc (n_basic_blocks); - - /* None of the nodes in the CFG have been visited yet. */ - sbitmap_zero (visited); - - /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; - - while (sp) - { - edge e; - basic_block src; - basic_block dest; - - /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; - e->flags &= ~EDGE_DFS_BACK; - - /* Check if the edge destination has been visited yet. */ - if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) - { - /* Mark that we have visited the destination. 
*/ - SET_BIT (visited, dest->index); - - pre[dest->index] = prenum++; - - if (dest->succ) - { - /* Since the DEST node has been visited for the first - time, check its successors. */ - stack[sp++] = dest->succ; - } - else - post[dest->index] = postnum++; - } - else - { - if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR - && pre[src->index] >= pre[dest->index] - && post[dest->index] == 0) - e->flags |= EDGE_DFS_BACK, found = true; - - if (! e->succ_next && src != ENTRY_BLOCK_PTR) - post[src->index] = postnum++; - - if (e->succ_next) - stack[sp - 1] = e->succ_next; - else - sp--; - } - } - - free (pre); - free (post); - free (stack); - sbitmap_free (visited); - - return found; -} - -/* Split a block BB after insn INSN creating a new fallthru edge. - Return the new edge. Note that to keep other parts of the compiler happy, - this function renumbers all the basic blocks so that the new - one has a number one greater than the block split. */ - -edge -split_block (bb, insn) - basic_block bb; - rtx insn; -{ - basic_block new_bb; - edge new_edge; - edge e; - rtx bb_note; - int i, j; - - /* There is no point splitting the block after its end. */ - if (bb->end == insn) - return 0; - - /* Create the new structures. */ - new_bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*new_bb)); - new_edge = (edge) xcalloc (1, sizeof (*new_edge)); - n_edges++; - - memset (new_bb, 0, sizeof (*new_bb)); - - new_bb->head = NEXT_INSN (insn); - new_bb->end = bb->end; - bb->end = insn; - - new_bb->succ = bb->succ; - bb->succ = new_edge; - new_bb->pred = new_edge; - new_bb->count = bb->count; - new_bb->frequency = bb->frequency; - new_bb->loop_depth = bb->loop_depth; - - new_edge->src = bb; - new_edge->dest = new_bb; - new_edge->flags = EDGE_FALLTHRU; - new_edge->probability = REG_BR_PROB_BASE; - new_edge->count = bb->count; - - /* Redirect the src of the successor edges of bb to point to new_bb. 
*/ - for (e = new_bb->succ; e; e = e->succ_next) - e->src = new_bb; - - /* Place the new block just after the block being split. */ - VARRAY_GROW (basic_block_info, ++n_basic_blocks); - - /* Some parts of the compiler expect blocks to be number in - sequential order so insert the new block immediately after the - block being split.. */ - j = bb->index; - for (i = n_basic_blocks - 1; i > j + 1; --i) - { - basic_block tmp = BASIC_BLOCK (i - 1); - BASIC_BLOCK (i) = tmp; - tmp->index = i; - } - - BASIC_BLOCK (i) = new_bb; - new_bb->index = i; - - if (GET_CODE (new_bb->head) == CODE_LABEL) - { - /* Create the basic block note. */ - bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, - new_bb->head); - NOTE_BASIC_BLOCK (bb_note) = new_bb; - - /* If the only thing in this new block was the label, make sure - the block note gets included. */ - if (new_bb->head == new_bb->end) - new_bb->end = bb_note; - } - else - { - /* Create the basic block note. */ - bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, - new_bb->head); - NOTE_BASIC_BLOCK (bb_note) = new_bb; - new_bb->head = bb_note; - } - - update_bb_for_insn (new_bb); - - if (bb->global_live_at_start) - { - new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); - COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end); - - /* We now have to calculate which registers are live at the end - of the split basic block and at the start of the new basic - block. Start with those registers that are known to be live - at the end of the original basic block and get - propagate_block to determine which registers are live. */ - COPY_REG_SET (new_bb->global_live_at_start, bb->global_live_at_end); - propagate_block (new_bb, new_bb->global_live_at_start, NULL, NULL, 0); - COPY_REG_SET (bb->global_live_at_end, - new_bb->global_live_at_start); - } - - return new_edge; -} - -/* Return label in the head of basic block. 
Create one if it doesn't exist. */ -rtx -block_label (block) - basic_block block; -{ - if (block == EXIT_BLOCK_PTR) - return NULL_RTX; - if (GET_CODE (block->head) != CODE_LABEL) - { - block->head = emit_label_before (gen_label_rtx (), block->head); - if (basic_block_for_insn) - set_block_for_insn (block->head, block); - } - return block->head; -} - -/* Return true if the block has no effect and only forwards control flow to - its single destination. */ -bool -forwarder_block_p (bb) - basic_block bb; -{ - rtx insn = bb->head; - if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR - || !bb->succ || bb->succ->succ_next) - return false; - - while (insn != bb->end) - { - if (active_insn_p (insn)) - return false; - insn = NEXT_INSN (insn); - } - return (!active_insn_p (insn) - || (GET_CODE (insn) == JUMP_INSN && onlyjump_p (insn))); -} - -/* Return nonzero if we can reach target from src by falling trought. */ -static bool -can_fallthru (src, target) - basic_block src, target; -{ - rtx insn = src->end; - rtx insn2 = target->head; - - if (src->index + 1 == target->index && !active_insn_p (insn2)) - insn2 = next_active_insn (insn2); - /* ??? Later we may add code to move jump tables offline. */ - return next_active_insn (insn) == insn2; -} - -/* Attempt to perform edge redirection by replacing possibly complex jump - instruction by unconditional jump or removing jump completely. - This can apply only if all edges now point to the same block. - - The parameters and return values are equivalent to redirect_edge_and_branch. - */ -static bool -try_redirect_by_replacing_jump (e, target) - edge e; - basic_block target; -{ - basic_block src = e->src; - rtx insn = src->end, kill_from; - edge tmp; - rtx set; - int fallthru = 0; - - /* Verify that all targets will be TARGET. */ - for (tmp = src->succ; tmp; tmp = tmp->succ_next) - if (tmp->dest != target && tmp != e) - break; - if (tmp || !onlyjump_p (insn)) - return false; - - /* Avoid removing branch with side effects. 
*/ - set = single_set (insn); - if (!set || side_effects_p (set)) - return false; - - /* In case we zap a conditional jump, we'll need to kill - the cc0 setter too. */ - kill_from = insn; -#ifdef HAVE_cc0 - if (reg_mentioned_p (cc0_rtx, PATTERN (insn))) - kill_from = PREV_INSN (insn); -#endif - - /* See if we can create the fallthru edge. */ - if (can_fallthru (src, target)) - { - src->end = PREV_INSN (kill_from); - if (rtl_dump_file) - fprintf (rtl_dump_file, "Removing jump %i.\n", INSN_UID (insn)); - fallthru = 1; - - /* Selectivly unlink whole insn chain. */ - flow_delete_insn_chain (kill_from, PREV_INSN (target->head)); - } - /* If this already is simplejump, redirect it. */ - else if (simplejump_p (insn)) - { - if (e->dest == target) - return false; - if (rtl_dump_file) - fprintf (rtl_dump_file, "Redirecting jump %i from %i to %i.\n", - INSN_UID (insn), e->dest->index, target->index); - redirect_jump (insn, block_label (target), 0); - } - /* Or replace possibly complicated jump insn by simple jump insn. */ - else - { - rtx target_label = block_label (target); - rtx barrier; - - src->end = emit_jump_insn_before (gen_jump (target_label), kill_from); - JUMP_LABEL (src->end) = target_label; - LABEL_NUSES (target_label)++; - if (basic_block_for_insn) - set_block_for_new_insns (src->end, src); - if (rtl_dump_file) - fprintf (rtl_dump_file, "Replacing insn %i by jump %i\n", - INSN_UID (insn), INSN_UID (src->end)); - - flow_delete_insn_chain (kill_from, insn); - - barrier = next_nonnote_insn (src->end); - if (!barrier || GET_CODE (barrier) != BARRIER) - emit_barrier_after (src->end); - } - - /* Keep only one edge out and set proper flags. 
*/ - while (src->succ->succ_next) - remove_edge (src->succ); - e = src->succ; - if (fallthru) - e->flags = EDGE_FALLTHRU; - else - e->flags = 0; - e->probability = REG_BR_PROB_BASE; - e->count = src->count; - - /* We don't want a block to end on a line-number note since that has - the potential of changing the code between -g and not -g. */ - while (GET_CODE (e->src->end) == NOTE - && NOTE_LINE_NUMBER (e->src->end) >= 0) - { - rtx prev = PREV_INSN (e->src->end); - flow_delete_insn (e->src->end); - e->src->end = prev; - } - - if (e->dest != target) - redirect_edge_succ (e, target); - return true; -} - -/* Return last loop_beg note appearing after INSN, before start of next - basic block. Return INSN if there are no such notes. - - When emmiting jump to redirect an fallthru edge, it should always - appear after the LOOP_BEG notes, as loop optimizer expect loop to - eighter start by fallthru edge or jump following the LOOP_BEG note - jumping to the loop exit test. */ -rtx -last_loop_beg_note (insn) - rtx insn; -{ - rtx last = insn; - insn = NEXT_INSN (insn); - while (GET_CODE (insn) == NOTE - && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK) - { - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) - last = insn; - insn = NEXT_INSN (insn); - } - return last; -} - -/* Attempt to change code to redirect edge E to TARGET. - Don't do that on expense of adding new instructions or reordering - basic blocks. - - Function can be also called with edge destionation equivalent to the - TARGET. Then it should try the simplifications and do nothing if - none is possible. - - Return true if transformation suceeded. We still return flase in case - E already destinated TARGET and we didn't managed to simplify instruction - stream. 
*/ -bool -redirect_edge_and_branch (e, target) - edge e; - basic_block target; -{ - rtx tmp; - rtx old_label = e->dest->head; - basic_block src = e->src; - rtx insn = src->end; - - if (e->flags & EDGE_COMPLEX) - return false; - - if (try_redirect_by_replacing_jump (e, target)) - return true; - /* Do this fast path late, as we want above code to simplify for cases - where called on single edge leaving basic block containing nontrivial - jump insn. */ - else if (e->dest == target) - return false; - - /* We can only redirect non-fallthru edges of jump insn. */ - if (e->flags & EDGE_FALLTHRU) - return false; - if (GET_CODE (insn) != JUMP_INSN) - return false; - - /* Recognize a tablejump and adjust all matching cases. */ - if ((tmp = JUMP_LABEL (insn)) != NULL_RTX - && (tmp = NEXT_INSN (tmp)) != NULL_RTX - && GET_CODE (tmp) == JUMP_INSN - && (GET_CODE (PATTERN (tmp)) == ADDR_VEC - || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)) - { - rtvec vec; - int j; - rtx new_label = block_label (target); - - if (GET_CODE (PATTERN (tmp)) == ADDR_VEC) - vec = XVEC (PATTERN (tmp), 0); - else - vec = XVEC (PATTERN (tmp), 1); - - for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j) - if (XEXP (RTVEC_ELT (vec, j), 0) == old_label) - { - RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label); - --LABEL_NUSES (old_label); - ++LABEL_NUSES (new_label); - } - - /* Handle casesi dispatch insns */ - if ((tmp = single_set (insn)) != NULL - && SET_DEST (tmp) == pc_rtx - && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE - && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF - && XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label) - { - XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (VOIDmode, - new_label); - --LABEL_NUSES (old_label); - ++LABEL_NUSES (new_label); - } - } - else - { - /* ?? We may play the games with moving the named labels from - one basic block to the other in case only one computed_jump is - available. 
*/ - if (computed_jump_p (insn)) - return false; - - /* A return instruction can't be redirected. */ - if (returnjump_p (insn)) - return false; - - /* If the insn doesn't go where we think, we're confused. */ - if (JUMP_LABEL (insn) != old_label) - abort (); - redirect_jump (insn, block_label (target), 0); - } - - if (rtl_dump_file) - fprintf (rtl_dump_file, "Edge %i->%i redirected to %i\n", - e->src->index, e->dest->index, target->index); - if (e->dest != target) - redirect_edge_succ_nodup (e, target); - return true; -} - -/* Redirect edge even at the expense of creating new jump insn or - basic block. Return new basic block if created, NULL otherwise. - Abort if converison is impossible. */ -basic_block -redirect_edge_and_branch_force (e, target) - edge e; - basic_block target; -{ - basic_block new_bb; - edge new_edge; - rtx label; - rtx bb_note; - int i, j; - - if (redirect_edge_and_branch (e, target)) - return NULL; - if (e->dest == target) - return NULL; - if (e->flags & EDGE_ABNORMAL) - abort (); - if (!(e->flags & EDGE_FALLTHRU)) - abort (); - - e->flags &= ~EDGE_FALLTHRU; - label = block_label (target); - /* Case of the fallthru block. */ - if (!e->src->succ->succ_next) - { - e->src->end = emit_jump_insn_after (gen_jump (label), - last_loop_beg_note (e->src->end)); - JUMP_LABEL (e->src->end) = label; - LABEL_NUSES (label)++; - if (basic_block_for_insn) - set_block_for_new_insns (e->src->end, e->src); - emit_barrier_after (e->src->end); - if (rtl_dump_file) - fprintf (rtl_dump_file, - "Emitting jump insn %i to redirect edge %i->%i to %i\n", - INSN_UID (e->src->end), e->src->index, e->dest->index, - target->index); - redirect_edge_succ (e, target); - return NULL; - } - /* Redirecting fallthru edge of the conditional needs extra work. */ - - if (rtl_dump_file) - fprintf (rtl_dump_file, - "Emitting jump insn %i in new BB to redirect edge %i->%i to %i\n", - INSN_UID (e->src->end), e->src->index, e->dest->index, - target->index); - - /* Create the new structures. 
*/ - new_bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*new_bb)); - new_edge = (edge) xcalloc (1, sizeof (*new_edge)); - n_edges++; - - memset (new_bb, 0, sizeof (*new_bb)); - - new_bb->end = new_bb->head = last_loop_beg_note (e->src->end); - new_bb->succ = NULL; - new_bb->pred = new_edge; - new_bb->count = e->count; - new_bb->frequency = EDGE_FREQUENCY (e); - new_bb->loop_depth = e->dest->loop_depth; - - new_edge->flags = EDGE_FALLTHRU; - new_edge->probability = e->probability; - new_edge->count = e->count; - - if (target->global_live_at_start) - { - new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); - COPY_REG_SET (new_bb->global_live_at_start, - target->global_live_at_start); - COPY_REG_SET (new_bb->global_live_at_end, new_bb->global_live_at_start); - } - - /* Wire edge in. */ - new_edge->src = e->src; - new_edge->dest = new_bb; - new_edge->succ_next = e->src->succ; - e->src->succ = new_edge; - new_edge->pred_next = NULL; - - /* Redirect old edge. */ - redirect_edge_succ (e, target); - redirect_edge_pred (e, new_bb); - e->probability = REG_BR_PROB_BASE; - - /* Place the new block just after the block being split. */ - VARRAY_GROW (basic_block_info, ++n_basic_blocks); - - /* Some parts of the compiler expect blocks to be number in - sequential order so insert the new block immediately after the - block being split.. */ - j = new_edge->src->index; - for (i = n_basic_blocks - 1; i > j + 1; --i) - { - basic_block tmp = BASIC_BLOCK (i - 1); - BASIC_BLOCK (i) = tmp; - tmp->index = i; - } - - BASIC_BLOCK (i) = new_bb; - new_bb->index = i; - - /* Create the basic block note. 
*/ - bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, new_bb->head); - NOTE_BASIC_BLOCK (bb_note) = new_bb; - new_bb->head = bb_note; - - new_bb->end = emit_jump_insn_after (gen_jump (label), new_bb->head); - JUMP_LABEL (new_bb->end) = label; - LABEL_NUSES (label)++; - if (basic_block_for_insn) - set_block_for_new_insns (new_bb->end, new_bb); - emit_barrier_after (new_bb->end); - return new_bb; -} - -/* Helper function for split_edge. Return true in case edge BB2 to BB1 - is back edge of syntactic loop. */ -static bool -back_edge_of_syntactic_loop_p (bb1, bb2) - basic_block bb1, bb2; -{ - rtx insn; - int count = 0; - - if (bb1->index > bb2->index) - return false; - - if (bb1->index == bb2->index) - return true; - - for (insn = bb1->end; insn != bb2->head && count >= 0; - insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == NOTE) - { - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) - count++; - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) - count--; - } - - return count >= 0; -} - -/* Split a (typically critical) edge. Return the new block. - Abort on abnormal edges. - - ??? The code generally expects to be called on critical edges. - The case of a block ending in an unconditional jump to a - block with multiple predecessors is not handled optimally. */ - -basic_block -split_edge (edge_in) - edge edge_in; -{ - basic_block old_pred, bb, old_succ; - edge edge_out; - rtx bb_note; - int i, j; - - /* Abnormal edges cannot be split. */ - if ((edge_in->flags & EDGE_ABNORMAL) != 0) - abort (); - - old_pred = edge_in->src; - old_succ = edge_in->dest; - - /* Create the new structures. */ - bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*bb)); - edge_out = (edge) xcalloc (1, sizeof (*edge_out)); - n_edges++; - - memset (bb, 0, sizeof (*bb)); - - /* ??? This info is likely going to be out of date very soon. 
*/ - if (old_succ->global_live_at_start) - { - bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); - bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); - COPY_REG_SET (bb->global_live_at_start, old_succ->global_live_at_start); - COPY_REG_SET (bb->global_live_at_end, old_succ->global_live_at_start); - } - - /* Wire them up. */ - bb->succ = edge_out; - bb->count = edge_in->count; - bb->frequency = EDGE_FREQUENCY (edge_in); - - edge_in->flags &= ~EDGE_CRITICAL; - - edge_out->pred_next = old_succ->pred; - edge_out->succ_next = NULL; - edge_out->src = bb; - edge_out->dest = old_succ; - edge_out->flags = EDGE_FALLTHRU; - edge_out->probability = REG_BR_PROB_BASE; - edge_out->count = edge_in->count; - - old_succ->pred = edge_out; - - /* Tricky case -- if there existed a fallthru into the successor - (and we're not it) we must add a new unconditional jump around - the new block we're actually interested in. - - Further, if that edge is critical, this means a second new basic - block must be created to hold it. In order to simplify correct - insn placement, do this before we touch the existing basic block - ordering for the block we were really wanting. */ - if ((edge_in->flags & EDGE_FALLTHRU) == 0) - { - edge e; - for (e = edge_out->pred_next; e; e = e->pred_next) - if (e->flags & EDGE_FALLTHRU) - break; - - if (e) - { - basic_block jump_block; - rtx pos; - - if ((e->flags & EDGE_CRITICAL) == 0 - && e->src != ENTRY_BLOCK_PTR) - { - /* Non critical -- we can simply add a jump to the end - of the existing predecessor. */ - jump_block = e->src; - } - else - { - /* We need a new block to hold the jump. The simplest - way to do the bulk of the work here is to recursively - call ourselves. */ - jump_block = split_edge (e); - e = jump_block->succ; - } - - /* Now add the jump insn ... 
*/ - pos = emit_jump_insn_after (gen_jump (old_succ->head), - last_loop_beg_note (jump_block->end)); - jump_block->end = pos; - if (basic_block_for_insn) - set_block_for_new_insns (pos, jump_block); - emit_barrier_after (pos); - - /* ... let jump know that label is in use, ... */ - JUMP_LABEL (pos) = old_succ->head; - ++LABEL_NUSES (old_succ->head); - - /* ... and clear fallthru on the outgoing edge. */ - e->flags &= ~EDGE_FALLTHRU; - - /* Continue splitting the interesting edge. */ - } - } - - /* Place the new block just in front of the successor. */ - VARRAY_GROW (basic_block_info, ++n_basic_blocks); - if (old_succ == EXIT_BLOCK_PTR) - j = n_basic_blocks - 1; - else - j = old_succ->index; - for (i = n_basic_blocks - 1; i > j; --i) - { - basic_block tmp = BASIC_BLOCK (i - 1); - BASIC_BLOCK (i) = tmp; - tmp->index = i; - } - BASIC_BLOCK (i) = bb; - bb->index = i; - - /* Create the basic block note. - - Where we place the note can have a noticable impact on the generated - code. Consider this cfg: - - E - | - 0 - / \ - +->1-->2--->E - | | - +--+ - - If we need to insert an insn on the edge from block 0 to block 1, - we want to ensure the instructions we insert are outside of any - loop notes that physically sit between block 0 and block 1. Otherwise - we confuse the loop optimizer into thinking the loop is a phony. 
*/ - if (old_succ != EXIT_BLOCK_PTR - && PREV_INSN (old_succ->head) - && GET_CODE (PREV_INSN (old_succ->head)) == NOTE - && NOTE_LINE_NUMBER (PREV_INSN (old_succ->head)) == NOTE_INSN_LOOP_BEG - && !back_edge_of_syntactic_loop_p (old_succ, old_pred)) - bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, - PREV_INSN (old_succ->head)); - else if (old_succ != EXIT_BLOCK_PTR) - bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, old_succ->head); - else - bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ()); - NOTE_BASIC_BLOCK (bb_note) = bb; - bb->head = bb->end = bb_note; - - /* For non-fallthry edges, we must adjust the predecessor's - jump instruction to target our new block. */ - if ((edge_in->flags & EDGE_FALLTHRU) == 0) - { - if (!redirect_edge_and_branch (edge_in, bb)) - abort (); - } - else - redirect_edge_succ (edge_in, bb); - - return bb; -} - -/* Queue instructions for insertion on an edge between two basic blocks. - The new instructions and basic blocks (if any) will not appear in the - CFG until commit_edge_insertions is called. */ - -void -insert_insn_on_edge (pattern, e) - rtx pattern; - edge e; -{ - /* We cannot insert instructions on an abnormal critical edge. - It will be easier to find the culprit if we die now. */ - if ((e->flags & (EDGE_ABNORMAL|EDGE_CRITICAL)) - == (EDGE_ABNORMAL|EDGE_CRITICAL)) - abort (); - - if (e->insns == NULL_RTX) - start_sequence (); - else - push_to_sequence (e->insns); - - emit_insn (pattern); - - e->insns = get_insns (); - end_sequence (); -} - -/* Update the CFG for the instructions queued on edge E. */ - -static void -commit_one_edge_insertion (e) - edge e; -{ - rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last; - basic_block bb; - - /* Pull the insns off the edge now since the edge might go away. */ - insns = e->insns; - e->insns = NULL_RTX; - - /* Figure out where to put these things. If the destination has - one predecessor, insert there. Except for the exit block. 
*/ - if (e->dest->pred->pred_next == NULL - && e->dest != EXIT_BLOCK_PTR) - { - bb = e->dest; - - /* Get the location correct wrt a code label, and "nice" wrt - a basic block note, and before everything else. */ - tmp = bb->head; - if (GET_CODE (tmp) == CODE_LABEL) - tmp = NEXT_INSN (tmp); - if (NOTE_INSN_BASIC_BLOCK_P (tmp)) - tmp = NEXT_INSN (tmp); - if (tmp == bb->head) - before = tmp; - else - after = PREV_INSN (tmp); - } - - /* If the source has one successor and the edge is not abnormal, - insert there. Except for the entry block. */ - else if ((e->flags & EDGE_ABNORMAL) == 0 - && e->src->succ->succ_next == NULL - && e->src != ENTRY_BLOCK_PTR) - { - bb = e->src; - /* It is possible to have a non-simple jump here. Consider a target - where some forms of unconditional jumps clobber a register. This - happens on the fr30 for example. - - We know this block has a single successor, so we can just emit - the queued insns before the jump. */ - if (GET_CODE (bb->end) == JUMP_INSN) - { - before = bb->end; - while (GET_CODE (PREV_INSN (before)) == NOTE - && NOTE_LINE_NUMBER (PREV_INSN (before)) == NOTE_INSN_LOOP_BEG) - before = PREV_INSN (before); - } - else - { - /* We'd better be fallthru, or we've lost track of what's what. */ - if ((e->flags & EDGE_FALLTHRU) == 0) - abort (); - - after = bb->end; - } - } - - /* Otherwise we must split the edge. */ - else - { - bb = split_edge (e); - after = bb->end; - } - - /* Now that we've found the spot, do the insertion. */ - - /* Set the new block number for these insns, if structure is allocated. */ - if (basic_block_for_insn) - { - rtx i; - for (i = insns; i != NULL_RTX; i = NEXT_INSN (i)) - set_block_for_insn (i, bb); - } - - if (before) - { - emit_insns_before (insns, before); - if (before == bb->head) - bb->head = insns; - - last = prev_nonnote_insn (before); - } - else - { - last = emit_insns_after (insns, after); - if (after == bb->end) - bb->end = last; - } - - if (returnjump_p (last)) - { - /* ??? 
Remove all outgoing edges from BB and add one for EXIT. - This is not currently a problem because this only happens - for the (single) epilogue, which already has a fallthru edge - to EXIT. */ - - e = bb->succ; - if (e->dest != EXIT_BLOCK_PTR - || e->succ_next != NULL - || (e->flags & EDGE_FALLTHRU) == 0) - abort (); - e->flags &= ~EDGE_FALLTHRU; - - emit_barrier_after (last); - bb->end = last; - - if (before) - flow_delete_insn (before); - } - else if (GET_CODE (last) == JUMP_INSN) - abort (); - find_sub_basic_blocks (bb); -} - -/* Update the CFG for all queued instructions. */ - -void -commit_edge_insertions () -{ - int i; - basic_block bb; - compute_bb_for_insn (get_max_uid ()); - -#ifdef ENABLE_CHECKING - verify_flow_info (); -#endif - - i = -1; - bb = ENTRY_BLOCK_PTR; - while (1) - { - edge e, next; - - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (e->insns) - commit_one_edge_insertion (e); - } - - if (++i >= n_basic_blocks) - break; - bb = BASIC_BLOCK (i); - } -} - -/* Return true if we need to add fake edge to exit. - Helper function for the flow_call_edges_add. */ -static bool -need_fake_edge_p (insn) - rtx insn; -{ - if (!INSN_P (insn)) - return false; - - if ((GET_CODE (insn) == CALL_INSN - && !SIBLING_CALL_P (insn) - && !find_reg_note (insn, REG_NORETURN, NULL) - && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL) - && !CONST_OR_PURE_CALL_P (insn))) - return true; - - return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS - && MEM_VOLATILE_P (PATTERN (insn))) - || (GET_CODE (PATTERN (insn)) == PARALLEL - && asm_noperands (insn) != -1 - && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0))) - || GET_CODE (PATTERN (insn)) == ASM_INPUT); -} - -/* Add fake edges to the function exit for any non constant and non noreturn - calls, volatile inline assembly in the bitmap of blocks specified by - BLOCKS or to the whole CFG if BLOCKS is zero. Return the nuber of blocks - that were split. 
- - The goal is to expose cases in which entering a basic block does not imply - that all subsequent instructions must be executed. */ - -int -flow_call_edges_add (blocks) - sbitmap blocks; -{ - int i; - int blocks_split = 0; - int bb_num = 0; - basic_block *bbs; - bool check_last_block = false; - - /* Map bb indicies into basic block pointers since split_block - will renumber the basic blocks. */ - - bbs = xmalloc (n_basic_blocks * sizeof (*bbs)); - - if (! blocks) - { - for (i = 0; i < n_basic_blocks; i++) - bbs[bb_num++] = BASIC_BLOCK (i); - check_last_block = true; - } - else - { - EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i, - { - bbs[bb_num++] = BASIC_BLOCK (i); - if (i == n_basic_blocks - 1) - check_last_block = true; - }); - } - - /* In the last basic block, before epilogue generation, there will be - a fallthru edge to EXIT. Special care is required if the last insn - of the last basic block is a call because make_edge folds duplicate - edges, which would result in the fallthru edge also being marked - fake, which would result in the fallthru edge being removed by - remove_fake_edges, which would result in an invalid CFG. - - Moreover, we can't elide the outgoing fake edge, since the block - profiler needs to take this into account in order to solve the minimal - spanning tree in the case that the call doesn't return. - - Handle this by adding a dummy instruction in a new last basic block. */ - if (check_last_block - && need_fake_edge_p (BASIC_BLOCK (n_basic_blocks - 1)->end)) - { - edge e; - for (e = BASIC_BLOCK (n_basic_blocks - 1)->succ; e; e = e->succ_next) - if (e->dest == EXIT_BLOCK_PTR) - break; - insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); - commit_edge_insertions (); - } - - - /* Now add fake edges to the function exit for any non constant - calls since there is no way that we can determine if they will - return or not... 
*/ - - for (i = 0; i < bb_num; i++) - { - basic_block bb = bbs[i]; - rtx insn; - rtx prev_insn; - - for (insn = bb->end; ; insn = prev_insn) - { - prev_insn = PREV_INSN (insn); - if (need_fake_edge_p (insn)) - { - edge e; - - /* The above condition should be enought to verify that there is - no edge to the exit block in CFG already. Calling make_edge in - such case would make us to mark that edge as fake and remove it - later. */ -#ifdef ENABLE_CHECKING - if (insn == bb->end) - for (e = bb->succ; e; e = e->succ_next) - if (e->dest == EXIT_BLOCK_PTR) - abort (); -#endif - - /* Note that the following may create a new basic block - and renumber the existing basic blocks. */ - e = split_block (bb, insn); - if (e) - blocks_split++; - - make_edge (NULL, bb, EXIT_BLOCK_PTR, EDGE_FAKE); - } - if (insn == bb->head) - break; - } - } - - if (blocks_split) - verify_flow_info (); - - free (bbs); - return blocks_split; -} - -/* Find unreachable blocks. An unreachable block will have 0 in - the reachable bit in block->flags. A non-zero value indicates the - block is reachable. */ - -void -find_unreachable_blocks () -{ - edge e; - int i, n; - basic_block *tos, *worklist; - - n = n_basic_blocks; - tos = worklist = (basic_block *) xmalloc (sizeof (basic_block) * n); - - /* Clear all the reachability flags. */ - - for (i = 0; i < n; ++i) - BASIC_BLOCK (i)->flags &= ~BB_REACHABLE; - - /* Add our starting points to the worklist. Almost always there will - be only one. It isn't inconcievable that we might one day directly - support Fortran alternate entry points. */ - - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - { - *tos++ = e->dest; - - /* Mark the block reachable. */ - e->dest->flags |= BB_REACHABLE; - } - - /* Iterate: find everything reachable from what we've already seen. 
*/ - - while (tos != worklist) - { - basic_block b = *--tos; - - for (e = b->succ; e; e = e->succ_next) - if (!(e->dest->flags & BB_REACHABLE)) - { - *tos++ = e->dest; - e->dest->flags |= BB_REACHABLE; - } - } - - free (worklist); -} - -/* Delete all unreachable basic blocks. */ -static void -delete_unreachable_blocks () -{ - int i; - - find_unreachable_blocks (); - - /* Delete all unreachable basic blocks. Count down so that we - don't interfere with the block renumbering that happens in - flow_delete_block. */ - - for (i = n_basic_blocks - 1; i >= 0; --i) - { - basic_block b = BASIC_BLOCK (i); - - if (!(b->flags & BB_REACHABLE)) - flow_delete_block (b); - } - - tidy_fallthru_edges (); -} - -/* Return true if NOTE is not one of the ones that must be kept paired, - so that we may simply delete them. */ - -static int -can_delete_note_p (note) - rtx note; -{ - return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED - || NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK); -} - -/* Unlink a chain of insns between START and FINISH, leaving notes - that must be paired. */ - -void -flow_delete_insn_chain (start, finish) - rtx start, finish; -{ - /* Unchain the insns one by one. It would be quicker to delete all - of these with a single unchaining, rather than one at a time, but - we need to keep the NOTE's. */ - - rtx next; - - while (1) - { - next = NEXT_INSN (start); - if (GET_CODE (start) == NOTE && !can_delete_note_p (start)) - ; - else if (GET_CODE (start) == CODE_LABEL - && ! can_delete_label_p (start)) - { - const char *name = LABEL_NAME (start); - PUT_CODE (start, NOTE); - NOTE_LINE_NUMBER (start) = NOTE_INSN_DELETED_LABEL; - NOTE_SOURCE_FILE (start) = name; - } - else - next = flow_delete_insn (start); - - if (start == finish) - break; - start = next; - } -} - -/* Delete the insns in a (non-live) block. We physically delete every - non-deleted-note insn, and update the flow graph appropriately. - - Return nonzero if we deleted an exception handler. */ - -/* ??? 
Preserving all such notes strikes me as wrong. It would be nice - to post-process the stream to remove empty blocks, loops, ranges, etc. */ - -int -flow_delete_block (b) - basic_block b; -{ - int deleted_handler = 0; - rtx insn, end, tmp; - - /* If the head of this block is a CODE_LABEL, then it might be the - label for an exception handler which can't be reached. - - We need to remove the label from the exception_handler_label list - and remove the associated NOTE_INSN_EH_REGION_BEG and - NOTE_INSN_EH_REGION_END notes. */ - - insn = b->head; - - never_reached_warning (insn); - - if (GET_CODE (insn) == CODE_LABEL) - maybe_remove_eh_handler (insn); - - /* Include any jump table following the basic block. */ - end = b->end; - if (GET_CODE (end) == JUMP_INSN - && (tmp = JUMP_LABEL (end)) != NULL_RTX - && (tmp = NEXT_INSN (tmp)) != NULL_RTX - && GET_CODE (tmp) == JUMP_INSN - && (GET_CODE (PATTERN (tmp)) == ADDR_VEC - || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)) - end = tmp; - - /* Include any barrier that may follow the basic block. */ - tmp = next_nonnote_insn (end); - if (tmp && GET_CODE (tmp) == BARRIER) - end = tmp; - - /* Selectively delete the entire chain. */ - flow_delete_insn_chain (insn, end); - - /* Remove the edges into and out of this block. Note that there may - indeed be edges in, if we are removing an unreachable loop. */ - { - edge e, next, *q; - - for (e = b->pred; e; e = next) - { - for (q = &e->src->succ; *q != e; q = &(*q)->succ_next) - continue; - *q = e->succ_next; - next = e->pred_next; - n_edges--; - free (e); - } - for (e = b->succ; e; e = next) - { - for (q = &e->dest->pred; *q != e; q = &(*q)->pred_next) - continue; - *q = e->pred_next; - next = e->succ_next; - n_edges--; - free (e); - } - - b->pred = NULL; - b->succ = NULL; - } - - /* Remove the basic block from the array, and compact behind it. */ - expunge_block (b); - - return deleted_handler; -} - -/* Remove block B from the basic block array and compact behind it. 
*/ - -void -expunge_block (b) - basic_block b; -{ - int i, n = n_basic_blocks; - - for (i = b->index; i + 1 < n; ++i) - { - basic_block x = BASIC_BLOCK (i + 1); - BASIC_BLOCK (i) = x; - x->index = i; - } - - basic_block_info->num_elements--; - n_basic_blocks--; -} - -/* Delete INSN by patching it out. Return the next insn. */ - -rtx -flow_delete_insn (insn) - rtx insn; -{ - rtx prev = PREV_INSN (insn); - rtx next = NEXT_INSN (insn); - rtx note; - - PREV_INSN (insn) = NULL_RTX; - NEXT_INSN (insn) = NULL_RTX; - INSN_DELETED_P (insn) = 1; - - if (prev) - NEXT_INSN (prev) = next; - if (next) - PREV_INSN (next) = prev; - else - set_last_insn (prev); - - if (GET_CODE (insn) == CODE_LABEL) - remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels); - - /* If deleting a jump, decrement the use count of the label. Deleting - the label itself should happen in the normal course of block merging. */ - if (GET_CODE (insn) == JUMP_INSN - && JUMP_LABEL (insn) - && GET_CODE (JUMP_LABEL (insn)) == CODE_LABEL) - LABEL_NUSES (JUMP_LABEL (insn))--; - - /* Also if deleting an insn that references a label. */ - else if ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != NULL_RTX - && GET_CODE (XEXP (note, 0)) == CODE_LABEL) - LABEL_NUSES (XEXP (note, 0))--; - - if (GET_CODE (insn) == JUMP_INSN - && (GET_CODE (PATTERN (insn)) == ADDR_VEC - || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) - { - rtx pat = PATTERN (insn); - int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC; - int len = XVECLEN (pat, diff_vec_p); - int i; - - for (i = 0; i < len; i++) - LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))--; - } - - return next; -} - -/* True if a given label can be deleted. 
*/ - -static int -can_delete_label_p (label) - rtx label; -{ - rtx x; - - if (LABEL_PRESERVE_P (label)) - return 0; - - for (x = forced_labels; x; x = XEXP (x, 1)) - if (label == XEXP (x, 0)) - return 0; - for (x = label_value_list; x; x = XEXP (x, 1)) - if (label == XEXP (x, 0)) - return 0; - for (x = exception_handler_labels; x; x = XEXP (x, 1)) - if (label == XEXP (x, 0)) - return 0; - - /* User declared labels must be preserved. */ - if (LABEL_NAME (label) != 0) - return 0; - - return 1; -} - -static int -tail_recursion_label_p (label) - rtx label; -{ - rtx x; - - for (x = tail_recursion_label_list; x; x = XEXP (x, 1)) - if (label == XEXP (x, 0)) - return 1; - - return 0; -} - -/* Blocks A and B are to be merged into a single block A. The insns - are already contiguous, hence `nomove'. */ - -void -merge_blocks_nomove (a, b) - basic_block a, b; -{ - edge e; - rtx b_head, b_end, a_end; - rtx del_first = NULL_RTX, del_last = NULL_RTX; - int b_empty = 0; - - /* If there was a CODE_LABEL beginning B, delete it. */ - b_head = b->head; - b_end = b->end; - if (GET_CODE (b_head) == CODE_LABEL) - { - /* Detect basic blocks with nothing but a label. This can happen - in particular at the end of a function. */ - if (b_head == b_end) - b_empty = 1; - del_first = del_last = b_head; - b_head = NEXT_INSN (b_head); - } - - /* Delete the basic block note. */ - if (NOTE_INSN_BASIC_BLOCK_P (b_head)) - { - if (b_head == b_end) - b_empty = 1; - if (! del_last) - del_first = b_head; - del_last = b_head; - b_head = NEXT_INSN (b_head); - } - - /* If there was a jump out of A, delete it. */ - a_end = a->end; - if (GET_CODE (a_end) == JUMP_INSN) - { - rtx prev; - - for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev)) - if (GET_CODE (prev) != NOTE - || NOTE_LINE_NUMBER (prev) == NOTE_INSN_BASIC_BLOCK - || prev == a->head) - break; - - del_first = a_end; - -#ifdef HAVE_cc0 - /* If this was a conditional jump, we need to also delete - the insn that set cc0. 
*/ - if (only_sets_cc0_p (prev)) - { - rtx tmp = prev; - prev = prev_nonnote_insn (prev); - if (!prev) - prev = a->head; - del_first = tmp; - } -#endif - - a_end = prev; - } - else if (GET_CODE (NEXT_INSN (a_end)) == BARRIER) - del_first = NEXT_INSN (a_end); - - /* Delete everything marked above as well as crap that might be - hanging out between the two blocks. */ - flow_delete_insn_chain (del_first, del_last); - - /* Normally there should only be one successor of A and that is B, but - partway though the merge of blocks for conditional_execution we'll - be merging a TEST block with THEN and ELSE successors. Free the - whole lot of them and hope the caller knows what they're doing. */ - while (a->succ) - remove_edge (a->succ); - - /* Adjust the edges out of B for the new owner. */ - for (e = b->succ; e; e = e->succ_next) - e->src = a; - a->succ = b->succ; - - /* B hasn't quite yet ceased to exist. Attempt to prevent mishap. */ - b->pred = b->succ = NULL; - - /* Reassociate the insns of B with A. */ - if (!b_empty) - { - if (basic_block_for_insn) - { - BLOCK_FOR_INSN (b_head) = a; - while (b_head != b_end) - { - b_head = NEXT_INSN (b_head); - BLOCK_FOR_INSN (b_head) = a; - } - } - a_end = b_end; - } - a->end = a_end; - - expunge_block (b); -} - -/* Blocks A and B are to be merged into a single block. A has no incoming - fallthru edge, so it can be moved before B without adding or modifying - any jumps (aside from the jump from A to B). */ - -static int -merge_blocks_move_predecessor_nojumps (a, b) - basic_block a, b; -{ - rtx barrier; - int index; - - barrier = next_nonnote_insn (a->end); - if (GET_CODE (barrier) != BARRIER) - abort (); - flow_delete_insn (barrier); - - /* Move block and loop notes out of the chain so that we do not - disturb their order. - - ??? A better solution would be to squeeze out all the non-nested notes - and adjust the block trees appropriately. 
Even better would be to have - a tighter connection between block trees and rtl so that this is not - necessary. */ - squeeze_notes (&a->head, &a->end); - - /* Scramble the insn chain. */ - if (a->end != PREV_INSN (b->head)) - reorder_insns (a->head, a->end, PREV_INSN (b->head)); - - if (rtl_dump_file) - { - fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n", - a->index, b->index); - } - - /* Swap the records for the two blocks around. Although we are deleting B, - A is now where B was and we want to compact the BB array from where - A used to be. */ - BASIC_BLOCK (a->index) = b; - BASIC_BLOCK (b->index) = a; - index = a->index; - a->index = b->index; - b->index = index; - - /* Now blocks A and B are contiguous. Merge them. */ - merge_blocks_nomove (a, b); - - return 1; -} - -/* Blocks A and B are to be merged into a single block. B has no outgoing - fallthru edge, so it can be moved after A without adding or modifying - any jumps (aside from the jump from A to B). */ - -static int -merge_blocks_move_successor_nojumps (a, b) - basic_block a, b; -{ - rtx barrier; - - barrier = NEXT_INSN (b->end); - - /* Recognize a jump table following block B. */ - if (barrier - && GET_CODE (barrier) == CODE_LABEL - && NEXT_INSN (barrier) - && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN - && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC - || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC)) - { - b->end = NEXT_INSN (barrier); - barrier = NEXT_INSN (b->end); - } - - /* There had better have been a barrier there. Delete it. */ - if (barrier && GET_CODE (barrier) == BARRIER) - flow_delete_insn (barrier); - - /* Move block and loop notes out of the chain so that we do not - disturb their order. - - ??? A better solution would be to squeeze out all the non-nested notes - and adjust the block trees appropriately. Even better would be to have - a tighter connection between block trees and rtl so that this is not - necessary. 
*/ - squeeze_notes (&b->head, &b->end); - - /* Scramble the insn chain. */ - reorder_insns (b->head, b->end, a->end); - - /* Now blocks A and B are contiguous. Merge them. */ - merge_blocks_nomove (a, b); - - if (rtl_dump_file) - { - fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n", - b->index, a->index); - } - - return 1; -} - -/* Attempt to merge basic blocks that are potentially non-adjacent. - Return true iff the attempt succeeded. */ - -static int -merge_blocks (e, b, c, mode) - edge e; - basic_block b, c; - int mode; -{ - /* If C has a tail recursion label, do not merge. There is no - edge recorded from the call_placeholder back to this label, as - that would make optimize_sibling_and_tail_recursive_calls more - complex for no gain. */ - if (GET_CODE (c->head) == CODE_LABEL - && tail_recursion_label_p (c->head)) - return 0; - - /* If B has a fallthru edge to C, no need to move anything. */ - if (e->flags & EDGE_FALLTHRU) - { - merge_blocks_nomove (b, c); - - if (rtl_dump_file) - { - fprintf (rtl_dump_file, "Merged %d and %d without moving.\n", - b->index, c->index); - } - - return 1; - } - /* Otherwise we will need to move code around. Do that only if expensive - transformations are allowed. */ - else if (mode & CLEANUP_EXPENSIVE) - { - edge tmp_edge, c_fallthru_edge; - int c_has_outgoing_fallthru; - int b_has_incoming_fallthru; - - /* Avoid overactive code motion, as the forwarder blocks should be - eliminated by edge redirection instead. One exception might have - been if B is a forwarder block and C has no fallthru edge, but - that should be cleaned up by bb-reorder instead. */ - if (forwarder_block_p (b) || forwarder_block_p (c)) - return 0; - - /* We must make sure to not munge nesting of lexical blocks, - and loop notes. This is done by squeezing out all the notes - and leaving them there to lie. Not ideal, but functional. 
*/ - - for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next) - if (tmp_edge->flags & EDGE_FALLTHRU) - break; - c_has_outgoing_fallthru = (tmp_edge != NULL); - c_fallthru_edge = tmp_edge; - - for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next) - if (tmp_edge->flags & EDGE_FALLTHRU) - break; - b_has_incoming_fallthru = (tmp_edge != NULL); - - /* If B does not have an incoming fallthru, then it can be moved - immediately before C without introducing or modifying jumps. - C cannot be the first block, so we do not have to worry about - accessing a non-existent block. */ - if (! b_has_incoming_fallthru) - return merge_blocks_move_predecessor_nojumps (b, c); - - /* Otherwise, we're going to try to move C after B. If C does - not have an outgoing fallthru, then it can be moved - immediately after B without introducing or modifying jumps. */ - if (! c_has_outgoing_fallthru) - return merge_blocks_move_successor_nojumps (b, c); - - /* Otherwise, we'll need to insert an extra jump, and possibly - a new block to contain it. We can't redirect to EXIT_BLOCK_PTR, - as we don't have explicit return instructions before epilogues - are generated, so give up on that case. */ - - if (c_fallthru_edge->dest != EXIT_BLOCK_PTR - && merge_blocks_move_successor_nojumps (b, c)) - { - basic_block target = c_fallthru_edge->dest; - rtx barrier; - basic_block new; - - /* This is a dirty hack to avoid code duplication. - - Set edge to point to wrong basic block, so - redirect_edge_and_branch_force will do the trick - and rewire edge back to the original location. */ - redirect_edge_succ (c_fallthru_edge, ENTRY_BLOCK_PTR); - new = redirect_edge_and_branch_force (c_fallthru_edge, target); - - /* We've just created barrier, but another barrier is - already present in the stream. Avoid the duplicate. */ - barrier = next_nonnote_insn (new ? 
new->end : b->end); - if (GET_CODE (barrier) != BARRIER) - abort (); - flow_delete_insn (barrier); - - return 1; - } - - return 0; - } - return 0; -} - -/* Simplify a conditional jump around an unconditional jump. - Return true if something changed. */ - -static bool -try_simplify_condjump (cbranch_block) - basic_block cbranch_block; -{ - basic_block jump_block, jump_dest_block, cbranch_dest_block; - edge cbranch_jump_edge, cbranch_fallthru_edge; - rtx cbranch_insn; - - /* Verify that there are exactly two successors. */ - if (!cbranch_block->succ - || !cbranch_block->succ->succ_next - || cbranch_block->succ->succ_next->succ_next) - return false; - - /* Verify that we've got a normal conditional branch at the end - of the block. */ - cbranch_insn = cbranch_block->end; - if (!any_condjump_p (cbranch_insn)) - return false; - - cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block); - cbranch_jump_edge = BRANCH_EDGE (cbranch_block); - - /* The next block must not have multiple predecessors, must not - be the last block in the function, and must contain just the - unconditional jump. */ - jump_block = cbranch_fallthru_edge->dest; - if (jump_block->pred->pred_next - || jump_block->index == n_basic_blocks - 1 - || !forwarder_block_p (jump_block)) - return false; - jump_dest_block = jump_block->succ->dest; - - /* The conditional branch must target the block after the - unconditional branch. */ - cbranch_dest_block = cbranch_jump_edge->dest; - - if (!can_fallthru (jump_block, cbranch_dest_block)) - return false; - - /* Invert the conditional branch. Prevent jump.c from deleting - "unreachable" instructions. */ - LABEL_NUSES (JUMP_LABEL (cbranch_insn))++; - if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 1)) - { - LABEL_NUSES (JUMP_LABEL (cbranch_insn))--; - return false; - } - - if (rtl_dump_file) - fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n", - INSN_UID (cbranch_insn), INSN_UID (jump_block->end)); - - /* Success. 
Update the CFG to match. Note that after this point - the edge variable names appear backwards; the redirection is done - this way to preserve edge profile data. */ - cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge, - cbranch_dest_block); - cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge, - jump_dest_block); - cbranch_jump_edge->flags |= EDGE_FALLTHRU; - cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU; - - /* Delete the block with the unconditional jump, and clean up the mess. */ - flow_delete_block (jump_block); - tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block); - - return true; -} - -/* Attempt to forward edges leaving basic block B. - Return true if sucessful. */ - -static bool -try_forward_edges (mode, b) - basic_block b; - int mode; -{ - bool changed = false; - edge e, next; - - for (e = b->succ; e ; e = next) - { - basic_block target, first; - int counter; - - next = e->succ_next; - - /* Skip complex edges because we don't know how to update them. - - Still handle fallthru edges, as we can suceed to forward fallthru - edge to the same place as the branch edge of conditional branch - and turn conditional branch to an unconditonal branch. */ - if (e->flags & EDGE_COMPLEX) - continue; - - target = first = e->dest; - counter = 0; - - /* Look for the real destination of the jump. - Avoid inifinite loop in the infinite empty loop by counting - up to n_basic_blocks. */ - while (forwarder_block_p (target) - && target->succ->dest != EXIT_BLOCK_PTR - && counter < n_basic_blocks) - { - /* Bypass trivial infinite loops. */ - if (target == target->succ->dest) - counter = n_basic_blocks; - - /* Avoid killing of loop pre-headers, as it is the place loop - optimizer wants to hoist code to. - - For fallthru forwarders, the LOOP_BEG note must appear between - the header of block and CODE_LABEL of the loop, for non forwarders - it must appear before the JUMP_INSN. 
*/ - if (mode & CLEANUP_PRE_LOOP) - { - rtx insn = (target->succ->flags & EDGE_FALLTHRU - ? target->head : prev_nonnote_insn (target->end)); - - if (GET_CODE (insn) != NOTE) - insn = NEXT_INSN (insn); - - for (;insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn); - insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == NOTE - && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) - break; - - if (GET_CODE (insn) == NOTE) - break; - } - target = target->succ->dest, counter++; - } - - if (counter >= n_basic_blocks) - { - if (rtl_dump_file) - fprintf (rtl_dump_file, "Infinite loop in BB %i.\n", - target->index); - } - else if (target == first) - ; /* We didn't do anything. */ - else - { - /* Save the values now, as the edge may get removed. */ - gcov_type edge_count = e->count; - int edge_probability = e->probability; - - if (redirect_edge_and_branch (e, target)) - { - /* We successfully forwarded the edge. Now update profile - data: for each edge we traversed in the chain, remove - the original edge's execution count. */ - int edge_frequency = ((edge_probability * b->frequency - + REG_BR_PROB_BASE / 2) - / REG_BR_PROB_BASE); - - do - { - first->count -= edge_count; - first->succ->count -= edge_count; - first->frequency -= edge_frequency; - first = first->succ->dest; - } - while (first != target); - - changed = true; - } - else - { - if (rtl_dump_file) - fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n", - b->index, e->dest->index, target->index); - } - } - } - - return changed; -} - -/* Look through the insns at the end of BB1 and BB2 and find the longest - sequence that are equivalent. Store the first insns for that sequence - in *F1 and *F2 and return the sequence length. - - To simplify callers of this function, if the blocks match exactly, - store the head of the blocks in *F1 and *F2. 
*/ - -static int -flow_find_cross_jump (mode, bb1, bb2, f1, f2) - int mode ATTRIBUTE_UNUSED; - basic_block bb1, bb2; - rtx *f1, *f2; -{ - rtx i1, i2, p1, p2, last1, last2, afterlast1, afterlast2; - int ninsns = 0; - - /* Skip simple jumps at the end of the blocks. Complex jumps still - need to be compared for equivalence, which we'll do below. */ - - i1 = bb1->end; - if (onlyjump_p (i1) - || (returnjump_p (i1) && !side_effects_p (PATTERN (i1)))) - i1 = PREV_INSN (i1); - i2 = bb2->end; - if (onlyjump_p (i2) - || (returnjump_p (i2) && !side_effects_p (PATTERN (i2)))) - i2 = PREV_INSN (i2); - - last1 = afterlast1 = last2 = afterlast2 = NULL_RTX; - while (true) - { - /* Ignore notes. */ - while ((GET_CODE (i1) == NOTE && i1 != bb1->head)) - i1 = PREV_INSN (i1); - while ((GET_CODE (i2) == NOTE && i2 != bb2->head)) - i2 = PREV_INSN (i2); - - if (i1 == bb1->head || i2 == bb2->head) - break; - - /* Verify that I1 and I2 are equivalent. */ - - if (GET_CODE (i1) != GET_CODE (i2)) - break; - - p1 = PATTERN (i1); - p2 = PATTERN (i2); - - /* If this is a CALL_INSN, compare register usage information. - If we don't check this on stack register machines, the two - CALL_INSNs might be merged leaving reg-stack.c with mismatching - numbers of stack registers in the same basic block. - If we don't check this on machines with delay slots, a delay slot may - be filled that clobbers a parameter expected by the subroutine. - - ??? We take the simple route for now and assume that if they're - equal, they were constructed identically. */ - - if (GET_CODE (i1) == CALL_INSN - && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1), - CALL_INSN_FUNCTION_USAGE (i2))) - break; - -#ifdef STACK_REGS - /* If cross_jump_death_matters is not 0, the insn's mode - indicates whether or not the insn contains any stack-like - regs. 
*/ - - if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1)) - { - /* If register stack conversion has already been done, then - death notes must also be compared before it is certain that - the two instruction streams match. */ - - rtx note; - HARD_REG_SET i1_regset, i2_regset; - - CLEAR_HARD_REG_SET (i1_regset); - CLEAR_HARD_REG_SET (i2_regset); - - for (note = REG_NOTES (i1); note; note = XEXP (note, 1)) - if (REG_NOTE_KIND (note) == REG_DEAD - && STACK_REG_P (XEXP (note, 0))) - SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0))); - - for (note = REG_NOTES (i2); note; note = XEXP (note, 1)) - if (REG_NOTE_KIND (note) == REG_DEAD - && STACK_REG_P (XEXP (note, 0))) - SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0))); - - GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done); - - break; - - done: - ; - } -#endif - - if (GET_CODE (p1) != GET_CODE (p2)) - break; - - if (! rtx_renumbered_equal_p (p1, p2)) - { - /* The following code helps take care of G++ cleanups. */ - rtx equiv1 = find_reg_equal_equiv_note (i1); - rtx equiv2 = find_reg_equal_equiv_note (i2); - - if (equiv1 && equiv2 - /* If the equivalences are not to a constant, they may - reference pseudos that no longer exist, so we can't - use them. */ - && CONSTANT_P (XEXP (equiv1, 0)) - && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0))) - { - rtx s1 = single_set (i1); - rtx s2 = single_set (i2); - if (s1 != 0 && s2 != 0 - && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2))) - { - validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1); - validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1); - if (! rtx_renumbered_equal_p (p1, p2)) - cancel_changes (0); - else if (apply_change_group ()) - goto win; - } - } - break; - } - - win: - /* Don't begin a cross-jump with a USE or CLOBBER insn. 
*/ - if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER) - { - afterlast1 = last1, afterlast2 = last2; - last1 = i1, last2 = i2; - ninsns++; - } - i1 = PREV_INSN (i1); - i2 = PREV_INSN (i2); - } - -#ifdef HAVE_cc0 - if (ninsns) - { - /* Don't allow the insn after a compare to be shared by - cross-jumping unless the compare is also shared. */ - if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1)) - last1 = afterlast1, last2 = afterlast2, ninsns--; - } -#endif - - /* Include preceeding notes and labels in the cross-jump. One, - this may bring us to the head of the blocks as requested above. - Two, it keeps line number notes as matched as may be. */ - if (ninsns) - { - while (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == NOTE) - last1 = PREV_INSN (last1); - if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL) - last1 = PREV_INSN (last1); - while (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == NOTE) - last2 = PREV_INSN (last2); - if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL) - last2 = PREV_INSN (last2); - - *f1 = last1; - *f2 = last2; - } - - return ninsns; -} - -/* Return true iff outgoing edges of BB1 and BB2 match, together with - the branch instruction. This means that if we commonize the control - flow before end of the basic block, the semantic remains unchanged. - - We may assume that there exists one edge with a common destination. */ - -static bool -outgoing_edges_match (bb1, bb2) - basic_block bb1; - basic_block bb2; -{ - /* If BB1 has only one successor, we must be looking at an unconditional - jump. Which, by the assumption above, means that we only need to check - that BB2 has one successor. */ - if (bb1->succ && !bb1->succ->succ_next) - return (bb2->succ && !bb2->succ->succ_next); - - /* Match conditional jumps - this may get tricky when fallthru and branch - edges are crossed. 
*/ - if (bb1->succ - && bb1->succ->succ_next - && !bb1->succ->succ_next->succ_next - && any_condjump_p (bb1->end)) - { - edge b1, f1, b2, f2; - bool reverse, match; - rtx set1, set2, cond1, cond2; - enum rtx_code code1, code2; - - if (!bb2->succ - || !bb2->succ->succ_next - || bb1->succ->succ_next->succ_next - || !any_condjump_p (bb2->end)) - return false; - - b1 = BRANCH_EDGE (bb1); - b2 = BRANCH_EDGE (bb2); - f1 = FALLTHRU_EDGE (bb1); - f2 = FALLTHRU_EDGE (bb2); - - /* Get around possible forwarders on fallthru edges. Other cases - should be optimized out already. */ - if (forwarder_block_p (f1->dest)) - f1 = f1->dest->succ; - if (forwarder_block_p (f2->dest)) - f2 = f2->dest->succ; - - /* To simplify use of this function, return false if there are - unneeded forwarder blocks. These will get eliminated later - during cleanup_cfg. */ - if (forwarder_block_p (f1->dest) - || forwarder_block_p (f2->dest) - || forwarder_block_p (b1->dest) - || forwarder_block_p (b2->dest)) - return false; - - if (f1->dest == f2->dest && b1->dest == b2->dest) - reverse = false; - else if (f1->dest == b2->dest && b1->dest == f2->dest) - reverse = true; - else - return false; - - set1 = pc_set (bb1->end); - set2 = pc_set (bb2->end); - if ((XEXP (SET_SRC (set1), 1) == pc_rtx) - != (XEXP (SET_SRC (set2), 1) == pc_rtx)) - reverse = !reverse; - - cond1 = XEXP (SET_SRC (set1), 0); - cond2 = XEXP (SET_SRC (set2), 0); - code1 = GET_CODE (cond1); - if (reverse) - code2 = reversed_comparison_code (cond2, bb2->end); - else - code2 = GET_CODE (cond2); - if (code2 == UNKNOWN) - return false; - - /* Verify codes and operands match. 
*/ - match = ((code1 == code2 - && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) - && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) - || (code1 == swap_condition (code2) - && rtx_renumbered_equal_p (XEXP (cond1, 1), - XEXP (cond2, 0)) - && rtx_renumbered_equal_p (XEXP (cond1, 0), - XEXP (cond2, 1)))); - - /* If we return true, we will join the blocks. Which means that - we will only have one branch prediction bit to work with. Thus - we require the existing branches to have probabilities that are - roughly similar. */ - /* ??? We should use bb->frequency to allow merging in infrequently - executed blocks, but at the moment it is not available when - cleanup_cfg is run. */ - if (match && !optimize_size) - { - rtx note1, note2; - int prob1, prob2; - note1 = find_reg_note (bb1->end, REG_BR_PROB, 0); - note2 = find_reg_note (bb2->end, REG_BR_PROB, 0); - - if (note1 && note2) - { - prob1 = INTVAL (XEXP (note1, 0)); - prob2 = INTVAL (XEXP (note2, 0)); - if (reverse) - prob2 = REG_BR_PROB_BASE - prob2; - - /* Fail if the difference in probabilities is - greater than 5%. */ - if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20) - return false; - } - else if (note1 || note2) - return false; - } - - if (rtl_dump_file && match) - fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n", - bb1->index, bb2->index); - - return match; - } - - /* ??? We can handle computed jumps too. This may be important for - inlined functions containing switch statements. Also jumps w/o - fallthru edges can be handled by simply matching whole insn. */ - return false; -} - -/* E1 and E2 are edges with the same destination block. Search their - predecessors for common code. If found, redirect control flow from - (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. 
*/ - -static bool -try_crossjump_to_edge (mode, e1, e2) - int mode; - edge e1, e2; -{ - int nmatch; - basic_block src1 = e1->src, src2 = e2->src; - basic_block redirect_to; - rtx newpos1, newpos2; - edge s; - rtx last; - rtx label; - rtx note; - - /* Search backward through forwarder blocks. We don't need to worry - about multiple entry or chained forwarders, as they will be optimized - away. We do this to look past the unconditional jump following a - conditional jump that is required due to the current CFG shape. */ - if (src1->pred - && !src1->pred->pred_next - && forwarder_block_p (src1)) - { - e1 = src1->pred; - src1 = e1->src; - } - if (src2->pred - && !src2->pred->pred_next - && forwarder_block_p (src2)) - { - e2 = src2->pred; - src2 = e2->src; - } - - /* Nothing to do if we reach ENTRY, or a common source block. */ - if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR) - return false; - if (src1 == src2) - return false; - - /* Seeing more than 1 forwarder blocks would confuse us later... */ - if (forwarder_block_p (e1->dest) - && forwarder_block_p (e1->dest->succ->dest)) - return false; - if (forwarder_block_p (e2->dest) - && forwarder_block_p (e2->dest->succ->dest)) - return false; - - /* Likewise with dead code (possibly newly created by the other optimizations - of cfg_cleanup). */ - if (!src1->pred || !src2->pred) - return false; - - /* Likewise with complex edges. - ??? We should be able to handle most complex edges later with some - care. */ - if (e1->flags & EDGE_COMPLEX) - return false; - - /* Look for the common insn sequence, part the first ... */ - if (!outgoing_edges_match (src1, src2)) - return false; - - /* ... and part the second. */ - nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2); - if (!nmatch) - return false; - - /* Avoid splitting if possible. 
*/ - if (newpos2 == src2->head) - redirect_to = src2; - else - { - if (rtl_dump_file) - fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n", - src2->index, nmatch); - redirect_to = split_block (src2, PREV_INSN (newpos2))->dest; - } - - if (rtl_dump_file) - fprintf (rtl_dump_file, - "Cross jumping from bb %i to bb %i; %i common insns\n", - src1->index, src2->index, nmatch); - - redirect_to->count += src1->count; - redirect_to->frequency += src1->frequency; - - /* Recompute the frequencies and counts of outgoing edges. */ - for (s = redirect_to->succ; s; s = s->succ_next) - { - edge s2; - basic_block d = s->dest; - - if (forwarder_block_p (d)) - d = d->succ->dest; - for (s2 = src1->succ; ; s2 = s2->succ_next) - { - basic_block d2 = s2->dest; - if (forwarder_block_p (d2)) - d2 = d2->succ->dest; - if (d == d2) - break; - } - s->count += s2->count; - - /* Take care to update possible forwarder blocks. We verified - that there is no more than one in the chain, so we can't run - into infinite loop. */ - if (forwarder_block_p (s->dest)) - { - s->dest->succ->count += s2->count; - s->dest->count += s2->count; - s->dest->frequency += EDGE_FREQUENCY (s); - } - if (forwarder_block_p (s2->dest)) - { - s2->dest->succ->count -= s2->count; - s2->dest->count -= s2->count; - s2->dest->frequency -= EDGE_FREQUENCY (s); - } - if (!redirect_to->frequency && !src1->frequency) - s->probability = (s->probability + s2->probability) / 2; - else - s->probability = - ((s->probability * redirect_to->frequency + - s2->probability * src1->frequency) - / (redirect_to->frequency + src1->frequency)); - } - - note = find_reg_note (redirect_to->end, REG_BR_PROB, 0); - if (note) - XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability); - - /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */ - - /* Skip possible basic block header. 
*/ - if (GET_CODE (newpos1) == CODE_LABEL) - newpos1 = NEXT_INSN (newpos1); - if (GET_CODE (newpos1) == NOTE) - newpos1 = NEXT_INSN (newpos1); - last = src1->end; - - /* Emit the jump insn. */ - label = block_label (redirect_to); - src1->end = emit_jump_insn_before (gen_jump (label), newpos1); - JUMP_LABEL (src1->end) = label; - LABEL_NUSES (label)++; - if (basic_block_for_insn) - set_block_for_new_insns (src1->end, src1); - - /* Delete the now unreachable instructions. */ - flow_delete_insn_chain (newpos1, last); - - /* Make sure there is a barrier after the new jump. */ - last = next_nonnote_insn (src1->end); - if (!last || GET_CODE (last) != BARRIER) - emit_barrier_after (src1->end); - - /* Update CFG. */ - while (src1->succ) - remove_edge (src1->succ); - make_edge (NULL, src1, redirect_to, 0); - src1->succ->probability = REG_BR_PROB_BASE; - src1->succ->count = src1->count; - - return true; -} - -/* Search the predecessors of BB for common insn sequences. When found, - share code between them by redirecting control flow. Return true if - any changes made. */ - -static bool -try_crossjump_bb (mode, bb) - int mode; - basic_block bb; -{ - edge e, e2, nexte2, nexte, fallthru; - bool changed; - - /* Nothing to do if there is not at least two incomming edges. */ - if (!bb->pred || !bb->pred->pred_next) - return false; - - /* It is always cheapest to redirect a block that ends in a branch to - a block that falls through into BB, as that adds no branches to the - program. We'll try that combination first. */ - for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next) - if (fallthru->flags & EDGE_FALLTHRU) - break; - - changed = false; - for (e = bb->pred; e; e = nexte) - { - nexte = e->pred_next; - - /* Elide complex edges now, as neither try_crossjump_to_edge - nor outgoing_edges_match can handle them. */ - if (e->flags & EDGE_COMPLEX) - continue; - - /* As noted above, first try with the fallthru predecessor. 
*/ - if (fallthru) - { - /* Don't combine the fallthru edge into anything else. - If there is a match, we'll do it the other way around. */ - if (e == fallthru) - continue; - - if (try_crossjump_to_edge (mode, e, fallthru)) - { - changed = true; - nexte = bb->pred; - continue; - } - } - - /* Non-obvious work limiting check: Recognize that we're going - to call try_crossjump_bb on every basic block. So if we have - two blocks with lots of outgoing edges (a switch) and they - share lots of common destinations, then we would do the - cross-jump check once for each common destination. - - Now, if the blocks actually are cross-jump candidates, then - all of their destinations will be shared. Which means that - we only need check them for cross-jump candidacy once. We - can eliminate redundant checks of crossjump(A,B) by arbitrarily - choosing to do the check from the block for which the edge - in question is the first successor of A. */ - if (e->src->succ != e) - continue; - - for (e2 = bb->pred; e2; e2 = nexte2) - { - nexte2 = e2->pred_next; - - if (e2 == e) - continue; - - /* We've already checked the fallthru edge above. */ - if (e2 == fallthru) - continue; - - /* Again, neither try_crossjump_to_edge nor outgoing_edges_match - can handle complex edges. */ - if (e2->flags & EDGE_COMPLEX) - continue; - - /* The "first successor" check above only prevents multiple - checks of crossjump(A,B). In order to prevent redundant - checks of crossjump(B,A), require that A be the block - with the lowest index. */ - if (e->src->index > e2->src->index) - continue; - - if (try_crossjump_to_edge (mode, e, e2)) - { - changed = true; - nexte = bb->pred; - break; - } - } - } - - return changed; -} - -/* Do simple CFG optimizations - basic block merging, simplifying of jump - instructions etc. Return nonzero if changes were made. 
*/ - -static bool -try_optimize_cfg (mode) - int mode; -{ - int i; - bool changed_overall = false; - bool changed; - int iterations = 0; - - /* Attempt to merge blocks as made possible by edge removal. If a block - has only one successor, and the successor has only one predecessor, - they may be combined. */ - - do - { - changed = false; - iterations++; - - if (rtl_dump_file) - fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n", - iterations); - - for (i = 0; i < n_basic_blocks;) - { - basic_block c, b = BASIC_BLOCK (i); - edge s; - bool changed_here = false; - - /* Delete trivially dead basic blocks. */ - while (b->pred == NULL) - { - c = BASIC_BLOCK (b->index - 1); - if (rtl_dump_file) - fprintf (rtl_dump_file, "Deleting block %i.\n", b->index); - flow_delete_block (b); - changed = true; - b = c; - } - - /* Remove code labels no longer used. Don't do this before - CALL_PLACEHOLDER is removed, as some branches may be hidden - within. */ - if (b->pred->pred_next == NULL - && (b->pred->flags & EDGE_FALLTHRU) - && !(b->pred->flags & EDGE_COMPLEX) - && GET_CODE (b->head) == CODE_LABEL - && (!(mode & CLEANUP_PRE_SIBCALL) - || !tail_recursion_label_p (b->head)) - /* If previous block ends with condjump jumping to next BB, - we can't delete the label. */ - && (b->pred->src == ENTRY_BLOCK_PTR - || !reg_mentioned_p (b->head, b->pred->src->end))) - { - rtx label = b->head; - b->head = NEXT_INSN (b->head); - flow_delete_insn_chain (label, label); - if (rtl_dump_file) - fprintf (rtl_dump_file, "Deleted label in block %i.\n", - b->index); - } - - /* If we fall through an empty block, we can remove it. */ - if (b->pred->pred_next == NULL - && (b->pred->flags & EDGE_FALLTHRU) - && GET_CODE (b->head) != CODE_LABEL - && forwarder_block_p (b) - /* Note that forwarder_block_p true ensures that there - is a successor for this block. 
*/ - && (b->succ->flags & EDGE_FALLTHRU) - && n_basic_blocks > 1) - { - if (rtl_dump_file) - fprintf (rtl_dump_file, "Deleting fallthru block %i.\n", - b->index); - c = BASIC_BLOCK (b->index ? b->index - 1 : 1); - redirect_edge_succ_nodup (b->pred, b->succ->dest); - flow_delete_block (b); - changed = true; - b = c; - } - - /* Merge blocks. Loop because chains of blocks might be - combineable. */ - while ((s = b->succ) != NULL - && s->succ_next == NULL - && !(s->flags & EDGE_COMPLEX) - && (c = s->dest) != EXIT_BLOCK_PTR - && c->pred->pred_next == NULL - /* If the jump insn has side effects, - we can't kill the edge. */ - && (GET_CODE (b->end) != JUMP_INSN - || onlyjump_p (b->end)) - && merge_blocks (s, b, c, mode)) - changed_here = true; - - /* Simplify branch over branch. */ - if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b)) - changed_here = true; - - /* If B has a single outgoing edge, but uses a non-trivial jump - instruction without side-effects, we can either delete the - jump entirely, or replace it with a simple unconditional jump. - Use redirect_edge_and_branch to do the dirty work. */ - if (b->succ - && ! b->succ->succ_next - && b->succ->dest != EXIT_BLOCK_PTR - && onlyjump_p (b->end) - && redirect_edge_and_branch (b->succ, b->succ->dest)) - changed_here = true; - - /* Simplify branch to branch. */ - if (try_forward_edges (mode, b)) - changed_here = true; - - /* Look for shared code between blocks. */ - if ((mode & CLEANUP_CROSSJUMP) - && try_crossjump_bb (mode, b)) - changed_here = true; - - /* Don't get confused by the index shift caused by deleting - blocks. */ - if (!changed_here) - i = b->index + 1; - else - changed = true; - } - - if ((mode & CLEANUP_CROSSJUMP) - && try_crossjump_bb (mode, EXIT_BLOCK_PTR)) - changed = true; - -#ifdef ENABLE_CHECKING - if (changed) - verify_flow_info (); -#endif - - changed_overall |= changed; - } - while (changed); - return changed_overall; -} - -/* The given edge should potentially be a fallthru edge. 
If that is in - fact true, delete the jump and barriers that are in the way. */ - -void -tidy_fallthru_edge (e, b, c) - edge e; - basic_block b, c; -{ - rtx q; - - /* ??? In a late-running flow pass, other folks may have deleted basic - blocks by nopping out blocks, leaving multiple BARRIERs between here - and the target label. They ought to be chastized and fixed. - - We can also wind up with a sequence of undeletable labels between - one block and the next. - - So search through a sequence of barriers, labels, and notes for - the head of block C and assert that we really do fall through. */ - - if (next_real_insn (b->end) != next_real_insn (PREV_INSN (c->head))) - return; - - /* Remove what will soon cease being the jump insn from the source block. - If block B consisted only of this single jump, turn it into a deleted - note. */ - q = b->end; - if (GET_CODE (q) == JUMP_INSN - && onlyjump_p (q) - && (any_uncondjump_p (q) - || (b->succ == e && e->succ_next == NULL))) - { -#ifdef HAVE_cc0 - /* If this was a conditional jump, we need to also delete - the insn that set cc0. */ - if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q))) - q = PREV_INSN (q); -#endif - - if (b->head == q) - { - PUT_CODE (q, NOTE); - NOTE_LINE_NUMBER (q) = NOTE_INSN_DELETED; - NOTE_SOURCE_FILE (q) = 0; - } - else - { - q = PREV_INSN (q); - - /* We don't want a block to end on a line-number note since that has - the potential of changing the code between -g and not -g. */ - while (GET_CODE (q) == NOTE && NOTE_LINE_NUMBER (q) >= 0) - q = PREV_INSN (q); - } - - b->end = q; - } - - /* Selectively unlink the sequence. */ - if (q != PREV_INSN (c->head)) - flow_delete_insn_chain (NEXT_INSN (q), PREV_INSN (c->head)); - - e->flags |= EDGE_FALLTHRU; -} - -/* Fix up edges that now fall through, or rather should now fall through - but previously required a jump around now deleted blocks. 
Simplify - the search by only examining blocks numerically adjacent, since this - is how find_basic_blocks created them. */ - -static void -tidy_fallthru_edges () -{ - int i; - - for (i = 1; i < n_basic_blocks; ++i) - { - basic_block b = BASIC_BLOCK (i - 1); - basic_block c = BASIC_BLOCK (i); - edge s; - - /* We care about simple conditional or unconditional jumps with - a single successor. - - If we had a conditional branch to the next instruction when - find_basic_blocks was called, then there will only be one - out edge for the block which ended with the conditional - branch (since we do not create duplicate edges). - - Furthermore, the edge will be marked as a fallthru because we - merge the flags for the duplicate edges. So we do not want to - check that the edge is not a FALLTHRU edge. */ - if ((s = b->succ) != NULL - && ! (s->flags & EDGE_COMPLEX) - && s->succ_next == NULL - && s->dest == c - /* If the jump insn has side effects, we can't tidy the edge. */ - && (GET_CODE (b->end) != JUMP_INSN - || onlyjump_p (b->end))) - tidy_fallthru_edge (s, b, c); - } -} /* Perform data flow analysis. F is the first insn of the function; FLAGS is a set of PROP_* flags @@ -4541,11 +663,8 @@ update_life_info (blocks, extent, prop_flags) | PROP_KILL_DEAD_CODE)); } - if (! changed || ! try_optimize_cfg (CLEANUP_EXPENSIVE)) + if (! changed || ! cleanup_cfg (CLEANUP_EXPENSIVE)) break; - - delete_unreachable_blocks (); - mark_critical_edges (); } /* If asked, remove notes from the blocks we'll update. 
*/ @@ -7851,286 +3970,6 @@ debug_regset (r) putc ('\n', stderr); } -void -dump_flow_info (file) - FILE *file; -{ - register int i; - static const char * const reg_class_names[] = REG_CLASS_NAMES; - - fprintf (file, "%d registers.\n", max_regno); - for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) - if (REG_N_REFS (i)) - { - enum reg_class class, altclass; - fprintf (file, "\nRegister %d used %d times across %d insns", - i, REG_N_REFS (i), REG_LIVE_LENGTH (i)); - if (REG_BASIC_BLOCK (i) >= 0) - fprintf (file, " in block %d", REG_BASIC_BLOCK (i)); - if (REG_N_SETS (i)) - fprintf (file, "; set %d time%s", REG_N_SETS (i), - (REG_N_SETS (i) == 1) ? "" : "s"); - if (REG_USERVAR_P (regno_reg_rtx[i])) - fprintf (file, "; user var"); - if (REG_N_DEATHS (i) != 1) - fprintf (file, "; dies in %d places", REG_N_DEATHS (i)); - if (REG_N_CALLS_CROSSED (i) == 1) - fprintf (file, "; crosses 1 call"); - else if (REG_N_CALLS_CROSSED (i)) - fprintf (file, "; crosses %d calls", REG_N_CALLS_CROSSED (i)); - if (PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD) - fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i)); - class = reg_preferred_class (i); - altclass = reg_alternate_class (i); - if (class != GENERAL_REGS || altclass != ALL_REGS) - { - if (altclass == ALL_REGS || class == ALL_REGS) - fprintf (file, "; pref %s", reg_class_names[(int) class]); - else if (altclass == NO_REGS) - fprintf (file, "; %s or none", reg_class_names[(int) class]); - else - fprintf (file, "; pref %s, else %s", - reg_class_names[(int) class], - reg_class_names[(int) altclass]); - } - if (REG_POINTER (regno_reg_rtx[i])) - fprintf (file, "; pointer"); - fprintf (file, ".\n"); - } - - fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges); - for (i = 0; i < n_basic_blocks; i++) - { - register basic_block bb = BASIC_BLOCK (i); - register edge e; - - fprintf (file, "\nBasic block %d: first insn %d, last %d, loop_depth %d, count ", - i, INSN_UID (bb->head), INSN_UID (bb->end), bb->loop_depth); - 
fprintf (file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count); - fprintf (file, ", freq %i.\n", bb->frequency); - - fprintf (file, "Predecessors: "); - for (e = bb->pred; e; e = e->pred_next) - dump_edge_info (file, e, 0); - - fprintf (file, "\nSuccessors: "); - for (e = bb->succ; e; e = e->succ_next) - dump_edge_info (file, e, 1); - - fprintf (file, "\nRegisters live at start:"); - dump_regset (bb->global_live_at_start, file); - - fprintf (file, "\nRegisters live at end:"); - dump_regset (bb->global_live_at_end, file); - - putc ('\n', file); - } - - putc ('\n', file); -} - -void -debug_flow_info () -{ - dump_flow_info (stderr); -} - -void -dump_edge_info (file, e, do_succ) - FILE *file; - edge e; - int do_succ; -{ - basic_block side = (do_succ ? e->dest : e->src); - - if (side == ENTRY_BLOCK_PTR) - fputs (" ENTRY", file); - else if (side == EXIT_BLOCK_PTR) - fputs (" EXIT", file); - else - fprintf (file, " %d", side->index); - - if (e->probability) - fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE); - - if (e->count) - { - fprintf (file, " count:"); - fprintf (file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) e->count); - } - - if (e->flags) - { - static const char * const bitnames[] = { - "fallthru", "crit", "ab", "abcall", "eh", "fake", "dfs_back" - }; - int comma = 0; - int i, flags = e->flags; - - fputc (' ', file); - fputc ('(', file); - for (i = 0; flags; i++) - if (flags & (1 << i)) - { - flags &= ~(1 << i); - - if (comma) - fputc (',', file); - if (i < (int) ARRAY_SIZE (bitnames)) - fputs (bitnames[i], file); - else - fprintf (file, "%d", i); - comma = 1; - } - fputc (')', file); - } -} - -/* Print out one basic block with live information at start and end. 
*/ - -void -dump_bb (bb, outf) - basic_block bb; - FILE *outf; -{ - rtx insn; - rtx last; - edge e; - - fprintf (outf, ";; Basic block %d, loop depth %d, count ", - bb->index, bb->loop_depth); - fprintf (outf, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count); - putc ('\n', outf); - - fputs (";; Predecessors: ", outf); - for (e = bb->pred; e; e = e->pred_next) - dump_edge_info (outf, e, 0); - putc ('\n', outf); - - fputs (";; Registers live at start:", outf); - dump_regset (bb->global_live_at_start, outf); - putc ('\n', outf); - - for (insn = bb->head, last = NEXT_INSN (bb->end); - insn != last; - insn = NEXT_INSN (insn)) - print_rtl_single (outf, insn); - - fputs (";; Registers live at end:", outf); - dump_regset (bb->global_live_at_end, outf); - putc ('\n', outf); - - fputs (";; Successors: ", outf); - for (e = bb->succ; e; e = e->succ_next) - dump_edge_info (outf, e, 1); - putc ('\n', outf); -} - -void -debug_bb (bb) - basic_block bb; -{ - dump_bb (bb, stderr); -} - -void -debug_bb_n (n) - int n; -{ - dump_bb (BASIC_BLOCK (n), stderr); -} - -/* Like print_rtl, but also print out live information for the start of each - basic block. 
*/ - -void -print_rtl_with_bb (outf, rtx_first) - FILE *outf; - rtx rtx_first; -{ - register rtx tmp_rtx; - - if (rtx_first == 0) - fprintf (outf, "(nil)\n"); - else - { - int i; - enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB }; - int max_uid = get_max_uid (); - basic_block *start = (basic_block *) - xcalloc (max_uid, sizeof (basic_block)); - basic_block *end = (basic_block *) - xcalloc (max_uid, sizeof (basic_block)); - enum bb_state *in_bb_p = (enum bb_state *) - xcalloc (max_uid, sizeof (enum bb_state)); - - for (i = n_basic_blocks - 1; i >= 0; i--) - { - basic_block bb = BASIC_BLOCK (i); - rtx x; - - start[INSN_UID (bb->head)] = bb; - end[INSN_UID (bb->end)] = bb; - for (x = bb->head; x != NULL_RTX; x = NEXT_INSN (x)) - { - enum bb_state state = IN_MULTIPLE_BB; - if (in_bb_p[INSN_UID (x)] == NOT_IN_BB) - state = IN_ONE_BB; - in_bb_p[INSN_UID (x)] = state; - - if (x == bb->end) - break; - } - } - - for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx)) - { - int did_output; - basic_block bb; - - if ((bb = start[INSN_UID (tmp_rtx)]) != NULL) - { - fprintf (outf, ";; Start of basic block %d, registers live:", - bb->index); - dump_regset (bb->global_live_at_start, outf); - putc ('\n', outf); - } - - if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB - && GET_CODE (tmp_rtx) != NOTE - && GET_CODE (tmp_rtx) != BARRIER) - fprintf (outf, ";; Insn is not within a basic block\n"); - else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB) - fprintf (outf, ";; Insn is in multiple basic blocks\n"); - - did_output = print_rtl_single (outf, tmp_rtx); - - if ((bb = end[INSN_UID (tmp_rtx)]) != NULL) - { - fprintf (outf, ";; End of basic block %d, registers live:\n", - bb->index); - dump_regset (bb->global_live_at_end, outf); - putc ('\n', outf); - } - - if (did_output) - putc ('\n', outf); - } - - free (start); - free (end); - free (in_bb_p); - } - - if (current_function_epilogue_delay_list != 0) - { - fprintf (outf, "\n;; Insns in epilogue delay 
list:\n\n"); - for (tmp_rtx = current_function_epilogue_delay_list; tmp_rtx != 0; - tmp_rtx = XEXP (tmp_rtx, 1)) - print_rtl_single (outf, XEXP (tmp_rtx, 0)); - } -} - /* Dump the rtl into the current debugging dump file, then abort. */ static void @@ -8247,1971 +4086,6 @@ count_or_remove_death_notes (blocks, kill) return count; } - - -/* Update insns block within BB. */ - -void -update_bb_for_insn (bb) - basic_block bb; -{ - rtx insn; - - if (! basic_block_for_insn) - return; - - for (insn = bb->head; ; insn = NEXT_INSN (insn)) - { - set_block_for_insn (insn, bb); - - if (insn == bb->end) - break; - } -} - - -/* Record INSN's block as BB. */ - -void -set_block_for_insn (insn, bb) - rtx insn; - basic_block bb; -{ - size_t uid = INSN_UID (insn); - if (uid >= basic_block_for_insn->num_elements) - { - int new_size; - - /* Add one-eighth the size so we don't keep calling xrealloc. */ - new_size = uid + (uid + 7) / 8; - - VARRAY_GROW (basic_block_for_insn, new_size); - } - VARRAY_BB (basic_block_for_insn, uid) = bb; -} - -/* When a new insn has been inserted into an existing block, it will - sometimes emit more than a single insn. This routine will set the - block number for the specified insn, and look backwards in the insn - chain to see if there are any other uninitialized insns immediately - previous to this one, and set the block number for them too. */ - -void -set_block_for_new_insns (insn, bb) - rtx insn; - basic_block bb; -{ - set_block_for_insn (insn, bb); - - /* Scan the previous instructions setting the block number until we find - an instruction that has the block number set, or we find a note - of any kind. */ - for (insn = PREV_INSN (insn); insn != NULL_RTX; insn = PREV_INSN (insn)) - { - if (GET_CODE (insn) == NOTE) - break; - if ((unsigned) INSN_UID (insn) >= basic_block_for_insn->num_elements - || BLOCK_FOR_INSN (insn) == 0) - set_block_for_insn (insn, bb); - else - break; - } -} - -/* Verify the CFG consistency. 
This function check some CFG invariants and - aborts when something is wrong. Hope that this function will help to - convert many optimization passes to preserve CFG consistent. - - Currently it does following checks: - - - test head/end pointers - - overlapping of basic blocks - - edge list correctness - - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note) - - tails of basic blocks (ensure that boundary is necesary) - - scans body of the basic block for JUMP_INSN, CODE_LABEL - and NOTE_INSN_BASIC_BLOCK - - check that all insns are in the basic blocks - (except the switch handling code, barriers and notes) - - check that all returns are followed by barriers - - In future it can be extended check a lot of other stuff as well - (reachability of basic blocks, life information, etc. etc.). */ - -void -verify_flow_info () -{ - const int max_uid = get_max_uid (); - const rtx rtx_first = get_insns (); - rtx last_head = get_last_insn (); - basic_block *bb_info, *last_visited; - size_t *edge_checksum; - rtx x; - int i, last_bb_num_seen, num_bb_notes, err = 0; - - bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block)); - last_visited = (basic_block *) xcalloc (n_basic_blocks + 2, - sizeof (basic_block)); - edge_checksum = (size_t *) xcalloc (n_basic_blocks + 2, sizeof (size_t)); - - for (i = n_basic_blocks - 1; i >= 0; i--) - { - basic_block bb = BASIC_BLOCK (i); - rtx head = bb->head; - rtx end = bb->end; - - /* Verify the end of the basic block is in the INSN chain. */ - for (x = last_head; x != NULL_RTX; x = PREV_INSN (x)) - if (x == end) - break; - if (!x) - { - error ("End insn %d for block %d not found in the insn stream.", - INSN_UID (end), bb->index); - err = 1; - } - - /* Work backwards from the end to the head of the basic block - to verify the head is in the RTL chain. 
*/ - for (; x != NULL_RTX; x = PREV_INSN (x)) - { - /* While walking over the insn chain, verify insns appear - in only one basic block and initialize the BB_INFO array - used by other passes. */ - if (bb_info[INSN_UID (x)] != NULL) - { - error ("Insn %d is in multiple basic blocks (%d and %d)", - INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index); - err = 1; - } - bb_info[INSN_UID (x)] = bb; - - if (x == head) - break; - } - if (!x) - { - error ("Head insn %d for block %d not found in the insn stream.", - INSN_UID (head), bb->index); - err = 1; - } - - last_head = x; - } - - /* Now check the basic blocks (boundaries etc.) */ - for (i = n_basic_blocks - 1; i >= 0; i--) - { - basic_block bb = BASIC_BLOCK (i); - int has_fallthru = 0; - edge e; - - e = bb->succ; - while (e) - { - if (last_visited [e->dest->index + 2] == bb) - { - error ("verify_flow_info: Duplicate edge %i->%i", - e->src->index, e->dest->index); - err = 1; - } - last_visited [e->dest->index + 2] = bb; - - if (e->flags & EDGE_FALLTHRU) - has_fallthru = 1; - - if ((e->flags & EDGE_FALLTHRU) - && e->src != ENTRY_BLOCK_PTR - && e->dest != EXIT_BLOCK_PTR) - { - rtx insn; - if (e->src->index + 1 != e->dest->index) - { - error ("verify_flow_info: Incorrect blocks for fallthru %i->%i", - e->src->index, e->dest->index); - err = 1; - } - else - for (insn = NEXT_INSN (e->src->end); insn != e->dest->head; - insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == BARRIER || INSN_P (insn)) - { - error ("verify_flow_info: Incorrect fallthru %i->%i", - e->src->index, e->dest->index); - fatal_insn ("Wrong insn in the fallthru edge", insn); - err = 1; - } - } - if (e->src != bb) - { - error ("verify_flow_info: Basic block %d succ edge is corrupted", - bb->index); - fprintf (stderr, "Predecessor: "); - dump_edge_info (stderr, e, 0); - fprintf (stderr, "\nSuccessor: "); - dump_edge_info (stderr, e, 1); - fprintf (stderr, "\n"); - err = 1; - } - edge_checksum[e->dest->index + 2] += (size_t) e; - e = e->succ_next; - } - if 
(!has_fallthru) - { - rtx insn = bb->end; - - /* Ensure existence of barrier in BB with no fallthru edges. */ - for (insn = bb->end; GET_CODE (insn) != BARRIER; - insn = NEXT_INSN (insn)) - if (!insn - || (GET_CODE (insn) == NOTE - && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)) - { - error ("Missing barrier after block %i", bb->index); - err = 1; - } - } - - e = bb->pred; - while (e) - { - if (e->dest != bb) - { - error ("Basic block %d pred edge is corrupted", bb->index); - fputs ("Predecessor: ", stderr); - dump_edge_info (stderr, e, 0); - fputs ("\nSuccessor: ", stderr); - dump_edge_info (stderr, e, 1); - fputc ('\n', stderr); - err = 1; - } - edge_checksum[e->dest->index + 2] -= (size_t) e; - e = e->pred_next; - } - - /* OK pointers are correct. Now check the header of basic - block. It ought to contain optional CODE_LABEL followed - by NOTE_BASIC_BLOCK. */ - x = bb->head; - if (GET_CODE (x) == CODE_LABEL) - { - if (bb->end == x) - { - error ("NOTE_INSN_BASIC_BLOCK is missing for block %d", - bb->index); - err = 1; - } - x = NEXT_INSN (x); - } - if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb) - { - error ("NOTE_INSN_BASIC_BLOCK is missing for block %d", - bb->index); - err = 1; - } - - if (bb->end == x) - { - /* Do checks for empty blocks here */ - } - else - { - x = NEXT_INSN (x); - while (x) - { - if (NOTE_INSN_BASIC_BLOCK_P (x)) - { - error ("NOTE_INSN_BASIC_BLOCK %d in the middle of basic block %d", - INSN_UID (x), bb->index); - err = 1; - } - - if (x == bb->end) - break; - - if (GET_CODE (x) == JUMP_INSN - || GET_CODE (x) == CODE_LABEL - || GET_CODE (x) == BARRIER) - { - error ("In basic block %d:", bb->index); - fatal_insn ("Flow control insn inside a basic block", x); - } - - x = NEXT_INSN (x); - } - } - } - - /* Complete edge checksumming for ENTRY and EXIT. 
*/ - { - edge e; - for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) - edge_checksum[e->dest->index + 2] += (size_t) e; - for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) - edge_checksum[e->dest->index + 2] -= (size_t) e; - } - - for (i = -2; i < n_basic_blocks; ++i) - if (edge_checksum[i + 2]) - { - error ("Basic block %i edge lists are corrupted", i); - err = 1; - } - - last_bb_num_seen = -1; - num_bb_notes = 0; - x = rtx_first; - while (x) - { - if (NOTE_INSN_BASIC_BLOCK_P (x)) - { - basic_block bb = NOTE_BASIC_BLOCK (x); - num_bb_notes++; - if (bb->index != last_bb_num_seen + 1) - internal_error ("Basic blocks not numbered consecutively."); - - last_bb_num_seen = bb->index; - } - - if (!bb_info[INSN_UID (x)]) - { - switch (GET_CODE (x)) - { - case BARRIER: - case NOTE: - break; - - case CODE_LABEL: - /* An addr_vec is placed outside any block block. */ - if (NEXT_INSN (x) - && GET_CODE (NEXT_INSN (x)) == JUMP_INSN - && (GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_DIFF_VEC - || GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_VEC)) - { - x = NEXT_INSN (x); - } - - /* But in any case, non-deletable labels can appear anywhere. */ - break; - - default: - fatal_insn ("Insn outside basic block", x); - } - } - - if (INSN_P (x) - && GET_CODE (x) == JUMP_INSN - && returnjump_p (x) && ! condjump_p (x) - && ! (NEXT_INSN (x) && GET_CODE (NEXT_INSN (x)) == BARRIER)) - fatal_insn ("Return not followed by barrier", x); - - x = NEXT_INSN (x); - } - - if (num_bb_notes != n_basic_blocks) - internal_error - ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)", - num_bb_notes, n_basic_blocks); - - if (err) - internal_error ("verify_flow_info failed."); - - /* Clean up. */ - free (bb_info); - free (last_visited); - free (edge_checksum); -} - -/* Functions to access an edge list with a vector representation. 
- Enough data is kept such that given an index number, the - pred and succ that edge represents can be determined, or - given a pred and a succ, its index number can be returned. - This allows algorithms which consume a lot of memory to - represent the normally full matrix of edge (pred,succ) with a - single indexed vector, edge (EDGE_INDEX (pred, succ)), with no - wasted space in the client code due to sparse flow graphs. */ - -/* This functions initializes the edge list. Basically the entire - flowgraph is processed, and all edges are assigned a number, - and the data structure is filled in. */ - -struct edge_list * -create_edge_list () -{ - struct edge_list *elist; - edge e; - int num_edges; - int x; - int block_count; - - block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */ - - num_edges = 0; - - /* Determine the number of edges in the flow graph by counting successor - edges on each basic block. */ - for (x = 0; x < n_basic_blocks; x++) - { - basic_block bb = BASIC_BLOCK (x); - - for (e = bb->succ; e; e = e->succ_next) - num_edges++; - } - /* Don't forget successors of the entry block. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - num_edges++; - - elist = (struct edge_list *) xmalloc (sizeof (struct edge_list)); - elist->num_blocks = block_count; - elist->num_edges = num_edges; - elist->index_to_edge = (edge *) xmalloc (sizeof (edge) * num_edges); - - num_edges = 0; - - /* Follow successors of the entry block, and register these edges. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - { - elist->index_to_edge[num_edges] = e; - num_edges++; - } - - for (x = 0; x < n_basic_blocks; x++) - { - basic_block bb = BASIC_BLOCK (x); - - /* Follow all successors of blocks, and register these edges. */ - for (e = bb->succ; e; e = e->succ_next) - { - elist->index_to_edge[num_edges] = e; - num_edges++; - } - } - return elist; -} - -/* This function free's memory associated with an edge list. 
*/ - -void -free_edge_list (elist) - struct edge_list *elist; -{ - if (elist) - { - free (elist->index_to_edge); - free (elist); - } -} - -/* This function provides debug output showing an edge list. */ - -void -print_edge_list (f, elist) - FILE *f; - struct edge_list *elist; -{ - int x; - fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n", - elist->num_blocks - 2, elist->num_edges); - - for (x = 0; x < elist->num_edges; x++) - { - fprintf (f, " %-4d - edge(", x); - if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR) - fprintf (f, "entry,"); - else - fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index); - - if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR) - fprintf (f, "exit)\n"); - else - fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index); - } -} - -/* This function provides an internal consistency check of an edge list, - verifying that all edges are present, and that there are no - extra edges. */ - -void -verify_edge_list (f, elist) - FILE *f; - struct edge_list *elist; -{ - int x, pred, succ, index; - edge e; - - for (x = 0; x < n_basic_blocks; x++) - { - basic_block bb = BASIC_BLOCK (x); - - for (e = bb->succ; e; e = e->succ_next) - { - pred = e->src->index; - succ = e->dest->index; - index = EDGE_INDEX (elist, e->src, e->dest); - if (index == EDGE_INDEX_NO_EDGE) - { - fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ); - continue; - } - if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) - fprintf (f, "*p* Pred for index %d should be %d not %d\n", - index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); - if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ) - fprintf (f, "*p* Succ for index %d should be %d not %d\n", - index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); - } - } - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - { - pred = e->src->index; - succ = e->dest->index; - index = EDGE_INDEX (elist, e->src, e->dest); - if (index == EDGE_INDEX_NO_EDGE) - { - fprintf (f, "*p* No index 
for edge from %d to %d\n", pred, succ); - continue; - } - if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) - fprintf (f, "*p* Pred for index %d should be %d not %d\n", - index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); - if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ) - fprintf (f, "*p* Succ for index %d should be %d not %d\n", - index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); - } - /* We've verified that all the edges are in the list, no lets make sure - there are no spurious edges in the list. */ - - for (pred = 0; pred < n_basic_blocks; pred++) - for (succ = 0; succ < n_basic_blocks; succ++) - { - basic_block p = BASIC_BLOCK (pred); - basic_block s = BASIC_BLOCK (succ); - - int found_edge = 0; - - for (e = p->succ; e; e = e->succ_next) - if (e->dest == s) - { - found_edge = 1; - break; - } - for (e = s->pred; e; e = e->pred_next) - if (e->src == p) - { - found_edge = 1; - break; - } - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ)) - == EDGE_INDEX_NO_EDGE && found_edge != 0) - fprintf (f, "*** Edge (%d, %d) appears to not have an index\n", - pred, succ); - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ)) - != EDGE_INDEX_NO_EDGE && found_edge == 0) - fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n", - pred, succ, EDGE_INDEX (elist, BASIC_BLOCK (pred), - BASIC_BLOCK (succ))); - } - for (succ = 0; succ < n_basic_blocks; succ++) - { - basic_block p = ENTRY_BLOCK_PTR; - basic_block s = BASIC_BLOCK (succ); - - int found_edge = 0; - - for (e = p->succ; e; e = e->succ_next) - if (e->dest == s) - { - found_edge = 1; - break; - } - for (e = s->pred; e; e = e->pred_next) - if (e->src == p) - { - found_edge = 1; - break; - } - if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ)) - == EDGE_INDEX_NO_EDGE && found_edge != 0) - fprintf (f, "*** Edge (entry, %d) appears to not have an index\n", - succ); - if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ)) - != EDGE_INDEX_NO_EDGE && 
found_edge == 0) - fprintf (f, "*** Edge (entry, %d) has index %d, but no edge exists\n", - succ, EDGE_INDEX (elist, ENTRY_BLOCK_PTR, - BASIC_BLOCK (succ))); - } - for (pred = 0; pred < n_basic_blocks; pred++) - { - basic_block p = BASIC_BLOCK (pred); - basic_block s = EXIT_BLOCK_PTR; - - int found_edge = 0; - - for (e = p->succ; e; e = e->succ_next) - if (e->dest == s) - { - found_edge = 1; - break; - } - for (e = s->pred; e; e = e->pred_next) - if (e->src == p) - { - found_edge = 1; - break; - } - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR) - == EDGE_INDEX_NO_EDGE && found_edge != 0) - fprintf (f, "*** Edge (%d, exit) appears to not have an index\n", - pred); - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR) - != EDGE_INDEX_NO_EDGE && found_edge == 0) - fprintf (f, "*** Edge (%d, exit) has index %d, but no edge exists\n", - pred, EDGE_INDEX (elist, BASIC_BLOCK (pred), - EXIT_BLOCK_PTR)); - } -} - -/* This routine will determine what, if any, edge there is between - a specified predecessor and successor. */ - -int -find_edge_index (edge_list, pred, succ) - struct edge_list *edge_list; - basic_block pred, succ; -{ - int x; - for (x = 0; x < NUM_EDGES (edge_list); x++) - { - if (INDEX_EDGE_PRED_BB (edge_list, x) == pred - && INDEX_EDGE_SUCC_BB (edge_list, x) == succ) - return x; - } - return (EDGE_INDEX_NO_EDGE); -} - -/* This function will remove an edge from the flow graph. 
*/ - -void -remove_edge (e) - edge e; -{ - edge last_pred = NULL; - edge last_succ = NULL; - edge tmp; - basic_block src, dest; - src = e->src; - dest = e->dest; - for (tmp = src->succ; tmp && tmp != e; tmp = tmp->succ_next) - last_succ = tmp; - - if (!tmp) - abort (); - if (last_succ) - last_succ->succ_next = e->succ_next; - else - src->succ = e->succ_next; - - for (tmp = dest->pred; tmp && tmp != e; tmp = tmp->pred_next) - last_pred = tmp; - - if (!tmp) - abort (); - if (last_pred) - last_pred->pred_next = e->pred_next; - else - dest->pred = e->pred_next; - - n_edges--; - free (e); -} - -/* This routine will remove any fake successor edges for a basic block. - When the edge is removed, it is also removed from whatever predecessor - list it is in. */ - -static void -remove_fake_successors (bb) - basic_block bb; -{ - edge e; - for (e = bb->succ; e;) - { - edge tmp = e; - e = e->succ_next; - if ((tmp->flags & EDGE_FAKE) == EDGE_FAKE) - remove_edge (tmp); - } -} - -/* This routine will remove all fake edges from the flow graph. If - we remove all fake successors, it will automatically remove all - fake predecessors. */ - -void -remove_fake_edges () -{ - int x; - - for (x = 0; x < n_basic_blocks; x++) - remove_fake_successors (BASIC_BLOCK (x)); - - /* We've handled all successors except the entry block's. */ - remove_fake_successors (ENTRY_BLOCK_PTR); -} - -/* This function will add a fake edge between any block which has no - successors, and the exit block. Some data flow equations require these - edges to exist. */ - -void -add_noreturn_fake_exit_edges () -{ - int x; - - for (x = 0; x < n_basic_blocks; x++) - if (BASIC_BLOCK (x)->succ == NULL) - make_edge (NULL, BASIC_BLOCK (x), EXIT_BLOCK_PTR, EDGE_FAKE); -} - -/* This function adds a fake edge between any infinite loops to the - exit block. Some optimizations require a path from each node to - the exit node. - - See also Morgan, Figure 3.10, pp. 82-83. 
- - The current implementation is ugly, not attempting to minimize the - number of inserted fake edges. To reduce the number of fake edges - to insert, add fake edges from _innermost_ loops containing only - nodes not reachable from the exit block. */ - -void -connect_infinite_loops_to_exit () -{ - basic_block unvisited_block; - - /* Perform depth-first search in the reverse graph to find nodes - reachable from the exit block. */ - struct depth_first_search_dsS dfs_ds; - - flow_dfs_compute_reverse_init (&dfs_ds); - flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR); - - /* Repeatedly add fake edges, updating the unreachable nodes. */ - while (1) - { - unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds); - if (!unvisited_block) - break; - make_edge (NULL, unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE); - flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block); - } - - flow_dfs_compute_reverse_finish (&dfs_ds); - - return; -} - -/* Redirect an edge's successor from one block to another. */ - -void -redirect_edge_succ (e, new_succ) - edge e; - basic_block new_succ; -{ - edge *pe; - - /* Disconnect the edge from the old successor block. */ - for (pe = &e->dest->pred; *pe != e; pe = &(*pe)->pred_next) - continue; - *pe = (*pe)->pred_next; - - /* Reconnect the edge to the new successor block. */ - e->pred_next = new_succ->pred; - new_succ->pred = e; - e->dest = new_succ; -} - -/* Like previous but avoid possible dupplicate edge. */ - -edge -redirect_edge_succ_nodup (e, new_succ) - edge e; - basic_block new_succ; -{ - edge s; - /* Check whether the edge is already present. */ - for (s = e->src->succ; s; s = s->succ_next) - if (s->dest == new_succ && s != e) - break; - if (s) - { - s->flags |= e->flags; - s->probability += e->probability; - s->count += e->count; - remove_edge (e); - e = s; - } - else - redirect_edge_succ (e, new_succ); - return e; -} - -/* Redirect an edge's predecessor from one block to another. 
*/ - -void -redirect_edge_pred (e, new_pred) - edge e; - basic_block new_pred; -{ - edge *pe; - - /* Disconnect the edge from the old predecessor block. */ - for (pe = &e->src->succ; *pe != e; pe = &(*pe)->succ_next) - continue; - *pe = (*pe)->succ_next; - - /* Reconnect the edge to the new predecessor block. */ - e->succ_next = new_pred->succ; - new_pred->succ = e; - e->src = new_pred; -} - -/* Dump the list of basic blocks in the bitmap NODES. */ - -static void -flow_nodes_print (str, nodes, file) - const char *str; - const sbitmap nodes; - FILE *file; -{ - int node; - - if (! nodes) - return; - - fprintf (file, "%s { ", str); - EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, {fprintf (file, "%d ", node);}); - fputs ("}\n", file); -} - - -/* Dump the list of edges in the array EDGE_LIST. */ - -static void -flow_edge_list_print (str, edge_list, num_edges, file) - const char *str; - const edge *edge_list; - int num_edges; - FILE *file; -{ - int i; - - if (! edge_list) - return; - - fprintf (file, "%s { ", str); - for (i = 0; i < num_edges; i++) - fprintf (file, "%d->%d ", edge_list[i]->src->index, - edge_list[i]->dest->index); - fputs ("}\n", file); -} - - -/* Dump loop related CFG information. */ - -static void -flow_loops_cfg_dump (loops, file) - const struct loops *loops; - FILE *file; -{ - int i; - - if (! loops->num || ! file || ! loops->cfg.dom) - return; - - for (i = 0; i < n_basic_blocks; i++) - { - edge succ; - - fprintf (file, ";; %d succs { ", i); - for (succ = BASIC_BLOCK (i)->succ; succ; succ = succ->succ_next) - fprintf (file, "%d ", succ->dest->index); - flow_nodes_print ("} dom", loops->cfg.dom[i], file); - } - - /* Dump the DFS node order. */ - if (loops->cfg.dfs_order) - { - fputs (";; DFS order: ", file); - for (i = 0; i < n_basic_blocks; i++) - fprintf (file, "%d ", loops->cfg.dfs_order[i]); - fputs ("\n", file); - } - /* Dump the reverse completion node order. 
*/ - if (loops->cfg.rc_order) - { - fputs (";; RC order: ", file); - for (i = 0; i < n_basic_blocks; i++) - fprintf (file, "%d ", loops->cfg.rc_order[i]); - fputs ("\n", file); - } -} - -/* Return non-zero if the nodes of LOOP are a subset of OUTER. */ - -static int -flow_loop_nested_p (outer, loop) - struct loop *outer; - struct loop *loop; -{ - return sbitmap_a_subset_b_p (loop->nodes, outer->nodes); -} - - -/* Dump the loop information specified by LOOP to the stream FILE - using auxiliary dump callback function LOOP_DUMP_AUX if non null. */ -void -flow_loop_dump (loop, file, loop_dump_aux, verbose) - const struct loop *loop; - FILE *file; - void (*loop_dump_aux) PARAMS((const struct loop *, FILE *, int)); - int verbose; -{ - if (! loop || ! loop->header) - return; - - if (loop->first->head && loop->last->end) - fprintf (file, ";;\n;; Loop %d (%d to %d):%s%s\n", - loop->num, INSN_UID (loop->first->head), - INSN_UID (loop->last->end), - loop->shared ? " shared" : "", - loop->invalid ? " invalid" : ""); - else - fprintf (file, ";;\n;; Loop %d:%s%s\n", loop->num, - loop->shared ? " shared" : "", - loop->invalid ? " invalid" : ""); - - fprintf (file, ";; header %d, latch %d, pre-header %d, first %d, last %d\n", - loop->header->index, loop->latch->index, - loop->pre_header ? loop->pre_header->index : -1, - loop->first->index, loop->last->index); - fprintf (file, ";; depth %d, level %d, outer %ld\n", - loop->depth, loop->level, - (long) (loop->outer ? 
loop->outer->num : -1)); - - if (loop->pre_header_edges) - flow_edge_list_print (";; pre-header edges", loop->pre_header_edges, - loop->num_pre_header_edges, file); - flow_edge_list_print (";; entry edges", loop->entry_edges, - loop->num_entries, file); - fprintf (file, ";; %d", loop->num_nodes); - flow_nodes_print (" nodes", loop->nodes, file); - flow_edge_list_print (";; exit edges", loop->exit_edges, - loop->num_exits, file); - if (loop->exits_doms) - flow_nodes_print (";; exit doms", loop->exits_doms, file); - if (loop_dump_aux) - loop_dump_aux (loop, file, verbose); -} - - -/* Dump the loop information specified by LOOPS to the stream FILE, - using auxiliary dump callback function LOOP_DUMP_AUX if non null. */ -void -flow_loops_dump (loops, file, loop_dump_aux, verbose) - const struct loops *loops; - FILE *file; - void (*loop_dump_aux) PARAMS((const struct loop *, FILE *, int)); - int verbose; -{ - int i; - int num_loops; - - num_loops = loops->num; - if (! num_loops || ! file) - return; - - fprintf (file, ";; %d loops found, %d levels\n", - num_loops, loops->levels); - - for (i = 0; i < num_loops; i++) - { - struct loop *loop = &loops->array[i]; - - flow_loop_dump (loop, file, loop_dump_aux, verbose); - - if (loop->shared) - { - int j; - - for (j = 0; j < i; j++) - { - struct loop *oloop = &loops->array[j]; - - if (loop->header == oloop->header) - { - int disjoint; - int smaller; - - smaller = loop->num_nodes < oloop->num_nodes; - - /* If the union of LOOP and OLOOP is different than - the larger of LOOP and OLOOP then LOOP and OLOOP - must be disjoint. */ - disjoint = ! flow_loop_nested_p (smaller ? loop : oloop, - smaller ? oloop : loop); - fprintf (file, - ";; loop header %d shared by loops %d, %d %s\n", - loop->header->index, i, j, - disjoint ? "disjoint" : "nested"); - } - } - } - } - - if (verbose) - flow_loops_cfg_dump (loops, file); -} - - -/* Free all the memory allocated for LOOPS. 
*/ - -void -flow_loops_free (loops) - struct loops *loops; -{ - if (loops->array) - { - int i; - - if (! loops->num) - abort (); - - /* Free the loop descriptors. */ - for (i = 0; i < loops->num; i++) - { - struct loop *loop = &loops->array[i]; - - if (loop->pre_header_edges) - free (loop->pre_header_edges); - if (loop->nodes) - sbitmap_free (loop->nodes); - if (loop->entry_edges) - free (loop->entry_edges); - if (loop->exit_edges) - free (loop->exit_edges); - if (loop->exits_doms) - sbitmap_free (loop->exits_doms); - } - free (loops->array); - loops->array = NULL; - - if (loops->cfg.dom) - sbitmap_vector_free (loops->cfg.dom); - if (loops->cfg.dfs_order) - free (loops->cfg.dfs_order); - - if (loops->shared_headers) - sbitmap_free (loops->shared_headers); - } -} - - -/* Find the entry edges into the loop with header HEADER and nodes - NODES and store in ENTRY_EDGES array. Return the number of entry - edges from the loop. */ - -static int -flow_loop_entry_edges_find (header, nodes, entry_edges) - basic_block header; - const sbitmap nodes; - edge **entry_edges; -{ - edge e; - int num_entries; - - *entry_edges = NULL; - - num_entries = 0; - for (e = header->pred; e; e = e->pred_next) - { - basic_block src = e->src; - - if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index)) - num_entries++; - } - - if (! num_entries) - abort (); - - *entry_edges = (edge *) xmalloc (num_entries * sizeof (edge *)); - - num_entries = 0; - for (e = header->pred; e; e = e->pred_next) - { - basic_block src = e->src; - - if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index)) - (*entry_edges)[num_entries++] = e; - } - - return num_entries; -} - - -/* Find the exit edges from the loop using the bitmap of loop nodes - NODES and store in EXIT_EDGES array. Return the number of - exit edges from the loop. 
*/ - -static int -flow_loop_exit_edges_find (nodes, exit_edges) - const sbitmap nodes; - edge **exit_edges; -{ - edge e; - int node; - int num_exits; - - *exit_edges = NULL; - - /* Check all nodes within the loop to see if there are any - successors not in the loop. Note that a node may have multiple - exiting edges ????? A node can have one jumping edge and one fallthru - edge so only one of these can exit the loop. */ - num_exits = 0; - EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, { - for (e = BASIC_BLOCK (node)->succ; e; e = e->succ_next) - { - basic_block dest = e->dest; - - if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index)) - num_exits++; - } - }); - - if (! num_exits) - return 0; - - *exit_edges = (edge *) xmalloc (num_exits * sizeof (edge *)); - - /* Store all exiting edges into an array. */ - num_exits = 0; - EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, { - for (e = BASIC_BLOCK (node)->succ; e; e = e->succ_next) - { - basic_block dest = e->dest; - - if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index)) - (*exit_edges)[num_exits++] = e; - } - }); - - return num_exits; -} - - -/* Find the nodes contained within the loop with header HEADER and - latch LATCH and store in NODES. Return the number of nodes within - the loop. */ - -static int -flow_loop_nodes_find (header, latch, nodes) - basic_block header; - basic_block latch; - sbitmap nodes; -{ - basic_block *stack; - int sp; - int num_nodes = 0; - - stack = (basic_block *) xmalloc (n_basic_blocks * sizeof (basic_block)); - sp = 0; - - /* Start with only the loop header in the set of loop nodes. */ - sbitmap_zero (nodes); - SET_BIT (nodes, header->index); - num_nodes++; - header->loop_depth++; - - /* Push the loop latch on to the stack. */ - if (! 
TEST_BIT (nodes, latch->index)) - { - SET_BIT (nodes, latch->index); - latch->loop_depth++; - num_nodes++; - stack[sp++] = latch; - } - - while (sp) - { - basic_block node; - edge e; - - node = stack[--sp]; - for (e = node->pred; e; e = e->pred_next) - { - basic_block ancestor = e->src; - - /* If each ancestor not marked as part of loop, add to set of - loop nodes and push on to stack. */ - if (ancestor != ENTRY_BLOCK_PTR - && ! TEST_BIT (nodes, ancestor->index)) - { - SET_BIT (nodes, ancestor->index); - ancestor->loop_depth++; - num_nodes++; - stack[sp++] = ancestor; - } - } - } - free (stack); - return num_nodes; -} - -/* Compute reverse top sort order */ -void -flow_reverse_top_sort_order_compute (rts_order) - int *rts_order; -{ - edge *stack; - int sp; - int postnum = 0; - sbitmap visited; - - /* Allocate stack for back-tracking up CFG. */ - stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); - sp = 0; - - /* Allocate bitmap to track nodes that have been visited. */ - visited = sbitmap_alloc (n_basic_blocks); - - /* None of the nodes in the CFG have been visited yet. */ - sbitmap_zero (visited); - - /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; - - while (sp) - { - edge e; - basic_block src; - basic_block dest; - - /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; - - /* Check if the edge destination has been visited yet. */ - if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) - { - /* Mark that we have visited the destination. */ - SET_BIT (visited, dest->index); - - if (dest->succ) - { - /* Since the DEST node has been visited for the first - time, check its successors. */ - stack[sp++] = dest->succ; - } - else - rts_order[postnum++] = dest->index; - } - else - { - if (! 
e->succ_next && src != ENTRY_BLOCK_PTR) - rts_order[postnum++] = src->index; - - if (e->succ_next) - stack[sp - 1] = e->succ_next; - else - sp--; - } - } - - free (stack); - sbitmap_free (visited); -} - -/* Compute the depth first search order and store in the array - DFS_ORDER if non-zero, marking the nodes visited in VISITED. If - RC_ORDER is non-zero, return the reverse completion number for each - node. Returns the number of nodes visited. A depth first search - tries to get as far away from the starting point as quickly as - possible. */ - -int -flow_depth_first_order_compute (dfs_order, rc_order) - int *dfs_order; - int *rc_order; -{ - edge *stack; - int sp; - int dfsnum = 0; - int rcnum = n_basic_blocks - 1; - sbitmap visited; - - /* Allocate stack for back-tracking up CFG. */ - stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); - sp = 0; - - /* Allocate bitmap to track nodes that have been visited. */ - visited = sbitmap_alloc (n_basic_blocks); - - /* None of the nodes in the CFG have been visited yet. */ - sbitmap_zero (visited); - - /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; - - while (sp) - { - edge e; - basic_block src; - basic_block dest; - - /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; - - /* Check if the edge destination has been visited yet. */ - if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) - { - /* Mark that we have visited the destination. */ - SET_BIT (visited, dest->index); - - if (dfs_order) - dfs_order[dfsnum++] = dest->index; - - if (dest->succ) - { - /* Since the DEST node has been visited for the first - time, check its successors. */ - stack[sp++] = dest->succ; - } - else - { - /* There are no successors for the DEST node so assign - its reverse completion number. */ - if (rc_order) - rc_order[rcnum--] = dest->index; - } - } - else - { - if (! 
e->succ_next && src != ENTRY_BLOCK_PTR) - { - /* There are no more successors for the SRC node - so assign its reverse completion number. */ - if (rc_order) - rc_order[rcnum--] = src->index; - } - - if (e->succ_next) - stack[sp - 1] = e->succ_next; - else - sp--; - } - } - - free (stack); - sbitmap_free (visited); - - /* The number of nodes visited should not be greater than - n_basic_blocks. */ - if (dfsnum > n_basic_blocks) - abort (); - - /* There are some nodes left in the CFG that are unreachable. */ - if (dfsnum < n_basic_blocks) - abort (); - return dfsnum; -} - -/* Compute the depth first search order on the _reverse_ graph and - store in the array DFS_ORDER, marking the nodes visited in VISITED. - Returns the number of nodes visited. - - The computation is split into three pieces: - - flow_dfs_compute_reverse_init () creates the necessary data - structures. - - flow_dfs_compute_reverse_add_bb () adds a basic block to the data - structures. The block will start the search. - - flow_dfs_compute_reverse_execute () continues (or starts) the - search using the block on the top of the stack, stopping when the - stack is empty. - - flow_dfs_compute_reverse_finish () destroys the necessary data - structures. - - Thus, the user will probably call ..._init(), call ..._add_bb() to - add a beginning basic block to the stack, call ..._execute(), - possibly add another bb to the stack and again call ..._execute(), - ..., and finally call _finish(). */ - -/* Initialize the data structures used for depth-first search on the - reverse graph. If INITIALIZE_STACK is nonzero, the exit block is - added to the basic block stack. DATA is the current depth-first - search context. If INITIALIZE_STACK is non-zero, there is an - element on the stack. */ - -static void -flow_dfs_compute_reverse_init (data) - depth_first_search_ds data; -{ - /* Allocate stack for back-tracking up CFG. 
*/ - data->stack = - (basic_block *) xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) - * sizeof (basic_block)); - data->sp = 0; - - /* Allocate bitmap to track nodes that have been visited. */ - data->visited_blocks = sbitmap_alloc (n_basic_blocks - (INVALID_BLOCK + 1)); - - /* None of the nodes in the CFG have been visited yet. */ - sbitmap_zero (data->visited_blocks); - - return; -} - -/* Add the specified basic block to the top of the dfs data - structures. When the search continues, it will start at the - block. */ - -static void -flow_dfs_compute_reverse_add_bb (data, bb) - depth_first_search_ds data; - basic_block bb; -{ - data->stack[data->sp++] = bb; - return; -} - -/* Continue the depth-first search through the reverse graph starting - with the block at the stack's top and ending when the stack is - empty. Visited nodes are marked. Returns an unvisited basic - block, or NULL if there is none available. */ - -static basic_block -flow_dfs_compute_reverse_execute (data) - depth_first_search_ds data; -{ - basic_block bb; - edge e; - int i; - - while (data->sp > 0) - { - bb = data->stack[--data->sp]; - - /* Mark that we have visited this node. */ - if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1))) - { - SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)); - - /* Perform depth-first search on adjacent vertices. */ - for (e = bb->pred; e; e = e->pred_next) - flow_dfs_compute_reverse_add_bb (data, e->src); - } - } - - /* Determine if there are unvisited basic blocks. */ - for (i = n_basic_blocks - (INVALID_BLOCK + 1); --i >= 0;) - if (!TEST_BIT (data->visited_blocks, i)) - return BASIC_BLOCK (i + (INVALID_BLOCK + 1)); - return NULL; -} - -/* Destroy the data structures needed for depth-first search on the - reverse graph. 
*/ - -static void -flow_dfs_compute_reverse_finish (data) - depth_first_search_ds data; -{ - free (data->stack); - sbitmap_free (data->visited_blocks); - return; -} - - -/* Find the root node of the loop pre-header extended basic block and - the edges along the trace from the root node to the loop header. */ - -static void -flow_loop_pre_header_scan (loop) - struct loop *loop; -{ - int num = 0; - basic_block ebb; - - loop->num_pre_header_edges = 0; - - if (loop->num_entries != 1) - return; - - ebb = loop->entry_edges[0]->src; - - if (ebb != ENTRY_BLOCK_PTR) - { - edge e; - - /* Count number of edges along trace from loop header to - root of pre-header extended basic block. Usually this is - only one or two edges. */ - num++; - while (ebb->pred->src != ENTRY_BLOCK_PTR && ! ebb->pred->pred_next) - { - ebb = ebb->pred->src; - num++; - } - - loop->pre_header_edges = (edge *) xmalloc (num * sizeof (edge *)); - loop->num_pre_header_edges = num; - - /* Store edges in order that they are followed. The source - of the first edge is the root node of the pre-header extended - basic block and the destination of the last last edge is - the loop header. */ - for (e = loop->entry_edges[0]; num; e = e->src->pred) - { - loop->pre_header_edges[--num] = e; - } - } -} - - -/* Return the block for the pre-header of the loop with header - HEADER where DOM specifies the dominator information. Return NULL if - there is no pre-header. */ - -static basic_block -flow_loop_pre_header_find (header, dom) - basic_block header; - const sbitmap *dom; -{ - basic_block pre_header; - edge e; - - /* If block p is a predecessor of the header and is the only block - that the header does not dominate, then it is the pre-header. */ - pre_header = NULL; - for (e = header->pred; e; e = e->pred_next) - { - basic_block node = e->src; - - if (node != ENTRY_BLOCK_PTR - && ! 
TEST_BIT (dom[node->index], header->index)) - { - if (pre_header == NULL) - pre_header = node; - else - { - /* There are multiple edges into the header from outside - the loop so there is no pre-header block. */ - pre_header = NULL; - break; - } - } - } - return pre_header; -} - -/* Add LOOP to the loop hierarchy tree where PREVLOOP was the loop - previously added. The insertion algorithm assumes that the loops - are added in the order found by a depth first search of the CFG. */ - -static void -flow_loop_tree_node_add (prevloop, loop) - struct loop *prevloop; - struct loop *loop; -{ - - if (flow_loop_nested_p (prevloop, loop)) - { - prevloop->inner = loop; - loop->outer = prevloop; - return; - } - - while (prevloop->outer) - { - if (flow_loop_nested_p (prevloop->outer, loop)) - { - prevloop->next = loop; - loop->outer = prevloop->outer; - return; - } - prevloop = prevloop->outer; - } - - prevloop->next = loop; - loop->outer = NULL; -} - -/* Build the loop hierarchy tree for LOOPS. */ - -static void -flow_loops_tree_build (loops) - struct loops *loops; -{ - int i; - int num_loops; - - num_loops = loops->num; - if (! num_loops) - return; - - /* Root the loop hierarchy tree with the first loop found. - Since we used a depth first search this should be the - outermost loop. */ - loops->tree_root = &loops->array[0]; - loops->tree_root->outer = loops->tree_root->inner = loops->tree_root->next = NULL; - - /* Add the remaining loops to the tree. */ - for (i = 1; i < num_loops; i++) - flow_loop_tree_node_add (&loops->array[i - 1], &loops->array[i]); -} - -/* Helper function to compute loop nesting depth and enclosed loop level - for the natural loop specified by LOOP at the loop depth DEPTH. - Returns the loop level. */ - -static int -flow_loop_level_compute (loop, depth) - struct loop *loop; - int depth; -{ - struct loop *inner; - int level = 1; - - if (! 
loop) - return 0; - - /* Traverse loop tree assigning depth and computing level as the - maximum level of all the inner loops of this loop. The loop - level is equivalent to the height of the loop in the loop tree - and corresponds to the number of enclosed loop levels (including - itself). */ - for (inner = loop->inner; inner; inner = inner->next) - { - int ilevel; - - ilevel = flow_loop_level_compute (inner, depth + 1) + 1; - - if (ilevel > level) - level = ilevel; - } - loop->level = level; - loop->depth = depth; - return level; -} - -/* Compute the loop nesting depth and enclosed loop level for the loop - hierarchy tree specfied by LOOPS. Return the maximum enclosed loop - level. */ - -static int -flow_loops_level_compute (loops) - struct loops *loops; -{ - struct loop *loop; - int level; - int levels = 0; - - /* Traverse all the outer level loops. */ - for (loop = loops->tree_root; loop; loop = loop->next) - { - level = flow_loop_level_compute (loop, 1); - if (level > levels) - levels = level; - } - return levels; -} - - -/* Scan a single natural loop specified by LOOP collecting information - about it specified by FLAGS. */ - -int -flow_loop_scan (loops, loop, flags) - struct loops *loops; - struct loop *loop; - int flags; -{ - /* Determine prerequisites. */ - if ((flags & LOOP_EXITS_DOMS) && ! loop->exit_edges) - flags |= LOOP_EXIT_EDGES; - - if (flags & LOOP_ENTRY_EDGES) - { - /* Find edges which enter the loop header. - Note that the entry edges should only - enter the header of a natural loop. */ - loop->num_entries - = flow_loop_entry_edges_find (loop->header, - loop->nodes, - &loop->entry_edges); - } - - if (flags & LOOP_EXIT_EDGES) - { - /* Find edges which exit the loop. */ - loop->num_exits - = flow_loop_exit_edges_find (loop->nodes, - &loop->exit_edges); - } - - if (flags & LOOP_EXITS_DOMS) - { - int j; - - /* Determine which loop nodes dominate all the exits - of the loop. 
*/ - loop->exits_doms = sbitmap_alloc (n_basic_blocks); - sbitmap_copy (loop->exits_doms, loop->nodes); - for (j = 0; j < loop->num_exits; j++) - sbitmap_a_and_b (loop->exits_doms, loop->exits_doms, - loops->cfg.dom[loop->exit_edges[j]->src->index]); - - /* The header of a natural loop must dominate - all exits. */ - if (! TEST_BIT (loop->exits_doms, loop->header->index)) - abort (); - } - - if (flags & LOOP_PRE_HEADER) - { - /* Look to see if the loop has a pre-header node. */ - loop->pre_header - = flow_loop_pre_header_find (loop->header, loops->cfg.dom); - - /* Find the blocks within the extended basic block of - the loop pre-header. */ - flow_loop_pre_header_scan (loop); - } - return 1; -} - - -/* Find all the natural loops in the function and save in LOOPS structure - and recalculate loop_depth information in basic block structures. - FLAGS controls which loop information is collected. - Return the number of natural loops found. */ - -int -flow_loops_find (loops, flags) - struct loops *loops; - int flags; -{ - int i; - int b; - int num_loops; - edge e; - sbitmap headers; - sbitmap *dom; - int *dfs_order; - int *rc_order; - - /* This function cannot be repeatedly called with different - flags to build up the loop information. The loop tree - must always be built if this function is called. */ - if (! (flags & LOOP_TREE)) - abort (); - - memset (loops, 0, sizeof (*loops)); - - /* Taking care of this degenerate case makes the rest of - this code simpler. */ - if (n_basic_blocks == 0) - return 0; - - dfs_order = NULL; - rc_order = NULL; - - /* Compute the dominators. */ - dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks); - calculate_dominance_info (NULL, dom, CDI_DOMINATORS); - - /* Count the number of loop edges (back edges). This should be the - same as the number of natural loops. 
*/ - - num_loops = 0; - for (b = 0; b < n_basic_blocks; b++) - { - basic_block header; - - header = BASIC_BLOCK (b); - header->loop_depth = 0; - - for (e = header->pred; e; e = e->pred_next) - { - basic_block latch = e->src; - - /* Look for back edges where a predecessor is dominated - by this block. A natural loop has a single entry - node (header) that dominates all the nodes in the - loop. It also has single back edge to the header - from a latch node. Note that multiple natural loops - may share the same header. */ - if (b != header->index) - abort (); - - if (latch != ENTRY_BLOCK_PTR && TEST_BIT (dom[latch->index], b)) - num_loops++; - } - } - - if (num_loops) - { - /* Compute depth first search order of the CFG so that outer - natural loops will be found before inner natural loops. */ - dfs_order = (int *) xmalloc (n_basic_blocks * sizeof (int)); - rc_order = (int *) xmalloc (n_basic_blocks * sizeof (int)); - flow_depth_first_order_compute (dfs_order, rc_order); - - /* Save CFG derived information to avoid recomputing it. */ - loops->cfg.dom = dom; - loops->cfg.dfs_order = dfs_order; - loops->cfg.rc_order = rc_order; - - /* Allocate loop structures. */ - loops->array - = (struct loop *) xcalloc (num_loops, sizeof (struct loop)); - - headers = sbitmap_alloc (n_basic_blocks); - sbitmap_zero (headers); - - loops->shared_headers = sbitmap_alloc (n_basic_blocks); - sbitmap_zero (loops->shared_headers); - - /* Find and record information about all the natural loops - in the CFG. */ - num_loops = 0; - for (b = 0; b < n_basic_blocks; b++) - { - basic_block header; - - /* Search the nodes of the CFG in reverse completion order - so that we can find outer loops first. */ - header = BASIC_BLOCK (rc_order[b]); - - /* Look for all the possible latch blocks for this header. */ - for (e = header->pred; e; e = e->pred_next) - { - basic_block latch = e->src; - - /* Look for back edges where a predecessor is dominated - by this block. 
A natural loop has a single entry - node (header) that dominates all the nodes in the - loop. It also has single back edge to the header - from a latch node. Note that multiple natural loops - may share the same header. */ - if (latch != ENTRY_BLOCK_PTR - && TEST_BIT (dom[latch->index], header->index)) - { - struct loop *loop; - - loop = loops->array + num_loops; - - loop->header = header; - loop->latch = latch; - loop->num = num_loops; - - num_loops++; - } - } - } - - for (i = 0; i < num_loops; i++) - { - struct loop *loop = &loops->array[i]; - - /* Keep track of blocks that are loop headers so - that we can tell which loops should be merged. */ - if (TEST_BIT (headers, loop->header->index)) - SET_BIT (loops->shared_headers, loop->header->index); - SET_BIT (headers, loop->header->index); - - /* Find nodes contained within the loop. */ - loop->nodes = sbitmap_alloc (n_basic_blocks); - loop->num_nodes - = flow_loop_nodes_find (loop->header, loop->latch, loop->nodes); - - /* Compute first and last blocks within the loop. - These are often the same as the loop header and - loop latch respectively, but this is not always - the case. */ - loop->first - = BASIC_BLOCK (sbitmap_first_set_bit (loop->nodes)); - loop->last - = BASIC_BLOCK (sbitmap_last_set_bit (loop->nodes)); - - flow_loop_scan (loops, loop, flags); - } - - /* Natural loops with shared headers may either be disjoint or - nested. Disjoint loops with shared headers cannot be inner - loops and should be merged. For now just mark loops that share - headers. */ - for (i = 0; i < num_loops; i++) - if (TEST_BIT (loops->shared_headers, loops->array[i].header->index)) - loops->array[i].shared = 1; - - sbitmap_free (headers); - } - else - { - sbitmap_vector_free (dom); - } - - loops->num = num_loops; - - /* Build the loop hierarchy tree. */ - flow_loops_tree_build (loops); - - /* Assign the loop nesting depth and enclosed loop level for each - loop. 
*/ - loops->levels = flow_loops_level_compute (loops); - - return num_loops; -} - - -/* Update the information regarding the loops in the CFG - specified by LOOPS. */ -int -flow_loops_update (loops, flags) - struct loops *loops; - int flags; -{ - /* One day we may want to update the current loop data. For now - throw away the old stuff and rebuild what we need. */ - if (loops->array) - flow_loops_free (loops); - - return flow_loops_find (loops, flags); -} - - -/* Return non-zero if edge E enters header of LOOP from outside of LOOP. */ - -int -flow_loop_outside_edge_p (loop, e) - const struct loop *loop; - edge e; -{ - if (e->dest != loop->header) - abort (); - return (e->src == ENTRY_BLOCK_PTR) - || ! TEST_BIT (loop->nodes, e->src->index); -} - /* Clear LOG_LINKS fields of insns in a chain. Also clear the global_live_at_{start,end} fields of the basic block structures. */ @@ -10259,144 +4133,3 @@ reg_set_to_hard_reg_set (to, from) SET_HARD_REG_BIT (*to, i); }); } - -/* Called once at intialization time. */ - -void -init_flow () -{ - static int initialized; - - if (!initialized) - { - gcc_obstack_init (&flow_obstack); - flow_firstobj = (char *) obstack_alloc (&flow_obstack, 0); - initialized = 1; - } - else - { - obstack_free (&flow_obstack, flow_firstobj); - flow_firstobj = (char *) obstack_alloc (&flow_obstack, 0); - } -} - -/* Assume that the preceeding pass has possibly eliminated jump instructions - or converted the unconditional jumps. Eliminate the edges from CFG. - Return true if any edges are eliminated. */ - -bool -purge_dead_edges (bb) - basic_block bb; -{ - edge e, next; - rtx insn = bb->end; - bool purged = false; - - if (GET_CODE (insn) == JUMP_INSN && !simplejump_p (insn)) - return false; - if (GET_CODE (insn) == JUMP_INSN) - { - rtx note; - edge b,f; - /* We do care only about conditional jumps and simplejumps. 
*/ - if (!any_condjump_p (insn) - && !returnjump_p (insn) - && !simplejump_p (insn)) - return false; - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - - /* Check purposes we can have edge. */ - if ((e->flags & EDGE_FALLTHRU) - && any_condjump_p (insn)) - continue; - if (e->dest != EXIT_BLOCK_PTR - && e->dest->head == JUMP_LABEL (insn)) - continue; - if (e->dest == EXIT_BLOCK_PTR - && returnjump_p (insn)) - continue; - purged = true; - remove_edge (e); - } - if (!bb->succ || !purged) - return false; - if (rtl_dump_file) - fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->index); - if (!optimize) - return purged; - - /* Redistribute probabilities. */ - if (!bb->succ->succ_next) - { - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; - } - else - { - note = find_reg_note (insn, REG_BR_PROB, NULL); - if (!note) - return purged; - b = BRANCH_EDGE (bb); - f = FALLTHRU_EDGE (bb); - b->probability = INTVAL (XEXP (note, 0)); - f->probability = REG_BR_PROB_BASE - b->probability; - b->count = bb->count * b->probability / REG_BR_PROB_BASE; - f->count = bb->count * f->probability / REG_BR_PROB_BASE; - } - return purged; - } - - /* Cleanup abnormal edges caused by throwing insns that have been - eliminated. */ - if (! can_throw_internal (bb->end)) - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (e->flags & EDGE_EH) - { - remove_edge (e); - purged = true; - } - } - - /* If we don't see a jump insn, we don't know exactly why the block would - have been broken at this point. Look for a simple, non-fallthru edge, - as these are only created by conditional branches. If we find such an - edge we know that there used to be a jump here and can then safely - remove all non-fallthru edges. 
*/ - for (e = bb->succ; e && (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)); - e = e->succ_next); - if (!e) - return purged; - for (e = bb->succ; e; e = next) - { - next = e->succ_next; - if (!(e->flags & EDGE_FALLTHRU)) - remove_edge (e), purged = true; - } - if (!bb->succ || bb->succ->succ_next) - abort (); - bb->succ->probability = REG_BR_PROB_BASE; - bb->succ->count = bb->count; - - if (rtl_dump_file) - fprintf (rtl_dump_file, "Purged non-fallthru edges from bb %i\n", - bb->index); - return purged; -} - -/* Search all basic blocks for potentionally dead edges and purge them. - - Return true ifif some edge has been elliminated. - */ - -bool -purge_all_dead_edges () -{ - int i, purged = false; - for (i = 0; i < n_basic_blocks; i++) - purged |= purge_dead_edges (BASIC_BLOCK (i)); - return purged; -} diff --git a/gcc/output.h b/gcc/output.h index 370589ad3ac..2edfb050794 100644 --- a/gcc/output.h +++ b/gcc/output.h @@ -144,7 +144,7 @@ extern void allocate_for_life_analysis PARAMS ((void)); extern int regno_uninitialized PARAMS ((int)); extern int regno_clobbered_at_setjmp PARAMS ((int)); extern void find_basic_blocks PARAMS ((rtx, int, FILE *)); -extern void cleanup_cfg PARAMS ((int)); +extern bool cleanup_cfg PARAMS ((int)); extern void check_function_return_warnings PARAMS ((void)); #endif |