973 return region;
974 }
975
976 // Clone this load to each catch block
977 static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) {
978 bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
979 phase->igvn().set_delay_transform(true);
980
981 // Verify pre conditions
982 assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj");
983 assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch");
984
985 if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) {
986 if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx);
987 return;
988 }
989
990 // Process the load's successor nodes - if any is between
991 // the call and the catch blocks, they need to be cloned too.
992 // This is done recursively.
993 int outcnt = load->outcnt();
994 uint index = 0;
995 for (int i = 0; i < outcnt; i++) {
996 if (index < load->outcnt()) {
997 Node *n = load->raw_out(index);
998 assert(!n->is_LoadBarrier(), "Sanity");
999 if (!fixup_uses_in_catch(phase, ctrl, n)) {
1000 // if no successor was cloned, progress to next out.
1001 index++;
1002 }
1003 }
1004 }
1005
1006 // Now all the load's uses have been cloned down.
1007 // The only thing left is to clone the loads, but they must end up
1008 // first in the catch blocks.
1009
1010 // We clone the loads to the catch blocks only when needed.
1011 // An array is used to map the catch blocks to each lazily cloned load.
1012 // In that way no extra unnecessary loads are cloned.
1013
1014 // Any use dominated by the original block must have a phi and a region added
1015
1016 Node* catch_node = ctrl->raw_out(0);
1017 int number_of_catch_projs = catch_node->outcnt();
1018 Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs);
1019 Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs);
1020
1021 // The phi_map is used to keep track of where phis have already been inserted
1022 int phi_map_len = phase->C->unique();
|
973 return region;
974 }
975
976 // Clone this load to each catch block
977 static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) {
978 bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
979 phase->igvn().set_delay_transform(true);
980
981 // Verify pre conditions
982 assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj");
983 assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch");
984
985 if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) {
986 if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx);
987 return;
988 }
989
990 // Process the load's successor nodes - if any is between
991 // the call and the catch blocks, they need to be cloned too.
992 // This is done recursively.
993 for (uint i = 0; i < load->outcnt();) {
994 Node *n = load->raw_out(i);
995 assert(!n->is_LoadBarrier(), "Sanity");
996 if (!fixup_uses_in_catch(phase, ctrl, n)) {
997 // if no successor was cloned, progress to next out.
998 i++;
999 }
1000 }
1001
1002 // Now all the load's uses have been cloned down.
1003 // The only thing left is to clone the loads, but they must end up
1004 // first in the catch blocks.
1005
1006 // We clone the loads to the catch blocks only when needed.
1007 // An array is used to map the catch blocks to each lazily cloned load.
1008 // In that way no extra unnecessary loads are cloned.
1009
1010 // Any use dominated by the original block must have a phi and a region added
1011
1012 Node* catch_node = ctrl->raw_out(0);
1013 int number_of_catch_projs = catch_node->outcnt();
1014 Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs);
1015 Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs);
1016
1017 // The phi_map is used to keep track of where phis have already been inserted
1018 int phi_map_len = phase->C->unique();
|