1 /*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
57 // not cover the input (or output), use the input (or output) mask instead.
//------------------------------get_spillcopy_wide-----------------------------
// Build a MachSpillCopyNode moving 'def' toward input 'uidx' of 'use' (or
// toward a generic spill home when 'use' is NULL).  The copy's input and
// output register masks are widened to cover both register and stack homes
// where the wide spill mask overlaps the def/use masks; otherwise the
// narrower def/use masks are used directly.  Returns NULL on bailout.
Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
  // If ideal reg doesn't exist we've got a bad schedule happening
  // that is forcing us to spill something that isn't spillable.
  // Bail rather than abort
  int ireg = def->ideal_reg();
  if( ireg == 0 || ireg == Op_RegFlags ) {
    assert(false, "attempted to spill a non-spillable item");
    C->record_method_not_compilable("attempted to spill a non-spillable item");
    return NULL;
  }
  // Spill copies consume node budget; bail out cleanly near the node limit.
  if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    return NULL;
  }
  // i_mask: registers the def can produce into.
  const RegMask *i_mask = &def->out_RegMask();
  // w_mask: "wide" mask for this ideal register class — registers plus
  // the matching stack slots (where a spilled value may live).
  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  // o_mask: registers the use demands; with no use, target the wide mask.
  const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  // Widened input mask: prefer the wide mask when it covers the def's output.
  const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  const RegMask *w_o_mask;

  // Use the wide mask on the output side only if it overlaps the use's mask
  // AND the value is single-register or an aligned pair.  Longs/doubles
  // (and pointers under _LP64) occupy register pairs, so they must be
  // pair-aligned to take the wide mask.
  if( w_mask->overlap( *o_mask ) &&      // Overlap AND
      ((ireg != Op_RegL && ireg != Op_RegD // Single use or aligned
#ifdef _LP64
        && ireg != Op_RegP
#endif
         ) || o_mask->is_aligned_Pairs()) ) {
    // Don't come here for mis-aligned doubles
    w_o_mask = w_mask;
  } else {      // wide ideal mask does not overlap with o_mask
    // Mis-aligned doubles come here and XMM->FPR moves on x86.
    w_o_mask = o_mask; // Must target desired registers
    // Does the ideal-reg-mask overlap with o_mask? I.e., can I use
    // a reg-reg move or do I need a trip across register classes
    // (and thus through memory)?
    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
      // Here we assume a trip through memory is required.
      w_i_mask = &C->FIRST_STACK_mask();
  }
  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
}
97
98 //------------------------------insert_proj------------------------------------
99 // Insert the spill at chosen location. Skip over any intervening Proj's or
100 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
101 // instead. Update high-pressure indices. Create a new live range.
102 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
383 uint i = insidx+1;
384 if( clone_projs( b, i, def, spill, maxlrg ) ) {
385 // Adjust the point where we go hi-pressure
386 if( i <= b->_ihrp_index ) b->_ihrp_index++;
387 if( i <= b->_fhrp_index ) b->_fhrp_index++;
388 }
389
390 return spill;
391 }
392
393 //------------------------------is_high_pressure-------------------------------
394 // Function to compute whether or not this live range is "high pressure"
395 // in this block - whether it spills eagerly or not.
396 bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
397 if( lrg->_was_spilled1 ) return true;
398 // Forced spilling due to conflict? Then split only at binding uses
399 // or defs, not for supposed capacity problems.
400 // CNC - Turned off 7/8/99, causes too much spilling
401 // if( lrg->_is_bound ) return false;
402
403 // Not yet reached the high-pressure cutoff point, so low pressure
404 uint hrp_idx = lrg->_is_float ? b->_fhrp_index : b->_ihrp_index;
405 if( insidx < hrp_idx ) return false;
406 // Register pressure for the block as a whole depends on reg class
407 int block_pres = lrg->_is_float ? b->_freg_pressure : b->_reg_pressure;
408 // Bound live ranges will split at the binding points first;
409 // Intermediate splits should assume the live range's register set
410 // got "freed up" and that num_regs will become INT_PRESSURE.
411 int bound_pres = lrg->_is_float ? FLOATPRESSURE : INTPRESSURE;
412 // Effective register pressure limit.
413 int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
414 ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
415 // High pressure if block pressure requires more register freedom
416 // than live range has.
417 return block_pres >= lrg_pres;
418 }
419
420
421 //------------------------------prompt_use---------------------------------
422 // True if lidx is used before any real register is def'd in the block
423 bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
424 if( lrgs(lidx)._was_spilled2 ) return false;
425
426 // Scan block for 1st use.
427 for( uint i = 1; i <= b->end_idx(); i++ ) {
428 Node *n = b->_nodes[i];
429 // Ignore PHI use, these can be up or down
430 if( n->is_Phi() ) continue;
431 for( uint j = 1; j < n->req(); j++ )
777 Reachblock[slidx] = debug_defs[slidx];
778 }
779 else {
780 // Insert point is just past last use or def in the block
781 int insert_point = insidx-1;
782 while( insert_point > 0 ) {
783 Node *n = b->_nodes[insert_point];
784 // Hit top of block? Quit going backwards
785 if( n->is_Phi() ) break;
786 // Found a def? Better split after it.
787 if( n2lidx(n) == lidx ) break;
788 // Look for a use
789 uint i;
790 for( i = 1; i < n->req(); i++ )
791 if( n2lidx(n->in(i)) == lidx )
792 break;
793 // Found a use? Better split after it.
794 if( i < n->req() ) break;
795 insert_point--;
796 }
797 maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
798 // If it wasn't split bail
799 if (!maxlrg) {
800 return 0;
801 }
802 insidx++;
803 }
804 // This is a new DEF, so update UP
805 UPblock[slidx] = false;
806 #ifndef PRODUCT
807 // DEBUG
808 if( trace_spilling() ) {
809 tty->print("\nNew Split DOWN DEF of Spill Idx ");
810 tty->print("%d, UP %d:\n",slidx,false);
811 n1->dump();
812 }
813 #endif
814 }
815 } // end if LRG is UP
816 } // end for all spilling live ranges
817 assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
818   } // end if crossing HRP Boundary
819
820 // If the LRG index is oob, then this is a new spillcopy, skip it.
821 if( defidx >= _maxlrg ) {
943 if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
944 uint debug_start = jvms->debug_start();
945 // If this is debug info use & there is a reaching DOWN def
946 if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
947 assert(inpidx < oopoff, "handle only debug info here");
948 // Just hook it in & move on
949 n->set_req(inpidx, debug_defs[slidx]);
950 // (Note that this can make two sides of a split live at the
951 // same time: The debug def on stack, and another def in a
952 // register. The GC needs to know about both of them, but any
953 // derived pointers after oopoff will refer to only one of the
954 // two defs and the GC would therefore miss the other. Thus
955 // this hack is only allowed for debug info which is Java state
956 // and therefore never a derived pointer.)
957 continue;
958 }
959 }
960 // Grab register mask info
961 const RegMask &dmask = def->out_RegMask();
962 const RegMask &umask = n->in_RegMask(inpidx);
963
964 assert(inpidx < oopoff, "cannot use-split oop map info");
965
966 bool dup = UPblock[slidx];
967 bool uup = umask.is_UP();
968
969 // Need special logic to handle bound USES. Insert a split at this
970 // bound use if we can't rematerialize the def, or if we need the
971 // split to form a misaligned pair.
972 if( !umask.is_AllStack() &&
973 (int)umask.Size() <= lrgs(useidx).num_regs() &&
974 (!def->rematerialize() ||
975 umask.is_misaligned_Pair())) {
976 // These need a Split regardless of overlap or pressure
977 // SPLIT - NO DEF - NO CISC SPILL
978 maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
979 // If it wasn't split bail
980 if (!maxlrg) {
981 return 0;
982 }
983 insidx++; // Reset iterator to skip USE side split
984 continue;
985 }
986
987 if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
988 // The use at the call can force the def down so insert
989 // a split before the use to allow the def more freedom.
990 maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
991 // If it wasn't split bail
992 if (!maxlrg) {
993 return 0;
994 }
995 insidx++; // Reset iterator to skip USE side split
1106 } // End if Spill USE
1107 } // End For All Inputs
1108 } // End If not nullcheck
1109
1110 // ********** Handle DEFS **********
1111 // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
1112 // just reset the Reaches info in LRP regions. DEFS must always update
1113 // UP info.
1114 if( deflrg.reg() >= LRG::SPILL_REG ) { // Spilled?
1115 uint slidx = lrg2reach[defidx];
1116 // Add to defs list for later assignment of new live range number
1117 defs->push(n);
1118 // Set a flag on the Node indicating it has already spilled.
1119 // Only do it for capacity spills not conflict spills.
1120 if( !deflrg._direct_conflict )
1121 set_was_spilled(n);
1122 assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
1123 // Grab UP info for DEF
1124 const RegMask &dmask = n->out_RegMask();
1125 bool defup = dmask.is_UP();
1126 // Only split at Def if this is a HRP block or bound (and spilled once)
1127 if( !n->rematerialize() &&
1128 (((dmask.is_bound1() || dmask.is_bound2() || dmask.is_misaligned_Pair()) &&
1129 (deflrg._direct_conflict || deflrg._must_spill)) ||
1130 // Check for LRG being up in a register and we are inside a high
1131 // pressure area. Spill it down immediately.
1132 (defup && is_high_pressure(b,&deflrg,insidx))) ) {
1133 assert( !n->rematerialize(), "" );
1134 assert( !n->is_SpillCopy(), "" );
1135 // Do a split at the def site.
1136 maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
1137 // If it wasn't split bail
1138 if (!maxlrg) {
1139 return 0;
1140 }
1141 // Split DEF's Down
1142 UPblock[slidx] = 0;
1143 #ifndef PRODUCT
1144 // DEBUG
1145 if( trace_spilling() ) {
1146 tty->print("\nNew Split DOWN DEF of Spill Idx ");
1147 tty->print("%d, UP %d:\n",slidx,false);
1148 n->dump();
|
1 /*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
57 // not cover the input (or output), use the input (or output) mask instead.
//------------------------------get_spillcopy_wide-----------------------------
// Build a MachSpillCopyNode moving 'def' toward input 'uidx' of 'use' (or
// toward a generic spill home when 'use' is NULL).  The copy's input and
// output register masks are widened to cover both register and stack homes
// where the wide spill mask overlaps the def/use masks; otherwise the
// narrower def/use masks are used directly.  Returns NULL on bailout.
Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
  // If ideal reg doesn't exist we've got a bad schedule happening
  // that is forcing us to spill something that isn't spillable.
  // Bail rather than abort
  int ireg = def->ideal_reg();
  if( ireg == 0 || ireg == Op_RegFlags ) {
    assert(false, "attempted to spill a non-spillable item");
    C->record_method_not_compilable("attempted to spill a non-spillable item");
    return NULL;
  }
  // Spill copies consume node budget; bail out cleanly near the node limit.
  if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    return NULL;
  }
  // i_mask: registers the def can produce into.
  const RegMask *i_mask = &def->out_RegMask();
  // w_mask: "wide" mask for this ideal register class — registers plus
  // the matching stack slots (where a spilled value may live).
  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  // o_mask: registers the use demands; with no use, target the wide mask.
  const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  // Widened input mask: prefer the wide mask when it covers the def's output.
  const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  const RegMask *w_o_mask;

  int num_regs = RegMask::num_registers(ireg);
  bool is_vect = RegMask::is_vector(ireg);
  // Use the wide mask on the output side only if it overlaps the use's mask
  // AND the value is single-register, a vector (vectors are kept aligned by
  // construction), or an aligned multi-register pair.
  if( w_mask->overlap( *o_mask ) && // Overlap AND
      ((num_regs == 1) // Single use or aligned
      || is_vect // or vector
      || !is_vect && o_mask->is_aligned_pairs()) ) {
    assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
    // Don't come here for mis-aligned doubles
    w_o_mask = w_mask;
  } else {      // wide ideal mask does not overlap with o_mask
    // Mis-aligned doubles come here and XMM->FPR moves on x86.
    w_o_mask = o_mask; // Must target desired registers
    // Does the ideal-reg-mask overlap with o_mask? I.e., can I use
    // a reg-reg move or do I need a trip across register classes
    // (and thus through memory)?
    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
      // Here we assume a trip through memory is required.
      w_i_mask = &C->FIRST_STACK_mask();
  }
  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
}
98
99 //------------------------------insert_proj------------------------------------
100 // Insert the spill at chosen location. Skip over any intervening Proj's or
101 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
102 // instead. Update high-pressure indices. Create a new live range.
103 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
384 uint i = insidx+1;
385 if( clone_projs( b, i, def, spill, maxlrg ) ) {
386 // Adjust the point where we go hi-pressure
387 if( i <= b->_ihrp_index ) b->_ihrp_index++;
388 if( i <= b->_fhrp_index ) b->_fhrp_index++;
389 }
390
391 return spill;
392 }
393
394 //------------------------------is_high_pressure-------------------------------
395 // Function to compute whether or not this live range is "high pressure"
396 // in this block - whether it spills eagerly or not.
397 bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
398 if( lrg->_was_spilled1 ) return true;
399 // Forced spilling due to conflict? Then split only at binding uses
400 // or defs, not for supposed capacity problems.
401 // CNC - Turned off 7/8/99, causes too much spilling
402 // if( lrg->_is_bound ) return false;
403
404 // Use float pressure numbers for vectors.
405 bool is_float_or_vector = lrg->_is_float || lrg->_is_vector;
406 // Not yet reached the high-pressure cutoff point, so low pressure
407 uint hrp_idx = is_float_or_vector ? b->_fhrp_index : b->_ihrp_index;
408 if( insidx < hrp_idx ) return false;
409 // Register pressure for the block as a whole depends on reg class
410 int block_pres = is_float_or_vector ? b->_freg_pressure : b->_reg_pressure;
411 // Bound live ranges will split at the binding points first;
412 // Intermediate splits should assume the live range's register set
413 // got "freed up" and that num_regs will become INT_PRESSURE.
414 int bound_pres = is_float_or_vector ? FLOATPRESSURE : INTPRESSURE;
415 // Effective register pressure limit.
416 int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
417 ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
418 // High pressure if block pressure requires more register freedom
419 // than live range has.
420 return block_pres >= lrg_pres;
421 }
422
423
424 //------------------------------prompt_use---------------------------------
425 // True if lidx is used before any real register is def'd in the block
426 bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
427 if( lrgs(lidx)._was_spilled2 ) return false;
428
429 // Scan block for 1st use.
430 for( uint i = 1; i <= b->end_idx(); i++ ) {
431 Node *n = b->_nodes[i];
432 // Ignore PHI use, these can be up or down
433 if( n->is_Phi() ) continue;
434 for( uint j = 1; j < n->req(); j++ )
780 Reachblock[slidx] = debug_defs[slidx];
781 }
782 else {
783 // Insert point is just past last use or def in the block
784 int insert_point = insidx-1;
785 while( insert_point > 0 ) {
786 Node *n = b->_nodes[insert_point];
787 // Hit top of block? Quit going backwards
788 if( n->is_Phi() ) break;
789 // Found a def? Better split after it.
790 if( n2lidx(n) == lidx ) break;
791 // Look for a use
792 uint i;
793 for( i = 1; i < n->req(); i++ )
794 if( n2lidx(n->in(i)) == lidx )
795 break;
796 // Found a use? Better split after it.
797 if( i < n->req() ) break;
798 insert_point--;
799 }
800 uint orig_eidx = b->end_idx();
801 maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
802 // If it wasn't split bail
803 if (!maxlrg) {
804 return 0;
805 }
806 // Spill of NULL check mem op goes into the following block.
807 if (b->end_idx() > orig_eidx)
808 insidx++;
809 }
810 // This is a new DEF, so update UP
811 UPblock[slidx] = false;
812 #ifndef PRODUCT
813 // DEBUG
814 if( trace_spilling() ) {
815 tty->print("\nNew Split DOWN DEF of Spill Idx ");
816 tty->print("%d, UP %d:\n",slidx,false);
817 n1->dump();
818 }
819 #endif
820 }
821 } // end if LRG is UP
822 } // end for all spilling live ranges
823 assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
824   } // end if crossing HRP Boundary
825
826 // If the LRG index is oob, then this is a new spillcopy, skip it.
827 if( defidx >= _maxlrg ) {
949 if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
950 uint debug_start = jvms->debug_start();
951 // If this is debug info use & there is a reaching DOWN def
952 if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
953 assert(inpidx < oopoff, "handle only debug info here");
954 // Just hook it in & move on
955 n->set_req(inpidx, debug_defs[slidx]);
956 // (Note that this can make two sides of a split live at the
957 // same time: The debug def on stack, and another def in a
958 // register. The GC needs to know about both of them, but any
959 // derived pointers after oopoff will refer to only one of the
960 // two defs and the GC would therefore miss the other. Thus
961 // this hack is only allowed for debug info which is Java state
962 // and therefore never a derived pointer.)
963 continue;
964 }
965 }
966 // Grab register mask info
967 const RegMask &dmask = def->out_RegMask();
968 const RegMask &umask = n->in_RegMask(inpidx);
969 bool is_vect = RegMask::is_vector(def->ideal_reg());
970 assert(inpidx < oopoff, "cannot use-split oop map info");
971
972 bool dup = UPblock[slidx];
973 bool uup = umask.is_UP();
974
975 // Need special logic to handle bound USES. Insert a split at this
976 // bound use if we can't rematerialize the def, or if we need the
977 // split to form a misaligned pair.
978 if( !umask.is_AllStack() &&
979 (int)umask.Size() <= lrgs(useidx).num_regs() &&
980 (!def->rematerialize() ||
981 !is_vect && umask.is_misaligned_pair())) {
982 // These need a Split regardless of overlap or pressure
983 // SPLIT - NO DEF - NO CISC SPILL
984 maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
985 // If it wasn't split bail
986 if (!maxlrg) {
987 return 0;
988 }
989 insidx++; // Reset iterator to skip USE side split
990 continue;
991 }
992
993 if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
994 // The use at the call can force the def down so insert
995 // a split before the use to allow the def more freedom.
996 maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
997 // If it wasn't split bail
998 if (!maxlrg) {
999 return 0;
1000 }
1001 insidx++; // Reset iterator to skip USE side split
1112 } // End if Spill USE
1113 } // End For All Inputs
1114 } // End If not nullcheck
1115
1116 // ********** Handle DEFS **********
1117 // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
1118 // just reset the Reaches info in LRP regions. DEFS must always update
1119 // UP info.
1120 if( deflrg.reg() >= LRG::SPILL_REG ) { // Spilled?
1121 uint slidx = lrg2reach[defidx];
1122 // Add to defs list for later assignment of new live range number
1123 defs->push(n);
1124 // Set a flag on the Node indicating it has already spilled.
1125 // Only do it for capacity spills not conflict spills.
1126 if( !deflrg._direct_conflict )
1127 set_was_spilled(n);
1128 assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
1129 // Grab UP info for DEF
1130 const RegMask &dmask = n->out_RegMask();
1131 bool defup = dmask.is_UP();
1132 int ireg = n->ideal_reg();
1133 bool is_vect = RegMask::is_vector(ireg);
1134 // Only split at Def if this is a HRP block or bound (and spilled once)
1135 if( !n->rematerialize() &&
1136 (((dmask.is_bound(ireg) || !is_vect && dmask.is_misaligned_pair()) &&
1137 (deflrg._direct_conflict || deflrg._must_spill)) ||
1138 // Check for LRG being up in a register and we are inside a high
1139 // pressure area. Spill it down immediately.
1140 (defup && is_high_pressure(b,&deflrg,insidx))) ) {
1141 assert( !n->rematerialize(), "" );
1142 assert( !n->is_SpillCopy(), "" );
1143 // Do a split at the def site.
1144 maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
1145 // If it wasn't split bail
1146 if (!maxlrg) {
1147 return 0;
1148 }
1149 // Split DEF's Down
1150 UPblock[slidx] = 0;
1151 #ifndef PRODUCT
1152 // DEBUG
1153 if( trace_spilling() ) {
1154 tty->print("\nNew Split DOWN DEF of Spill Idx ");
1155 tty->print("%d, UP %d:\n",slidx,false);
1156 n->dump();
|