< prev index next >

src/share/vm/opto/ifg.cpp

Print this page




 347 
 348       // Make all inputs live
 349       if (!n->is_Phi()) {      // Phi function uses come from prior block
 350         for(uint k = 1; k < n->req(); k++) {
 351           liveout->insert(_lrg_map.live_range_id(n->in(k)));
 352         }
 353       }
 354 
 355       // 2-address instructions always have the defined value live
 356       // on entry to the instruction, even though it is being defined
 357       // by the instruction.  We pretend a virtual copy sits just prior
 358       // to the instruction and kills the src-def'd register.
 359       // In other words, for 2-address instructions the defined value
 360       // interferes with all inputs.
 361       uint idx;
 362       if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
 363         const MachNode *mach = n->as_Mach();
 364         // Sometimes my 2-address ADDs are commuted in a bad way.
 365         // We generally want the USE-DEF register to refer to the
 366         // loop-varying quantity, to avoid a copy.
 367         uint op = mach->ideal_Opcode();
 368         // Check that mach->num_opnds() == 3 to ensure instruction is
 369         // not subsuming constants, effectively excludes addI_cin_imm
 370         // Can NOT swap for instructions like addI_cin_imm since it
 371         // is adding zero to yhi + carry and the second ideal-input
 372         // points to the result of adding low-halves.
 373         // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
 374         if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
 375             n->in(1)->bottom_type()->base() == Type::Int &&
 376             // See if the ADD is involved in a tight data loop the wrong way
 377             n->in(2)->is_Phi() &&
 378             n->in(2)->in(2) == n ) {
 379           Node *tmp = n->in(1);
 380           n->set_req( 1, n->in(2) );
 381           n->set_req( 2, tmp );
 382         }
 383         // Defined value interferes with all inputs
 384         uint lidx = _lrg_map.live_range_id(n->in(idx));
 385         for (uint k = 1; k < n->req(); k++) {
 386           uint kidx = _lrg_map.live_range_id(n->in(k));
 387           if (kidx != lidx) {
 388             _ifg->add_edge(r, kidx);
 389           }
 390         }
 391       }
 392     } // End of forall instructions in block
 393   } // End of forall blocks
 394 }
 395 
 396 #ifdef ASSERT
 397 uint PhaseChaitin::count_int_pressure(IndexSet* liveout) {
 398   IndexSetIterator elements(liveout);
 399   uint lidx = elements.next();
 400   uint cnt = 0;
 401   while (lidx != 0) {
 402     LRG& lrg = lrgs(lidx);
 403     if (lrg.mask_is_nonempty_and_up() &&
 404         !lrg.is_float_or_vector() &&
 405         lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
 406       cnt += lrg.reg_pressure();
 407     }
 408     lidx = elements.next();
 409   }
 410   return cnt;
 411 }
 412 
 413 uint PhaseChaitin::count_float_pressure(IndexSet* liveout) {
 414   IndexSetIterator elements(liveout);
 415   uint lidx = elements.next();
 416   uint cnt = 0;
 417   while (lidx != 0) {
 418     LRG& lrg = lrgs(lidx);
 419     if (lrg.mask_is_nonempty_and_up() && lrg.is_float_or_vector()) {
 420       cnt += lrg.reg_pressure();
 421     }
 422     lidx = elements.next();
 423   }
 424   return cnt;
 425 }
 426 #endif
 427 
 428 /*
 429  * Adjust register pressure down by 1.  Capture last hi-to-low transition,
 430  */
 431 void PhaseChaitin::lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure) {
 432   if (lrg.mask_is_nonempty_and_up()) {
 433     if (lrg.is_float_or_vector()) {
 434       float_pressure.lower(lrg, location);
 435     } else {
 436       // Do not count the SP and flag registers
 437       const RegMask& r = lrg.mask();
 438       if (r.overlap(*Matcher::idealreg2regmask[Op_RegI])) {
 439         int_pressure.lower(lrg, location);
 440       }
 441     }
 442   }
 443   if (_scheduling_info_generated == false) {
 444     assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
 445     assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
 446   }
 447 }
 448 
 449 /* Go to the first non-phi index in a block */
 450 static uint first_nonphi_index(Block* b) {
 451   uint i;
 452   uint end_idx = b->end_idx();
 453   for (i = 1; i < end_idx; i++) {
 454     Node* n = b->get_node(i);
 455     if (!n->is_Phi()) {
 456       break;
 457     }
 458   }
 459   return i;
 460 }
 461 
 462 /*
 463  * Spills could be inserted before a CreateEx node which should be the first
 464  * instruction in a block after Phi nodes. If so, move the CreateEx node up.
 465  */
 466 static void move_exception_node_up(Block* b, uint first_inst, uint last_inst) {
 467   for (uint i = first_inst; i < last_inst; i++) {
 468     Node* ex = b->get_node(i);
 469     if (ex->is_SpillCopy()) {
 470       continue;
 471     }
 472 
 473     if (i > first_inst &&
 474         ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
 475       b->remove_node(i);
 476       b->insert_node(ex, first_inst);
 477     }
 478     // Stop once a CreateEx or any other node is found
 479     break;
 480   }
 481 }
 482 
 483 /*
 484  * When new live ranges are live, we raise the register pressure
 485  */
 486 void PhaseChaitin::raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure) {
 487   if (lrg.mask_is_nonempty_and_up()) {
 488     if (lrg.is_float_or_vector()) {
 489       float_pressure.raise(lrg);
 490     } else {
 491       // Do not count the SP and flag registers
 492       const RegMask& rm = lrg.mask();
 493       if (rm.overlap(*Matcher::idealreg2regmask[Op_RegI])) {
 494         int_pressure.raise(lrg);
 495       }
 496     }
 497   }
 498 }
 499 
 500 
 501 /*
 502  * Computes the initial register pressure of a block, looking at all live
 503  * ranges in the liveout. The register pressure is computed for both float
 504  * and int/pointer registers.
 505  * Live ranges in the liveout are presumed live for the whole block.
 506  * We add the cost for the whole block to the area of the live ranges initially.
 507  * If a live range gets killed in the block, we'll subtract the unused part of
 508  * the block from the area.
 509  */
 510 void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost) {
 511   IndexSetIterator elements(liveout);
 512   uint lid = elements.next();
 513   while (lid != 0) {


 573 }
 574 
/*
 * Remove dead node if it's not used.
 * We only remove projection nodes if the node "defining" the projection is
 * dead, for example on x86, if we have a dead Add node we remove its
 * RFLAGS node.
 * Returns true if the node at 'location' was removed from the block.
 */
bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uint lid, IndexSet* liveout) {
  Node* def = n->in(0);
  // Non-projections are removable on their own; a projection is only
  // removable when its defining node has a live range and that live
  // range is not in the liveout set (i.e. the def is dead too).
  if (!n->is_Proj() ||
      (_lrg_map.live_range_id(def) && !liveout->member(_lrg_map.live_range_id(def)))) {
    if (n->is_MachProj()) {
      // Don't remove KILL projections if their "defining" nodes have
      // memory effects (have SCMemProj projection node) -
      // they are not dead even when their result is not used.
      // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
      // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
      // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
      // in block in such order that KILL MachProj nodes are processed first.
      if (def->has_out_with(Op_SCMemProj)) {
        return false;
      }
    }
    // Unhook the dead node from the block, the live range, and the graph.
    b->remove_node(location);
    LRG& lrg = lrgs(lid);
    if (lrg._def == n) {
      // This node was the recorded def of its live range; clear it.
      lrg._def = 0;
    }
    n->disconnect_inputs(NULL, C);
    _cfg.unmap_node_from_block(n);
    n->replace_by(C->top());
    return true;
  }
  return false;
}
 609 
 610 /*
 611  * When encountering a fat projection, we might go from a low to high to low
 612  * (since the fat proj only lives at this instruction) going backwards in the
 613  * block. If we find a low to high transition, we record it.
 614  */
 615 void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
 616   RegMask mask_tmp = lrg.mask();
 617   mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
 618   pressure.check_pressure_at_fatproj(location, mask_tmp);
 619 }
 620 
 621 /*
 622  * Insure high score for immediate-use spill copies so they get a color.
 623  * All single-use MachSpillCopy(s) that immediately precede their
 624  * use must color early.  If a longer live range steals their
 625  * color, the spill copy will split and may push another spill copy
 626  * further away resulting in an infinite spill-split-retry cycle.
 627  * Assigning a zero area results in a high score() and a good
 628  * location in the simplify list.
 629  */
 630 void PhaseChaitin::assign_high_score_to_immediate_copies(Block* b, Node* n, LRG& lrg, uint next_inst, uint last_inst) {
 631   if (n->is_SpillCopy() &&
 632       lrg.is_singledef() && // A multi defined live range can still split
 633       n->outcnt() == 1 &&   // and use must be in this block
 634       _cfg.get_block_for_node(n->unique_out()) == b) {
 635 
 636     Node* single_use = n->unique_out();
 637     assert(b->find_node(single_use) >= next_inst, "Use must be later in block");


 721       must_spill++;
 722       interfering_lrg._must_spill = 1;
 723       interfering_lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
 724     }
 725     l = elements.next();
 726   }
 727 }
 728 
 729 /*
 730  * Start loop at 1 (skip control edge) for most Nodes. SCMemProj's might be the
 731  * sole use of a StoreLConditional. While StoreLConditionals set memory (the
 732  * SCMemProj use) they also def flags; if that flag def is unused the allocator
 733  * sees a flag-setting instruction with no use of the flags and assumes it's
 734  * dead.  This keeps the (useless) flag-setting behavior alive while also
 735  * keeping the (useful) memory update effect.
 736  */
 737 void PhaseChaitin::add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure) {
 738   JVMState* jvms = n->jvms();
 739   uint debug_start = jvms ? jvms->debug_start() : 999999;
 740 
 741   for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
 742     Node* def = n->in(k);
 743     uint lid = _lrg_map.live_range_id(def);
 744     if (!lid) {
 745       continue;
 746     }
 747     LRG& lrg = lrgs(lid);
 748 
 749     // No use-side cost for spilling debug info
 750     if (k < debug_start) {
 751       // A USE costs twice block frequency (once for the Load, once
 752       // for a Load-delay).  Rematerialized uses only cost once.
 753       lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq * 2));
 754     }
 755 
 756     if (liveout->insert(lid)) {
 757       // Newly live things assumed live from here to top of block
 758       lrg._area += cost;
 759       raise_pressure(b, lrg, int_pressure, float_pressure);
 760       assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
 761       assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");


 837     block->_reg_pressure = 0;
 838     block->_freg_pressure = 0;
 839 
 840     int inst_count = last_inst - first_inst;
 841     double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
 842     assert(cost >= 0.0, "negative spill cost" );
 843 
 844     compute_initial_block_pressure(block, &liveout, int_pressure, float_pressure, cost);
 845 
 846     for (uint location = last_inst; location > 0; location--) {
 847       Node* n = block->get_node(location);
 848       uint lid = _lrg_map.live_range_id(n);
 849 
 850       if(lid) {
 851         LRG& lrg = lrgs(lid);
 852 
 853         // A DEF normally costs block frequency; rematerialized values are
 854         // removed from the DEF sight, so LOWER costs here.
 855         lrg._cost += n->rematerialize() ? 0 : block->_freq;
 856 
 857         if (!liveout.member(lid) && n->Opcode() != Op_SafePoint) {
 858           if (remove_node_if_not_used(block, location, n, lid, &liveout)) {
 859             float_pressure.lower_high_pressure_index();
 860             int_pressure.lower_high_pressure_index();
 861             continue;
 862           }
 863           if (lrg._fat_proj) {
 864             check_for_high_pressure_transition_at_fatproj(block->_reg_pressure, location, lrg, int_pressure, Op_RegI);
 865             check_for_high_pressure_transition_at_fatproj(block->_freg_pressure, location, lrg, float_pressure, Op_RegD);
 866           }
 867         } else {
 868           // A live range ends at its definition, remove the remaining area.
 869           // If the cost is +Inf (which might happen in extreme cases), the lrg area will also be +Inf,
 870           // and +Inf - +Inf = NaN. So let's not do that subtraction.
 871           if (g_isfinite(cost)) {
 872             lrg._area -= cost;
 873           }
 874           assert(lrg._area >= 0.0, "negative spill area" );
 875 
 876           assign_high_score_to_immediate_copies(block, n, lrg, location + 1, last_inst);
 877 
 878           if (liveout.remove(lid)) {
 879             lower_pressure(block, location, lrg, &liveout, int_pressure, float_pressure);
 880           }
 881           uint copy_idx = n->is_Copy();
 882           if (copy_idx) {
 883             uint lid_copy = _lrg_map.live_range_id(n->in(copy_idx));
 884             remove_interference_from_copy(block, location, lid_copy, &liveout, cost, int_pressure, float_pressure);
 885           }




 347 
 348       // Make all inputs live
 349       if (!n->is_Phi()) {      // Phi function uses come from prior block
 350         for(uint k = 1; k < n->req(); k++) {
 351           liveout->insert(_lrg_map.live_range_id(n->in(k)));
 352         }
 353       }
 354 
 355       // 2-address instructions always have the defined value live
 356       // on entry to the instruction, even though it is being defined
 357       // by the instruction.  We pretend a virtual copy sits just prior
 358       // to the instruction and kills the src-def'd register.
 359       // In other words, for 2-address instructions the defined value
 360       // interferes with all inputs.
 361       uint idx;
 362       if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
 363         const MachNode *mach = n->as_Mach();
 364         // Sometimes my 2-address ADDs are commuted in a bad way.
 365         // We generally want the USE-DEF register to refer to the
 366         // loop-varying quantity, to avoid a copy.
 367         Opcodes op = mach->ideal_Opcode();
 368         // Check that mach->num_opnds() == 3 to ensure instruction is
 369         // not subsuming constants, effectively excludes addI_cin_imm
 370         // Can NOT swap for instructions like addI_cin_imm since it
 371         // is adding zero to yhi + carry and the second ideal-input
 372         // points to the result of adding low-halves.
 373         // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
 374         if( (op == Opcodes::Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
 375             n->in(1)->bottom_type()->base() == Type::Int &&
 376             // See if the ADD is involved in a tight data loop the wrong way
 377             n->in(2)->is_Phi() &&
 378             n->in(2)->in(2) == n ) {
 379           Node *tmp = n->in(1);
 380           n->set_req( 1, n->in(2) );
 381           n->set_req( 2, tmp );
 382         }
 383         // Defined value interferes with all inputs
 384         uint lidx = _lrg_map.live_range_id(n->in(idx));
 385         for (uint k = 1; k < n->req(); k++) {
 386           uint kidx = _lrg_map.live_range_id(n->in(k));
 387           if (kidx != lidx) {
 388             _ifg->add_edge(r, kidx);
 389           }
 390         }
 391       }
 392     } // End of forall instructions in block
 393   } // End of forall blocks
 394 }
 395 
 396 #ifdef ASSERT
 397 uint PhaseChaitin::count_int_pressure(IndexSet* liveout) {
 398   IndexSetIterator elements(liveout);
 399   uint lidx = elements.next();
 400   uint cnt = 0;
 401   while (lidx != 0) {
 402     LRG& lrg = lrgs(lidx);
 403     if (lrg.mask_is_nonempty_and_up() &&
 404         !lrg.is_float_or_vector() &&
 405         lrg.mask().overlap(*Matcher::idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)])) {
 406       cnt += lrg.reg_pressure();
 407     }
 408     lidx = elements.next();
 409   }
 410   return cnt;
 411 }
 412 
 413 uint PhaseChaitin::count_float_pressure(IndexSet* liveout) {
 414   IndexSetIterator elements(liveout);
 415   uint lidx = elements.next();
 416   uint cnt = 0;
 417   while (lidx != 0) {
 418     LRG& lrg = lrgs(lidx);
 419     if (lrg.mask_is_nonempty_and_up() && lrg.is_float_or_vector()) {
 420       cnt += lrg.reg_pressure();
 421     }
 422     lidx = elements.next();
 423   }
 424   return cnt;
 425 }
 426 #endif
 427 
 428 /*
 429  * Adjust register pressure down by 1.  Capture last hi-to-low transition,
 430  */
 431 void PhaseChaitin::lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure) {
 432   if (lrg.mask_is_nonempty_and_up()) {
 433     if (lrg.is_float_or_vector()) {
 434       float_pressure.lower(lrg, location);
 435     } else {
 436       // Do not count the SP and flag registers
 437       const RegMask& r = lrg.mask();
 438       if (r.overlap(*Matcher::idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)])) {
 439         int_pressure.lower(lrg, location);
 440       }
 441     }
 442   }
 443   if (_scheduling_info_generated == false) {
 444     assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
 445     assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
 446   }
 447 }
 448 
 449 /* Go to the first non-phi index in a block */
 450 static uint first_nonphi_index(Block* b) {
 451   uint i;
 452   uint end_idx = b->end_idx();
 453   for (i = 1; i < end_idx; i++) {
 454     Node* n = b->get_node(i);
 455     if (!n->is_Phi()) {
 456       break;
 457     }
 458   }
 459   return i;
 460 }
 461 
 462 /*
 463  * Spills could be inserted before a CreateEx node which should be the first
 464  * instruction in a block after Phi nodes. If so, move the CreateEx node up.
 465  */
 466 static void move_exception_node_up(Block* b, uint first_inst, uint last_inst) {
 467   for (uint i = first_inst; i < last_inst; i++) {
 468     Node* ex = b->get_node(i);
 469     if (ex->is_SpillCopy()) {
 470       continue;
 471     }
 472 
 473     if (i > first_inst &&
 474         ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Opcodes::Op_CreateEx) {
 475       b->remove_node(i);
 476       b->insert_node(ex, first_inst);
 477     }
 478     // Stop once a CreateEx or any other node is found
 479     break;
 480   }
 481 }
 482 
 483 /*
 484  * When new live ranges are live, we raise the register pressure
 485  */
 486 void PhaseChaitin::raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure) {
 487   if (lrg.mask_is_nonempty_and_up()) {
 488     if (lrg.is_float_or_vector()) {
 489       float_pressure.raise(lrg);
 490     } else {
 491       // Do not count the SP and flag registers
 492       const RegMask& rm = lrg.mask();
 493       if (rm.overlap(*Matcher::idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)])) {
 494         int_pressure.raise(lrg);
 495       }
 496     }
 497   }
 498 }
 499 
 500 
 501 /*
 502  * Computes the initial register pressure of a block, looking at all live
 503  * ranges in the liveout. The register pressure is computed for both float
 504  * and int/pointer registers.
 505  * Live ranges in the liveout are presumed live for the whole block.
 506  * We add the cost for the whole block to the area of the live ranges initially.
 507  * If a live range gets killed in the block, we'll subtract the unused part of
 508  * the block from the area.
 509  */
 510 void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost) {
 511   IndexSetIterator elements(liveout);
 512   uint lid = elements.next();
 513   while (lid != 0) {


 573 }
 574 
/*
 * Remove dead node if it's not used.
 * We only remove projection nodes if the node "defining" the projection is
 * dead, for example on x86, if we have a dead Add node we remove its
 * RFLAGS node.
 * Returns true if the node at 'location' was removed from the block.
 */
bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uint lid, IndexSet* liveout) {
  Node* def = n->in(0);
  // Non-projections are removable on their own; a projection is only
  // removable when its defining node has a live range and that live
  // range is not in the liveout set (i.e. the def is dead too).
  if (!n->is_Proj() ||
      (_lrg_map.live_range_id(def) && !liveout->member(_lrg_map.live_range_id(def)))) {
    if (n->is_MachProj()) {
      // Don't remove KILL projections if their "defining" nodes have
      // memory effects (have SCMemProj projection node) -
      // they are not dead even when their result is not used.
      // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
      // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
      // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
      // in block in such order that KILL MachProj nodes are processed first.
      if (def->has_out_with(Opcodes::Op_SCMemProj)) {
        return false;
      }
    }
    // Unhook the dead node from the block, the live range, and the graph.
    b->remove_node(location);
    LRG& lrg = lrgs(lid);
    if (lrg._def == n) {
      // This node was the recorded def of its live range; clear it.
      lrg._def = 0;
    }
    n->disconnect_inputs(NULL, C);
    _cfg.unmap_node_from_block(n);
    n->replace_by(C->top());
    return true;
  }
  return false;
}
 609 
 610 /*
 611  * When encountering a fat projection, we might go from a low to high to low
 612  * (since the fat proj only lives at this instruction) going backwards in the
 613  * block. If we find a low to high transition, we record it.
 614  */
 615 void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const Opcodes op_regtype) {
 616   RegMask mask_tmp = lrg.mask();
 617   mask_tmp.AND(*Matcher::idealreg2regmask[static_cast<uint>(op_regtype)]);
 618   pressure.check_pressure_at_fatproj(location, mask_tmp);
 619 }
 620 
 621 /*
 622  * Insure high score for immediate-use spill copies so they get a color.
 623  * All single-use MachSpillCopy(s) that immediately precede their
 624  * use must color early.  If a longer live range steals their
 625  * color, the spill copy will split and may push another spill copy
 626  * further away resulting in an infinite spill-split-retry cycle.
 627  * Assigning a zero area results in a high score() and a good
 628  * location in the simplify list.
 629  */
 630 void PhaseChaitin::assign_high_score_to_immediate_copies(Block* b, Node* n, LRG& lrg, uint next_inst, uint last_inst) {
 631   if (n->is_SpillCopy() &&
 632       lrg.is_singledef() && // A multi defined live range can still split
 633       n->outcnt() == 1 &&   // and use must be in this block
 634       _cfg.get_block_for_node(n->unique_out()) == b) {
 635 
 636     Node* single_use = n->unique_out();
 637     assert(b->find_node(single_use) >= next_inst, "Use must be later in block");


 721       must_spill++;
 722       interfering_lrg._must_spill = 1;
 723       interfering_lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
 724     }
 725     l = elements.next();
 726   }
 727 }
 728 
 729 /*
 730  * Start loop at 1 (skip control edge) for most Nodes. SCMemProj's might be the
 731  * sole use of a StoreLConditional. While StoreLConditionals set memory (the
 732  * SCMemProj use) they also def flags; if that flag def is unused the allocator
 733  * sees a flag-setting instruction with no use of the flags and assumes it's
 734  * dead.  This keeps the (useless) flag-setting behavior alive while also
 735  * keeping the (useful) memory update effect.
 736  */
 737 void PhaseChaitin::add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure) {
 738   JVMState* jvms = n->jvms();
 739   uint debug_start = jvms ? jvms->debug_start() : 999999;
 740 
 741   for (uint k = ((n->Opcode() == Opcodes::Op_SCMemProj) ? 0:1); k < n->req(); k++) {
 742     Node* def = n->in(k);
 743     uint lid = _lrg_map.live_range_id(def);
 744     if (!lid) {
 745       continue;
 746     }
 747     LRG& lrg = lrgs(lid);
 748 
 749     // No use-side cost for spilling debug info
 750     if (k < debug_start) {
 751       // A USE costs twice block frequency (once for the Load, once
 752       // for a Load-delay).  Rematerialized uses only cost once.
 753       lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq * 2));
 754     }
 755 
 756     if (liveout->insert(lid)) {
 757       // Newly live things assumed live from here to top of block
 758       lrg._area += cost;
 759       raise_pressure(b, lrg, int_pressure, float_pressure);
 760       assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
 761       assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");


 837     block->_reg_pressure = 0;
 838     block->_freg_pressure = 0;
 839 
 840     int inst_count = last_inst - first_inst;
 841     double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
 842     assert(cost >= 0.0, "negative spill cost" );
 843 
 844     compute_initial_block_pressure(block, &liveout, int_pressure, float_pressure, cost);
 845 
 846     for (uint location = last_inst; location > 0; location--) {
 847       Node* n = block->get_node(location);
 848       uint lid = _lrg_map.live_range_id(n);
 849 
 850       if(lid) {
 851         LRG& lrg = lrgs(lid);
 852 
 853         // A DEF normally costs block frequency; rematerialized values are
 854         // removed from the DEF sight, so LOWER costs here.
 855         lrg._cost += n->rematerialize() ? 0 : block->_freq;
 856 
 857         if (!liveout.member(lid) && n->Opcode() != Opcodes::Op_SafePoint) {
 858           if (remove_node_if_not_used(block, location, n, lid, &liveout)) {
 859             float_pressure.lower_high_pressure_index();
 860             int_pressure.lower_high_pressure_index();
 861             continue;
 862           }
 863           if (lrg._fat_proj) {
 864             check_for_high_pressure_transition_at_fatproj(block->_reg_pressure, location, lrg, int_pressure, Opcodes::Op_RegI);
 865             check_for_high_pressure_transition_at_fatproj(block->_freg_pressure, location, lrg, float_pressure, Opcodes::Op_RegD);
 866           }
 867         } else {
 868           // A live range ends at its definition, remove the remaining area.
 869           // If the cost is +Inf (which might happen in extreme cases), the lrg area will also be +Inf,
 870           // and +Inf - +Inf = NaN. So let's not do that subtraction.
 871           if (g_isfinite(cost)) {
 872             lrg._area -= cost;
 873           }
 874           assert(lrg._area >= 0.0, "negative spill area" );
 875 
 876           assign_high_score_to_immediate_copies(block, n, lrg, location + 1, last_inst);
 877 
 878           if (liveout.remove(lid)) {
 879             lower_pressure(block, location, lrg, &liveout, int_pressure, float_pressure);
 880           }
 881           uint copy_idx = n->is_Copy();
 882           if (copy_idx) {
 883             uint lid_copy = _lrg_map.live_range_id(n->in(copy_idx));
 884             remove_interference_from_copy(block, location, lid_copy, &liveout, cost, int_pressure, float_pressure);
 885           }


< prev index next >