< prev index next >

src/hotspot/share/opto/lcm.cpp

Print this page




 259       const Node* base = mach->get_base_and_disp(offset, adr_type);
 260       if (base == NULL || base == NodeSentinel) {
 261         // Narrow oop address doesn't have base, only index.
 262         // Give up if offset is beyond page size or if heap base is not protected.
 263         if (val->bottom_type()->isa_narrowoop() &&
 264             (MacroAssembler::needs_explicit_null_check(offset) ||
 265              !Universe::narrow_oop_use_implicit_null_checks()))
 266           continue;
 267         // cannot reason about it; is probably not implicit null exception
 268       } else {
 269         const TypePtr* tptr;
 270         if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
 271                                   Universe::narrow_klass_shift() == 0)) {
 272           // 32-bit narrow oop can be the base of address expressions
 273           tptr = base->get_ptr_type();
 274         } else {
 275           // only regular oops are expected here
 276           tptr = base->bottom_type()->is_ptr();
 277         }
 278         // Give up if offset is not a compile-time constant.
 279         if (offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot)
 280           continue;
 281         offset += tptr->_offset; // correct if base is offseted
 282         // Give up if reference is beyond page size.
 283         if (MacroAssembler::needs_explicit_null_check(offset))
 284           continue;
 285         // Give up if base is a decode node and the heap base is not protected.
 286         if (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_DecodeN &&
 287             !Universe::narrow_oop_use_implicit_null_checks())
 288           continue;
 289       }
 290     }
 291 
 292     // Check ctrl input to see if the null-check dominates the memory op
 293     Block *cb = get_block_for_node(mach);
 294     cb = cb->_idom;             // Always hoist at least 1 block
 295     if( !was_store ) {          // Stores can be hoisted only one block
 296       while( cb->_dom_depth > (block->_dom_depth + 1))
 297         cb = cb->_idom;         // Hoist loads as far as we want
 298       // The non-null-block should dominate the memory op, too. Live
 299       // range spilling will insert a spill in the non-null-block if it
 300       // needs to spill the memory op for an implicit null check.
 301       if (cb->_dom_depth == (block->_dom_depth + 1)) {


 826     // Children of projections are now all ready
 827     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 828       Node* m = n->fast_out(j); // Get user
 829       if(get_block_for_node(m) != block) {
 830         continue;
 831       }
 832       if( m->is_Phi() ) continue;
 833       int m_cnt = ready_cnt.at(m->_idx) - 1;
 834       ready_cnt.at_put(m->_idx, m_cnt);
 835       if( m_cnt == 0 )
 836         worklist.push(m);
 837     }
 838 
 839   }
 840 
 841   // Act as if the call defines the Frame Pointer.
 842   // Certainly the FP is alive and well after the call.
 843   regs.Insert(_matcher.c_frame_pointer());
 844 
 845   // Set all registers killed and not already defined by the call.
 846   uint r_cnt = mcall->tf()->range()->cnt();
 847   int op = mcall->ideal_Opcode();
 848   MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
 849   map_node_to_block(proj, block);
 850   block->insert_node(proj, node_cnt++);
 851 
 852   // Select the right register save policy.
 853   const char *save_policy = NULL;
 854   switch (op) {
 855     case Op_CallRuntime:
 856     case Op_CallLeaf:
 857     case Op_CallLeafNoFP:
 858       // Calling C code so use C calling convention
 859       save_policy = _matcher._c_reg_save_policy;
 860       break;
 861 
 862     case Op_CallStaticJava:
 863     case Op_CallDynamicJava:
 864       // Calling Java code so use Java calling convention
 865       save_policy = _matcher._register_save_policy;
 866       break;




 259       const Node* base = mach->get_base_and_disp(offset, adr_type);
 260       if (base == NULL || base == NodeSentinel) {
 261         // Narrow oop address doesn't have base, only index.
 262         // Give up if offset is beyond page size or if heap base is not protected.
 263         if (val->bottom_type()->isa_narrowoop() &&
 264             (MacroAssembler::needs_explicit_null_check(offset) ||
 265              !Universe::narrow_oop_use_implicit_null_checks()))
 266           continue;
 267         // cannot reason about it; is probably not implicit null exception
 268       } else {
 269         const TypePtr* tptr;
 270         if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
 271                                   Universe::narrow_klass_shift() == 0)) {
 272           // 32-bit narrow oop can be the base of address expressions
 273           tptr = base->get_ptr_type();
 274         } else {
 275           // only regular oops are expected here
 276           tptr = base->bottom_type()->is_ptr();
 277         }
 278         // Give up if offset is not a compile-time constant.
 279         if (offset == Type::OffsetBot || tptr->offset() == Type::OffsetBot)
 280           continue;
 281         offset += tptr->offset(); // correct if base is offseted
 282         // Give up if reference is beyond page size.
 283         if (MacroAssembler::needs_explicit_null_check(offset))
 284           continue;
 285         // Give up if base is a decode node and the heap base is not protected.
 286         if (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_DecodeN &&
 287             !Universe::narrow_oop_use_implicit_null_checks())
 288           continue;
 289       }
 290     }
 291 
 292     // Check ctrl input to see if the null-check dominates the memory op
 293     Block *cb = get_block_for_node(mach);
 294     cb = cb->_idom;             // Always hoist at least 1 block
 295     if( !was_store ) {          // Stores can be hoisted only one block
 296       while( cb->_dom_depth > (block->_dom_depth + 1))
 297         cb = cb->_idom;         // Hoist loads as far as we want
 298       // The non-null-block should dominate the memory op, too. Live
 299       // range spilling will insert a spill in the non-null-block if it
 300       // needs to spill the memory op for an implicit null check.
 301       if (cb->_dom_depth == (block->_dom_depth + 1)) {


 826     // Children of projections are now all ready
 827     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 828       Node* m = n->fast_out(j); // Get user
 829       if(get_block_for_node(m) != block) {
 830         continue;
 831       }
 832       if( m->is_Phi() ) continue;
 833       int m_cnt = ready_cnt.at(m->_idx) - 1;
 834       ready_cnt.at_put(m->_idx, m_cnt);
 835       if( m_cnt == 0 )
 836         worklist.push(m);
 837     }
 838 
 839   }
 840 
 841   // Act as if the call defines the Frame Pointer.
 842   // Certainly the FP is alive and well after the call.
 843   regs.Insert(_matcher.c_frame_pointer());
 844 
 845   // Set all registers killed and not already defined by the call.
 846   uint r_cnt = mcall->tf()->range_cc()->cnt();
 847   int op = mcall->ideal_Opcode();
 848   MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
 849   map_node_to_block(proj, block);
 850   block->insert_node(proj, node_cnt++);
 851 
 852   // Select the right register save policy.
 853   const char *save_policy = NULL;
 854   switch (op) {
 855     case Op_CallRuntime:
 856     case Op_CallLeaf:
 857     case Op_CallLeafNoFP:
 858       // Calling C code so use C calling convention
 859       save_policy = _matcher._c_reg_save_policy;
 860       break;
 861 
 862     case Op_CallStaticJava:
 863     case Op_CallDynamicJava:
 864       // Calling Java code so use Java calling convention
 865       save_policy = _matcher._register_save_policy;
 866       break;


< prev index next >