src/cpu/sparc/vm/sparc.ad

 542   int offset = NativeCall::instruction_size;  // call; delay slot
 543   if (_method_handle_invoke)
 544     offset += 4;  // restore SP
 545   return offset;
 546 }
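For orientation, here is a minimal sketch of the offset arithmetic in lines 542-545, assuming SPARC's 4-byte instruction word and a NativeCall::instruction_size of two words (call plus delay slot); the constants are illustrative stand-ins, not the real HotSpot definitions:

// Sketch only: assumed constants, not the HotSpot definitions.
static const int kBytesPerInstWord = 4;                     // SPARC instruction word
static const int kNativeCallSize   = 2 * kBytesPerInstWord; // call; delay slot

// Mirrors the ret_addr_offset logic above (lines 542-545).
int static_call_ret_addr_offset(bool method_handle_invoke) {
  int offset = kNativeCallSize;       // call; delay slot
  if (method_handle_invoke)
    offset += 4;                      // one extra instruction restores SP
  return offset;
}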
 547 
 548 int MachCallDynamicJavaNode::ret_addr_offset() {
 549   int vtable_index = this->_vtable_index;
 550   if (vtable_index < 0) {
 551     // must be invalid_vtable_index, not nonvirtual_vtable_index
 552     assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
 553     return (NativeMovConstReg::instruction_size +
 554            NativeCall::instruction_size);  // sethi; setlo; call; delay slot
 555   } else {
 556     assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
 557     int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
 558     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
 559     int klass_load_size;
 560     if (UseCompressedKlassPointers) {
 561       assert(Universe::heap() != NULL, "java heap should be initialized");
 562       if (Universe::narrow_klass_base() == NULL)
 563         klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
 564       else
 565         klass_load_size = 3*BytesPerInstWord;
 566     } else {
 567       klass_load_size = 1*BytesPerInstWord;
 568     }
 569     if (Assembler::is_simm13(v_off)) {
 570       return klass_load_size +
 571              (2*BytesPerInstWord +           // ld_ptr, ld_ptr
 572              NativeCall::instruction_size);  // call; delay slot
 573     } else {
 574       return klass_load_size +
 575              (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
 576              NativeCall::instruction_size);  // call; delay slot
 577     }
 578   }
 579 }
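And a companion sketch for MachCallDynamicJavaNode::ret_addr_offset, showing how the two branches compose their byte counts under the same kind of assumed constants (the real sizes come from NativeMovConstReg, the klass-load sequence, and NativeCall):

// Sketch only: assumed constants, not the HotSpot definitions.
static const int kInstWord        = 4;             // SPARC instruction word
static const int kCallSize        = 2 * kInstWord; // call; delay slot
static const int kMovConstRegSize = 2 * kInstWord; // sethi; setlo

int dynamic_call_ret_addr_offset(int vtable_index,
                                 int klass_load_size,   // 1-3 words, as above
                                 bool v_off_is_simm13) {
  if (vtable_index < 0) {
    // Inline-cache path: materialize the IC constant, then call; delay slot.
    return kMovConstRegSize + kCallSize;
  }
  // Vtable path: klass load, then ld_ptr; ld_ptr (small v_off) or
  // sethi; or; ld_ptr; ld_ptr (large v_off), then call; delay slot.
  int loads = v_off_is_simm13 ? 2 : 4;
  return klass_load_size + loads * kInstWord + kCallSize;
}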
 580 
 581 int MachCallRuntimeNode::ret_addr_offset() {
 582 #ifdef _LP64
 583   if (MacroAssembler::is_far_target(entry_point())) {
 584     return NativeFarCall::instruction_size;
 585   } else {


1646   } else {
1647      __ set(offset, O7);
1648      __ add(SP, O7, reg_to_register_object(reg));
1649   }
1650 }
1651 
1652 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1653   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
1654   assert(ra_ == ra_->C->regalloc(), "sanity");
1655   return ra_->C->scratch_emit_size(this);
1656 }
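BoxLockNode sizes itself by emitting into a throwaway buffer and measuring, rather than assuming a fixed encoding. A generic sketch of that measure-by-emitting pattern (the types and names here are hypothetical, not the Compile/PhaseRegAlloc API):

#include <cstddef>

// Hypothetical sketch of the measure-by-emitting pattern used above.
struct ScratchBuffer {
  size_t pos = 0;                      // bytes emitted so far
  void emit_word() { pos += 4; }       // pretend 4-byte instruction
};

template <typename EmitFn>
size_t scratch_emit_size(EmitFn emit) {
  ScratchBuffer buf;                   // throwaway code buffer
  emit(buf);                           // run the node's emitter against it
  return buf.pos;                      // measured size in bytes
}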
1657 
1658 //=============================================================================
1659 #ifndef PRODUCT
1660 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1661   st->print_cr("\nUEP:");
1662 #ifdef    _LP64
1663   if (UseCompressedKlassPointers) {
1664     assert(Universe::heap() != NULL, "java heap should be initialized");
1665     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");


1666     st->print_cr("\tSLL    R_G5,3,R_G5");
1667     if (Universe::narrow_klass_base() != NULL)
1668       st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");

1669   } else {
1670     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
1671   }
1672   st->print_cr("\tCMP    R_G5,R_G3" );
1673   st->print   ("\tTne    xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
1674 #else  // _LP64
1675   st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
1676   st->print_cr("\tCMP    R_G5,R_G3" );
1677   st->print   ("\tTne    icc,R_G0+ST_RESERVED_FOR_USER_0+2");
1678 #endif // _LP64
1679 }
1680 #endif
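The compressed-klass branch of the printout corresponds to decoding a narrow klass pointer: shift left (the SLL, with a shift of 3 assumed here), then add the base only when it is non-null. A hedged C-level sketch of that decode:

#include <cstdint>

// Sketch of the decode the old UEP prints (not the HotSpot API).
uintptr_t decode_klass_old(uint32_t narrow_klass,
                           uintptr_t narrow_klass_base /* may be 0 */) {
  uintptr_t k = (uintptr_t)narrow_klass << 3;   // SLL R_G5,3,R_G5
  if (narrow_klass_base != 0)
    k += narrow_klass_base;                     // ADD R_G5,R_G6_heap_base,R_G5
  return k;
}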
1681 
1682 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1683   MacroAssembler _masm(&cbuf);
1684   Register G5_ic_reg  = reg_to_register_object(Matcher::inline_cache_reg_encode());
1685   Register temp_reg   = G3;
1686   assert( G5_ic_reg != temp_reg, "conflicting registers" );
1687 
1688   // Load klass from receiver


2546     __ set_inst_mark();
2547     int vtable_index = this->_vtable_index;
2548     // MachCallDynamicJavaNode::ret_addr_offset uses this same test
2549     if (vtable_index < 0) {
2550       // must be invalid_vtable_index, not nonvirtual_vtable_index
2551       assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
2552       Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2553       assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
2554       assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
2555       __ ic_call((address)$meth$$method);
2556     } else {
2557       assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
2558       // Just go through the vtable
2559       // get receiver klass (receiver already checked for non-null)
2560       // If we end up going through a c2i adapter, the interpreter expects the method in G5
2561       int off = __ offset();
2562       __ load_klass(O0, G3_scratch);
2563       int klass_load_size;
2564       if (UseCompressedKlassPointers) {
2565         assert(Universe::heap() != NULL, "java heap should be initialized");
2566         if (Universe::narrow_klass_base() == NULL)
2567           klass_load_size = 2*BytesPerInstWord;
2568         else
2569           klass_load_size = 3*BytesPerInstWord;
2570       } else {
2571         klass_load_size = 1*BytesPerInstWord;
2572       }
2573       int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
2574       int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
2575       if (Assembler::is_simm13(v_off)) {
2576         __ ld_ptr(G3, v_off, G5_method);
2577       } else {
2578         // Generate 2 instructions
2579         __ Assembler::sethi(v_off & ~0x3ff, G5_method);
2580         __ or3(G5_method, v_off & 0x3ff, G5_method);
2581         // ld_ptr, set_hi, set
2582         assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
2583                "Unexpected instruction size(s)");
2584         __ ld_ptr(G3, G5_method, G5_method);
2585       }
2586       // NOTE: for vtable dispatches, the vtable entry will never be null.
2587       // However it may very well end up in handle_wrong_method if the
2588       // method is abstract for the particular class.
2589       __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);




 542   int offset = NativeCall::instruction_size;  // call; delay slot
 543   if (_method_handle_invoke)
 544     offset += 4;  // restore SP
 545   return offset;
 546 }
 547 
 548 int MachCallDynamicJavaNode::ret_addr_offset() {
 549   int vtable_index = this->_vtable_index;
 550   if (vtable_index < 0) {
 551     // must be invalid_vtable_index, not nonvirtual_vtable_index
 552     assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
 553     return (NativeMovConstReg::instruction_size +
 554            NativeCall::instruction_size);  // sethi; setlo; call; delay slot
 555   } else {
 556     assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
 557     int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
 558     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
 559     int klass_load_size;
 560     if (UseCompressedKlassPointers) {
 561       assert(Universe::heap() != NULL, "java heap should be initialized");
 562       klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;



 563     } else {
 564       klass_load_size = 1*BytesPerInstWord;
 565     }
 566     if (Assembler::is_simm13(v_off)) {
 567       return klass_load_size +
 568              (2*BytesPerInstWord +           // ld_ptr, ld_ptr
 569              NativeCall::instruction_size);  // call; delay slot
 570     } else {
 571       return klass_load_size +
 572              (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
 573              NativeCall::instruction_size);  // call; delay slot
 574     }
 575   }
 576 }
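This is the substance of the change: instead of open-coding the 2-vs-3 word decision, the new code asks a MacroAssembler helper for the size of the decode sequence and adds one word for the narrow-klass load itself. A sketch of that shape, with the helper's internal logic assumed for illustration:

#include <cstdint>

// Sketch only: the branch structure inside the helper is an assumption;
// the real MacroAssembler::instr_size_for_decode_klass_not_null derives it
// from the current narrow-klass base and shift.
static const int kWord = 4;  // SPARC instruction word

int decode_klass_size_sketch(uintptr_t base, int shift) {
  if (base == 0) return (shift == 0) ? 0 : 1 * kWord;  // shift only, or nothing
  return 3 * kWord;                                    // assumed: set base + add
}

int klass_load_size_new(uintptr_t base, int shift) {
  // one word to load the narrow klass, plus the decode sequence
  return 1 * kWord + decode_klass_size_sketch(base, shift);
}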
 577 
 578 int MachCallRuntimeNode::ret_addr_offset() {
 579 #ifdef _LP64
 580   if (MacroAssembler::is_far_target(entry_point())) {
 581     return NativeFarCall::instruction_size;
 582   } else {


1643   } else {
1644      __ set(offset, O7);
1645      __ add(SP, O7, reg_to_register_object(reg));
1646   }
1647 }
1648 
1649 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1650   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
1651   assert(ra_ == ra_->C->regalloc(), "sanity");
1652   return ra_->C->scratch_emit_size(this);
1653 }
1654 
1655 //=============================================================================
1656 #ifndef PRODUCT
1657 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1658   st->print_cr("\nUEP:");
1659 #ifdef    _LP64
1660   if (UseCompressedKlassPointers) {
1661     assert(Universe::heap() != NULL, "java heap should be initialized");
1662     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
1663     st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
1664     if (Universe::narrow_klass_shift() != 0) {
1665       st->print_cr("\tSLL    R_G5,3,R_G5");
1666     }
1667     st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
1668     st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
1669   } else {
1670     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
1671   }
1672   st->print_cr("\tCMP    R_G5,R_G3" );
1673   st->print   ("\tTne    xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
1674 #else  // _LP64
1675   st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
1676   st->print_cr("\tCMP    R_G5,R_G3" );
1677   st->print   ("\tTne    icc,R_G0+ST_RESERVED_FOR_USER_0+2");
1678 #endif // _LP64
1679 }
1680 #endif
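Compared with the old printout, the new UEP loads Universe::narrow_klass_base into the scratch heap-base register, adds it unconditionally (only the shift is conditional now), and then restores the register to Universe::narrow_ptrs_base. A hedged sketch of the decode it describes:

#include <cstdint>

// Sketch of the decode the new UEP prints (not the HotSpot API).
uintptr_t decode_klass_new(uint32_t narrow_klass,
                           uintptr_t narrow_klass_base,
                           int narrow_klass_shift) {
  uintptr_t k = narrow_klass;
  if (narrow_klass_shift != 0)
    k <<= narrow_klass_shift;      // SLL R_G5,3,R_G5 (a shift of 3 assumed)
  return k + narrow_klass_base;    // ADD R_G5,R_G6_heap_base,R_G5
}
// Afterwards R_G6_heap_base is reloaded with Universe::narrow_ptrs_base.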
1681 
1682 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1683   MacroAssembler _masm(&cbuf);
1684   Register G5_ic_reg  = reg_to_register_object(Matcher::inline_cache_reg_encode());
1685   Register temp_reg   = G3;
1686   assert( G5_ic_reg != temp_reg, "conflicting registers" );
1687 
1688   // Load klass from receiver


2546     __ set_inst_mark();
2547     int vtable_index = this->_vtable_index;
2548     // MachCallDynamicJavaNode::ret_addr_offset uses this same test
2549     if (vtable_index < 0) {
2550       // must be invalid_vtable_index, not nonvirtual_vtable_index
2551       assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
2552       Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2553       assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
2554       assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
2555       __ ic_call((address)$meth$$method);
2556     } else {
2557       assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
2558       // Just go through the vtable
2559       // get receiver klass (receiver already checked for non-null)
2560       // If we end up going through a c2i adapter, the interpreter expects the method in G5
2561       int off = __ offset();
2562       __ load_klass(O0, G3_scratch);
2563       int klass_load_size;
2564       if (UseCompressedKlassPointers) {
2565         assert(Universe::heap() != NULL, "java heap should be initialized");
2566         klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;



2567       } else {
2568         klass_load_size = 1*BytesPerInstWord;
2569       }
2570       int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
2571       int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
2572       if (Assembler::is_simm13(v_off)) {
2573         __ ld_ptr(G3, v_off, G5_method);
2574       } else {
2575         // Generate 2 instructions
2576         __ Assembler::sethi(v_off & ~0x3ff, G5_method);
2577         __ or3(G5_method, v_off & 0x3ff, G5_method);
2578         // ld_ptr, set_hi, set
2579         assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
2580                "Unexpected instruction size(s)");
2581         __ ld_ptr(G3, G5_method, G5_method);
2582       }
2583       // NOTE: for vtable dispatches, the vtable entry will never be null.
2584       // However it may very well end up in handle_wrong_method if the
2585       // method is abstract for the particular class.
2586       __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

