--- old/src/cpu/aarch64/vm/aarch64.ad 2015-10-30 00:18:06.000000000 +0300
+++ new/src/cpu/aarch64/vm/aarch64.ad 2015-10-30 00:18:06.000000000 +0300
@@ -1079,10 +1079,10 @@
 // and for a volatile write we need
 //
 //   stlr
-// 
+//
 // Alternatively, we can implement them by pairing a normal
 // load/store with a memory barrier. For a volatile read we need
-// 
+//
 //   ldr
 //   dmb ishld
 //
@@ -1240,7 +1240,7 @@
 // Alternatively, we can elide generation of the dmb instructions
 // and plant the alternative CompareAndSwap macro-instruction
 // sequence (which uses ldaxr).
-// 
+//
 // Of course, the above only applies when we see these signature
 // configurations. We still want to plant dmb instructions in any
 // other cases where we may see a MemBarAcquire, MemBarRelease or
@@ -1367,7 +1367,7 @@
   opcode = parent->Opcode();
   return opcode == Op_MemBarRelease;
 }
- 
+
 // 2) card mark detection helper
 
 // helper predicate which can be used to detect a volatile membar
@@ -1383,7 +1383,7 @@
 //   true
 //
 // iii) the node's Mem projection feeds a StoreCM node.
- 
+
 bool is_card_mark_membar(const MemBarNode *barrier)
 {
   if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
@@ -1402,7 +1402,7 @@
       return true;
     }
   }
- 
+
   return false;
 }
 
@@ -1430,7 +1430,7 @@
 // where
 //  || and \\ represent Ctl and Mem feeds via Proj nodes
 //  | \ and / indicate further routing of the Ctl and Mem feeds
-// 
+//
 // this is the graph we see for non-object stores. however, for a
 // volatile Object store (StoreN/P) we may see other nodes below the
 // leading membar because of the need for a GC pre- or post-write
@@ -1592,7 +1592,7 @@
 // ordering but neither will a releasing store (stlr). The latter
 // guarantees that the object put is visible but does not guarantee
 // that writes by other threads have also been observed.
-// 
+//
 // So, returning to the task of translating the object put and the
 // leading/trailing membar nodes: what do the non-normal node graph
 // look like for these 2 special cases? and how can we determine the
@@ -1731,7 +1731,7 @@
 //   |  |  |  |
 // C |  M |  M |  M |
 //    \   |  |  /
-//     . . . 
+//     . . .
 //   (post write subtree elided)
 //    . . .
 //  C \ M /
@@ -1812,12 +1812,12 @@
 //   |  |  |  /  /
 //   | Region . . . Phi[M] _____/
 //   |    /        |  /
-//   |   |        / 
+//   |   |        /
 //   |   . . .  . . .  |  /
 //   |    /           | /
 // Region  |  |  Phi[M]
 //   |     |  |  / Bot
-//    \   MergeMem 
+//    \   MergeMem
 //     \    /
 //     MemBarVolatile
 //
@@ -1858,7 +1858,7 @@
 // to a trailing barrier via a MergeMem. That feed is either direct
 // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
 // memory flow (for G1).
-// 
+//
 // The predicates controlling generation of instructions for store
 // and barrier nodes employ a few simple helper functions (described
 // below) which identify the presence or absence of all these
@@ -2112,8 +2112,8 @@
       x = x->in(MemNode::Memory);
     } else {
       // the merge should get its Bottom mem feed from the leading membar
-      x = mm->in(Compile::AliasIdxBot);      
-    } 
+      x = mm->in(Compile::AliasIdxBot);
+    }
 
     // ensure this is a non control projection
     if (!x->is_Proj() || x->is_CFG()) {
@@ -2190,12 +2190,12 @@
 //   . . .
 //     |
 //   MemBarVolatile (card mark)
-//     |   |     
+//     |   |
 //     | StoreCM
 //     |   |
 //     |   . . .
-//  Bot |  / 
-//   MergeMem 
+//  Bot |  /
+//   MergeMem
 //     |
 //     |
 //   MemBarVolatile {trailing}
@@ -2203,10 +2203,10 @@
 // 2)
 //   MemBarRelease/CPUOrder (leading)
 //    |
-//    | 
+//    |
 //    |\ . . .
-//    | \ | 
-//    |  \ MemBarVolatile (card mark) 
+//    | \ |
+//    |  \ MemBarVolatile (card mark)
 //    |   \  |     |
 //     \   \ | StoreCM    . . .
 //      \   \ |
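
The two translation strategies spelled out in the comments above (ldar/stlr versus a plain access paired with dmb barriers) are what the predicates in this file ultimately choose between. A minimal standalone sketch of that decision, using toy types and illustrative names rather than the real ideal-graph predicates:

#include <cstdio>

enum Access { VolatileRead, VolatileWrite };

// Chooses between the two translations described above, assuming the membar
// pattern around the access matched one of the recognized signatures (which
// is the job of the predicates in this file).
static const char* translate(Access a, bool use_acquire_release_form) {
  if (use_acquire_release_form) {
    return a == VolatileRead ? "ldar" : "stlr";
  }
  // fall-back pairing of a plain access with explicit barriers
  return a == VolatileRead ? "ldr; dmb ishld" : "dmb ish; str; dmb ish";
}

int main() {
  printf("%s\n", translate(VolatileRead, true));    // ldar
  printf("%s\n", translate(VolatileWrite, false));  // dmb ish; str; dmb ish
  return 0;
}
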
@@ -2231,7 +2231,7 @@
 //   | \  \  | StoreCM . . .
 //   |  \  \  |
 //    \  \  Phi
-//     \  \ / 
+//     \  \ /
 //      \ Phi
 //       \ /
 // Phi . . .
@@ -2506,7 +2506,7 @@
     return (x->is_Load() && x->as_Load()->is_acquire());
   }
 
- 
+
   // now check for an unsafe volatile get
 
   // need to check for
@@ -2644,7 +2644,7 @@
   }
 
   membar = child_membar(membar);
- 
+
   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
     return false;
   }
@@ -2703,7 +2703,7 @@
 
   // first we check if this is part of a card mark. if so then we have
   // to generate a StoreLoad barrier
- 
+
   if (is_card_mark_membar(mbvol)) {
     return false;
   }
@@ -2769,7 +2769,7 @@
   if (!is_card_mark_membar(mbvol)) {
     return true;
   }
- 
+
   // we found a card mark -- just make sure we have a trailing barrier
 
   return (card_mark_to_trailing(mbvol) != NULL);
@@ -2808,7 +2808,7 @@
 
   assert(barrier->Opcode() == Op_MemBarCPUOrder,
          "CAS not fed by cpuorder membar!");
- 
+
   MemBarNode *b = parent_membar(barrier);
   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
           "CAS not fed by cpuorder+release membar pair!");
@@ -4657,31 +4657,31 @@
     if (!_method) {
       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
-    } else if (_optimized_virtual) {
-      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
     } else {
-      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
-    }
-    if (call == NULL) {
-      ciEnv::current()->record_failure("CodeCache is full");
-      return;
-    }
+      int method_index = resolved_method_index(cbuf);
+      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
+                                                  : static_call_Relocation::spec(method_index);
+      call = __ trampoline_call(Address(addr, rspec), &cbuf);
 
-    if (_method) {
       // Emit stub for static call
       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
       if (stub == NULL) {
-        ciEnv::current()->record_failure("CodeCache is full"); 
+        ciEnv::current()->record_failure("CodeCache is full");
         return;
       }
     }
+    if (call == NULL) {
+      ciEnv::current()->record_failure("CodeCache is full");
+      return;
+    }
   %}
 
   enc_class aarch64_enc_java_dynamic_call(method meth) %{
     MacroAssembler _masm(&cbuf);
-    address call = __ ic_call((address)$meth$$method);
+    int method_index = resolved_method_index(cbuf);
+    address call = __ ic_call((address)$meth$$method, method_index);
     if (call == NULL) {
-      ciEnv::current()->record_failure("CodeCache is full"); 
+      ciEnv::current()->record_failure("CodeCache is full");
       return;
     }
   %}
@@ -4706,7 +4706,7 @@
     if (cb) {
       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
       if (call == NULL) {
-        ciEnv::current()->record_failure("CodeCache is full"); 
+        ciEnv::current()->record_failure("CodeCache is full");
         return;
       }
     } else {
--- old/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp 2015-10-30 00:18:08.000000000 +0300
+++ new/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp 2015-10-30 00:18:07.000000000 +0300
@@ -732,8 +732,8 @@
   return stub;
 }
 
-address MacroAssembler::ic_call(address entry) {
-  RelocationHolder rh = virtual_call_Relocation::spec(pc());
+address MacroAssembler::ic_call(address entry, jint method_index) {
+  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
   // address const_ptr = long_constant((jlong)Universe::non_oop_word());
   // unsigned long offset;
   // ldr_constant(rscratch2, const_ptr);
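
With ic_call now threading a method_index through to virtual_call_Relocation::spec, a call site can record which resolved Method* it targets while keeping the old behaviour as the default. A standalone model of that shape (simplified stand-in types, not the HotSpot API):

#include <cassert>
#include <cstdint>

// Toy stand-ins for RelocationHolder / virtual_call_Relocation.
struct VirtualCallSpec {
  intptr_t cached_value;  // position of the set-oop instruction
  int32_t  method_index;  // 0 is reserved and means "no method recorded"
};

// Mirrors the new spec(pc(), method_index) shape: the index defaults to 0,
// so existing call sites remain valid unchanged.
static VirtualCallSpec make_spec(intptr_t pc, int32_t method_index = 0) {
  return VirtualCallSpec{pc, method_index};
}

int main() {
  VirtualCallSpec old_style = make_spec(0x40);
  VirtualCallSpec new_style = make_spec(0x40, 3);
  assert(old_style.method_index == 0);  // behaves exactly as before
  assert(new_style.method_index == 3);  // printing code can resolve a Method*
  return 0;
}
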
--- old/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp 2015-10-30 00:18:09.000000000 +0300
+++ new/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp 2015-10-30 00:18:08.000000000 +0300
@@ -980,7 +980,7 @@
   }
 
   // Emit the CompiledIC call idiom
-  address ic_call(address entry);
+  address ic_call(address entry, jint method_index = 0);
 
 public:
 
--- old/src/cpu/ppc/vm/ppc.ad 2015-10-30 00:18:10.000000000 +0300
+++ new/src/cpu/ppc/vm/ppc.ad 2015-10-30 00:18:09.000000000 +0300
@@ -3399,11 +3399,13 @@
       const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
       const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
+      // Emit the trampoline stub which will be related to the branch-and-link below.
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
       if (ciEnv::current()->failing()) { return; } // Code cache may be full.
-      __ relocate(_optimized_virtual ?
-                  relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
+      int method_index = resolved_method_index(cbuf);
+      __ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
+                                     : static_call_Relocation::spec(method_index));
     }
 
     // The real call.
@@ -3416,7 +3418,7 @@
     // The stub for call to interpreter.
     address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
     if (stub == NULL) {
-      ciEnv::current()->record_failure("CodeCache is full"); 
+      ciEnv::current()->record_failure("CodeCache is full");
       return;
     }
   }
@@ -3447,6 +3449,8 @@
     const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
     const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
+    assert(!_override_symbolic_info, "resolved method overriding not supported");
+    // Emit the trampoline stub which will be related to the branch-and-link below.
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
     if (ra_->C->env()->failing()) { return; } // Code cache may be full.
 
@@ -3465,7 +3469,7 @@
     // The stub for call to interpreter.
     address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
     if (stub == NULL) {
-      ciEnv::current()->record_failure("CodeCache is full"); 
+      ciEnv::current()->record_failure("CodeCache is full");
       return;
     }
 
@@ -3519,8 +3523,8 @@
     const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
     assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
            "should be load from TOC");
-
-    __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
+    int method_index = resolved_method_index(cbuf);
+    __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
   }
 
   // At this point I do not have the address of the trampoline stub,
@@ -6912,7 +6916,7 @@
   n_compare->_opnds[0] = op_crx;
   n_compare->_opnds[1] = op_src;
   n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);
-  
+
   decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
   n2->add_req(n_region, n_src, n1);
   n2->_opnds[0] = op_dst;
@@ -10589,7 +10593,7 @@
 
 instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
   // Needs matchrule, see cmpDUnordered.
-  match(Set crx (CmpF src1 src2)); 
+  match(Set crx (CmpF src1 src2));
   // no match-rule, false predicate
   predicate(false);
 
@@ -10698,13 +10702,13 @@
 %}
 
 instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
-  // Needs matchrule so that ideal opcode is Cmp. This causes that gcm places the 
-  // node right before the conditional move using it. 
+  // Needs matchrule so that ideal opcode is Cmp. This causes that gcm places the
+  // node right before the conditional move using it.
   // In jck test api/java_awt/geom/QuadCurve2DFloat/index.html#SetCurveTesttestCase7,
   // compilation of java.awt.geom.RectangularShape::getBounds()Ljava/awt/Rectangle
   // crashed in register allocation where the flags Reg between cmpDUnoredered and a
   // conditional move was supposed to be spilled.
-  match(Set crx (CmpD src1 src2)); 
+  match(Set crx (CmpD src1 src2));
   // False predicate, shall not be matched.
   predicate(false);
 
--- old/src/cpu/sparc/vm/assembler_sparc.hpp 2015-10-30 00:18:11.000000000 +0300
+++ new/src/cpu/sparc/vm/assembler_sparc.hpp 2015-10-30 00:18:11.000000000 +0300
@@ -812,6 +812,8 @@
   inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
 
+  inline void call( address d, RelocationHolder const& rspec );
+
  public:
 
   // pp 150
--- old/src/cpu/sparc/vm/assembler_sparc.inline.hpp 2015-10-30 00:18:12.000000000 +0300
+++ new/src/cpu/sparc/vm/assembler_sparc.inline.hpp 2015-10-30 00:18:11.000000000 +0300
@@ -76,6 +76,8 @@
 inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
 inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }
 
+inline void Assembler::call( address d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rspec); has_delay_slot(); assert(rspec.type() != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
+
 inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
 inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
--- old/src/cpu/sparc/vm/macroAssembler_sparc.cpp 2015-10-30 00:18:12.000000000 +0300
+++ new/src/cpu/sparc/vm/macroAssembler_sparc.cpp 2015-10-30 00:18:12.000000000 +0300
@@ -767,8 +767,8 @@
 }
 
 
-void MacroAssembler::ic_call(address entry, bool emit_delay) {
-  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
+void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
+  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
   patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
   relocate(rspec);
   call(entry, relocInfo::none);
@@ -777,7 +777,6 @@
   }
 }
 
-
 void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                       Register tmp, Register obj) {
 #ifdef _LP64
--- old/src/cpu/sparc/vm/macroAssembler_sparc.hpp 2015-10-30 00:18:13.000000000 +0300
+++ new/src/cpu/sparc/vm/macroAssembler_sparc.hpp 2015-10-30 00:18:13.000000000 +0300
@@ -729,7 +729,11 @@
   // Check if the call target is out of wdisp30 range (relative to the code cache)
   static inline bool is_far_target(address d);
   inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
+  inline void call( address d, RelocationHolder const& rspec);
+
   inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
+  inline void call( Label& L, RelocationHolder const& rspec);
+
   inline void callr( Register s1, Register s2 );
   inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 
@@ -1144,7 +1148,7 @@
   void set_vm_result(Register oop_result);
 
   // Emit the CompiledIC call idiom
-  void ic_call(address entry, bool emit_delay = true);
+  void ic_call(address entry, bool emit_delay = true, jint method_index = 0);
 
   // if call_VM_base was called with check_exceptions=false, then call
   // check_and_forward_exception to handle exceptions when it is safe
--- old/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp 2015-10-30 00:18:14.000000000 +0300
+++ new/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp 2015-10-30 00:18:14.000000000 +0300
@@ -298,6 +298,10 @@
 // expense of relocation and if we overflow the displacement
 // of the quick call instruction.
 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
+  MacroAssembler::call(d, Relocation::spec_simple(rt));
+}
+
+inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
 #ifdef _LP64
   intptr_t disp;
   // NULL is ok because it will be relocated later.
@@ -309,14 +313,14 @@
   // Is this address within range of the call instruction?
   // If not, use the expensive instruction sequence
   if (is_far_target(d)) {
-    relocate(rt);
+    relocate(rspec);
     AddressLiteral dest(d);
     jumpl_to(dest, O7, O7);
   } else {
-    Assembler::call(d, rt);
+    Assembler::call(d, rspec);
   }
 #else
-  Assembler::call( d, rt );
+  Assembler::call( d, rspec );
 #endif
 }
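
The SPARC change above uses a common forwarding pattern: the old relocType entry point becomes a thin wrapper that builds a RelocationHolder via Relocation::spec_simple and delegates, so the emission logic lives in one place while existing callers compile unchanged. Sketched standalone with stand-in types:

#include <cstdio>

enum RelocType { relocNone, relocRuntimeCall, relocVirtualCall };

struct RelocationHolder {
  RelocType rt;
  static RelocationHolder spec_simple(RelocType rt) { return {rt}; }
  RelocType type() const { return rt; }
};

struct Assembler {
  // The single real implementation takes the full holder.
  void call(const RelocationHolder& rspec) {
    printf("call with reloc type %d\n", (int)rspec.type());
  }
  // The legacy relocType entry point survives as a thin wrapper.
  void call(RelocType rt) { call(RelocationHolder::spec_simple(rt)); }
};

int main() {
  Assembler a;
  a.call(relocRuntimeCall);  // old callers go through the wrapper
  return 0;
}
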
--- old/src/cpu/sparc/vm/sparc.ad 2015-10-30 00:18:15.000000000 +0300
+++ new/src/cpu/sparc/vm/sparc.ad 2015-10-30 00:18:14.000000000 +0300
@@ -1001,7 +1001,7 @@
 #endif
 }
 
-void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
+void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder const& rspec, bool preserve_g2 = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
   // it creates a PcDesc structure which tracks the offset of a call
@@ -1023,7 +1023,7 @@
   int startpos = __ offset();
 #endif /* ASSERT */
 
-  __ call((address)entry_point, rtype);
+  __ call((address)entry_point, rspec);
 
   if (preserve_g2) __ delayed()->mov(G2, L7);
   else __ delayed()->nop();
@@ -1905,7 +1905,7 @@
 }
 
 // Current (2013) SPARC platforms need to read original key
-// to construct decryption expanded key 
+// to construct decryption expanded key
 const bool Matcher::pass_original_key_for_aes() {
   return true;
 }
@@ -2582,8 +2582,7 @@
   enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
     // CALL directly to the runtime
     // The user of this is responsible for ensuring that R_L7 is empty (killed).
-    emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
-                    /*preserve_g2=*/true);
+    emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec(), /*preserve_g2=*/true);
   %}
 
   enc_class preserve_SP %{
@@ -2600,19 +2599,20 @@
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
     if (!_method) {
-      emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
-    } else if (_optimized_virtual) {
-      emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
+      emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec());
     } else {
-      emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
-    }
-    if (_method) {  // Emit stub for static call.
+      int method_index = resolved_method_index(cbuf);
+      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
+                                                  : static_call_Relocation::spec(method_index);
+      emit_call_reloc(cbuf, $meth$$method, rspec);
+
+      // Emit stub for static call.
       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
       // Stub does not fit into scratch buffer if TraceJumps is enabled
       if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
         ciEnv::current()->record_failure("CodeCache is full");
         return;
-      } 
+      }
     }
   %}
 
@@ -2627,7 +2627,7 @@
       Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
       assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
       assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
-      __ ic_call((address)$meth$$method);
+      __ ic_call((address)$meth$$method, /*emit_delay=*/true, resolved_method_index(cbuf));
     } else {
       assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
       // Just go thru the vtable
@@ -3358,10 +3358,10 @@
 // AVOID_NONE   - instruction can be placed anywhere
 // AVOID_BEFORE - instruction cannot be placed after an
 //                instruction with MachNode::AVOID_AFTER
-// AVOID_AFTER  - the next instruction cannot be the one 
+// AVOID_AFTER  - the next instruction cannot be the one
 //                with MachNode::AVOID_BEFORE
-// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at 
-//                          the same time
+// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
+//                          the same time
 ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
 ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
--- old/src/cpu/x86/vm/macroAssembler_x86.cpp 2015-10-30 00:18:16.000000000 +0300
+++ new/src/cpu/x86/vm/macroAssembler_x86.cpp 2015-10-30 00:18:16.000000000 +0300
@@ -2257,8 +2257,8 @@
   }
 }
 
-void MacroAssembler::ic_call(address entry) {
-  RelocationHolder rh = virtual_call_Relocation::spec(pc());
+void MacroAssembler::ic_call(address entry, jint method_index) {
+  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
   movptr(rax, (intptr_t)Universe::non_oop_word());
   call(AddressLiteral(entry, rh));
 }
--- old/src/cpu/x86/vm/macroAssembler_x86.hpp 2015-10-30 00:18:17.000000000 +0300
+++ new/src/cpu/x86/vm/macroAssembler_x86.hpp 2015-10-30 00:18:17.000000000 +0300
@@ -851,7 +851,7 @@
   void call(AddressLiteral entry);
 
   // Emit the CompiledIC call idiom
-  void ic_call(address entry);
+  void ic_call(address entry, jint method_index = 0);
 
   // Jumps
 
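
The same reshaping recurs in ppc.ad, sparc.ad, x86_32.ad and x86_64.ad: the old three-way branch over bare relocInfo types collapses into "runtime call" versus "Java call with a method-index-carrying spec". A toy model of the selection (illustrative names only, not the ad-file encodings):

#include <cassert>
#include <string>

struct Spec { std::string kind; int method_index; };

// has_method corresponds to _method, optimized_virtual to _optimized_virtual.
static Spec call_spec(bool has_method, bool optimized_virtual, int method_index) {
  if (!has_method) {
    return {"runtime_call", 0};  // runtime stubs carry no method index
  }
  return optimized_virtual ? Spec{"opt_virtual_call", method_index}
                           : Spec{"static_call", method_index};
}

int main() {
  assert(call_spec(false, false, 7).kind == "runtime_call");
  assert(call_spec(true,  true,  7).kind == "opt_virtual_call");
  assert(call_spec(true,  false, 7).method_index == 7);
  return 0;
}
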
--- old/src/cpu/x86/vm/x86_32.ad 2015-10-30 00:18:18.000000000 +0300
+++ new/src/cpu/x86/vm/x86_32.ad 2015-10-30 00:18:17.000000000 +0300
@@ -1900,28 +1900,29 @@
     // who we intended to call.
     cbuf.set_insts_mark();
     $$$emit8$primary;
+
     if (!_method) {
       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
-                     runtime_call_Relocation::spec(), RELOC_IMM32 );
-    } else if (_optimized_virtual) {
-      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
-                     opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
+                     runtime_call_Relocation::spec(),
+                     RELOC_IMM32);
     } else {
+      int method_index = resolved_method_index(cbuf);
+      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
+                                                  : static_call_Relocation::spec(method_index);
       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
-                     static_call_Relocation::spec(), RELOC_IMM32 );
-    }
-    if (_method) {  // Emit stub for static call.
+                     rspec, RELOC_DISP32);
+      // Emit stubs for static call.
       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
       if (stub == NULL) {
         ciEnv::current()->record_failure("CodeCache is full");
         return;
-      } 
+      }
     }
   %}
 
   enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
     MacroAssembler _masm(&cbuf);
-    __ ic_call((address)$meth$$method);
+    __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
   %}
 
   enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
--- old/src/cpu/x86/vm/x86_64.ad 2015-10-30 00:18:19.000000000 +0300
+++ new/src/cpu/x86/vm/x86_64.ad 2015-10-30 00:18:19.000000000 +0300
@@ -871,7 +871,7 @@
     if (framesize > 0) {
       st->print("\n\t");
       st->print("addq    rbp, #%d", framesize);
-    } 
+    }
   }
 }
 
@@ -2124,22 +2124,15 @@
   $$$emit8$primary;
 
   if (!_method) {
-    emit_d32_reloc(cbuf,
-                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
+    emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
-  } else if (_optimized_virtual) {
-    emit_d32_reloc(cbuf,
-                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
-                   opt_virtual_call_Relocation::spec(),
-                   RELOC_DISP32);
   } else {
-    emit_d32_reloc(cbuf,
-                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
-                   static_call_Relocation::spec(),
-                   RELOC_DISP32);
-  }
-  if (_method) {
+    int method_index = resolved_method_index(cbuf);
+    RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
+                                                : static_call_Relocation::spec(method_index);
+    emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
+                   rspec, RELOC_DISP32);
     // Emit stubs for static call.
     address mark = cbuf.insts_mark();
     address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
@@ -2152,7 +2145,7 @@
 
 enc_class Java_Dynamic_Call(method meth) %{
   MacroAssembler _masm(&cbuf);
-  __ ic_call((address)$meth$$method);
+  __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
 %}
 
 enc_class Java_Compiled_Call(method meth)
--- old/src/share/vm/asm/codeBuffer.cpp 2015-10-30 00:18:20.000000000 +0300
+++ new/src/share/vm/asm/codeBuffer.cpp 2015-10-30 00:18:20.000000000 +0300
@@ -305,6 +305,31 @@
   }
 }
 
+void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
+  RelocationHolder rh;
+  switch (rtype) {
+    case relocInfo::none: return;
+    case relocInfo::opt_virtual_call_type: {
+      rh = opt_virtual_call_Relocation::spec(method_index);
+      break;
+    }
+    case relocInfo::static_call_type: {
+      rh = static_call_Relocation::spec(method_index);
+      break;
+    }
+    case relocInfo::virtual_call_type: {
+      assert(method_index == 0, "resolved method overriding is not supported");
+      rh = Relocation::spec_simple(rtype);
+      break;
+    }
+    default: {
+      rh = Relocation::spec_simple(rtype);
+      break;
+    }
+  }
+  relocate(at, rh, format);
+}
+
 void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
   Relocation* reloc = spec.reloc();
   relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
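
The new CodeSection::relocate overload above centralizes the mapping from a bare relocType plus optional method_index to a concrete Relocation spec. A simplified standalone model of that dispatch (stand-in types, not the HotSpot ones):

#include <cassert>
#include <string>

struct Holder { std::string kind; int method_index; };

// Normalizes a bare relocType plus optional method_index into a full spec,
// the way the new CodeSection::relocate overload does.
static Holder make_spec(const std::string& rtype, int method_index = 0) {
  if (rtype == "opt_virtual_call" || rtype == "static_call") {
    return {rtype, method_index};  // these two can carry the index
  }
  if (rtype == "virtual_call") {
    // a bare type cannot carry the cached-value address, so only index 0 is
    // accepted here; full specs must arrive as RelocationHolders
    assert(method_index == 0);
  }
  return {rtype, 0};
}

int main() {
  assert(make_spec("static_call", 5).method_index == 5);
  assert(make_spec("runtime_call").method_index == 0);
  return 0;
}
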
--- old/src/share/vm/asm/codeBuffer.hpp 2015-10-30 00:18:21.000000000 +0300
+++ new/src/share/vm/asm/codeBuffer.hpp 2015-10-30 00:18:20.000000000 +0300
@@ -209,10 +209,7 @@
 
   // Emit a relocation.
   void relocate(address at, RelocationHolder const& rspec, int format = 0);
-  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
-    if (rtype != relocInfo::none)
-      relocate(at, Relocation::spec_simple(rtype), format);
-  }
+  void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);
 
   // alignment requirement for starting offset
   // Requirements are that the instruction area and the
--- old/src/share/vm/code/nmethod.cpp 2015-10-30 00:18:21.000000000 +0300
+++ new/src/share/vm/code/nmethod.cpp 2015-10-30 00:18:21.000000000 +0300
@@ -986,19 +986,23 @@
       oop_maps()->print();
     }
   }
-  if (PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
+  if (printmethod || PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
     print_scopes();
   }
-  if (PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
+  if (printmethod || PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
     print_relocations();
   }
-  if (PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
+  if (printmethod || PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
     print_dependencies();
  }
-  if (PrintExceptionHandlers) {
+  if (printmethod || PrintExceptionHandlers) {
     print_handler_table();
     print_nul_chk_table();
   }
+  if (printmethod) {
+    print_recorded_oops();
+    print_recorded_metadata();
+  }
   if (xtty != NULL) {
     xtty->tail("print_nmethod");
   }
@@ -3049,6 +3053,26 @@
   }
 }
 
+void nmethod::print_recorded_oops() {
+  tty->print_cr("Recorded oops:");
+  for (int i = 0; i < oops_count(); i++) {
+    oop o = oop_at(i);
+    tty->print("#%3d: " INTPTR_FORMAT " ", i, p2i(o));
+    o->print_value();
+    tty->cr();
+  }
+}
+
+void nmethod::print_recorded_metadata() {
+  tty->print_cr("Recorded metadata:");
+  for (int i = 0; i < metadata_count(); i++) {
+    Metadata* m = metadata_at(i);
+    tty->print("#%3d: " INTPTR_FORMAT " ", i, p2i(m));
+    m->print_value_on_maybe_null(tty);
+    tty->cr();
+  }
+}
+
 #endif // PRODUCT
 
 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
@@ -3089,9 +3113,39 @@
       }
       return st.as_string();
     }
-    case relocInfo::virtual_call_type:     return "virtual_call";
-    case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
-    case relocInfo::static_call_type:      return "static_call";
+    case relocInfo::virtual_call_type: {
+      stringStream st;
+      st.print_raw("virtual_call");
+      virtual_call_Relocation* r = iter.virtual_call_reloc();
+      Method* m = r->method_value();
+      if (m != NULL) {
+        assert(m->is_method(), "");
+        m->print_short_name(&st);
+      }
+      return st.as_string();
+    }
+    case relocInfo::opt_virtual_call_type: {
+      stringStream st;
+      st.print_raw("optimized virtual_call");
+      opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
+      Method* m = r->method_value();
+      if (m != NULL) {
+        assert(m->is_method(), "");
+        m->print_short_name(&st);
+      }
+      return st.as_string();
+    }
+    case relocInfo::static_call_type: {
+      stringStream st;
+      st.print_raw("static_call");
+      static_call_Relocation* r = iter.static_call_reloc();
+      Method* m = r->method_value();
+      if (m != NULL) {
+        assert(m->is_method(), "");
+        m->print_short_name(&st);
+      }
+      return st.as_string();
+    }
     case relocInfo::static_stub_type:      return "static_stub";
     case relocInfo::external_word_type:    return "external_word";
     case relocInfo::internal_word_type:    return "internal_word";
--- old/src/share/vm/code/nmethod.hpp 2015-10-30 00:18:22.000000000 +0300
+++ 
new/src/share/vm/code/nmethod.hpp 2015-10-30 00:18:22.000000000 +0300
@@ -392,6 +392,9 @@
   int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
   int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
 
+  int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
+  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
+
   int total_size        () const;
 
   void dec_hotness_counter()        { _hotness_counter--; }
@@ -491,7 +494,7 @@
   oop   oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
   oop*  oop_addr_at(int index) const {  // for GC
     // relocation indexes are biased by 1 (because 0 is reserved)
-    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
+    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
     assert(!_oops_are_stale, "oops are stale");
     return &oops_begin()[index - 1];
   }
@@ -501,7 +504,7 @@
   Metadata*   metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); }
   Metadata**  metadata_addr_at(int index) const {  // for GC
     // relocation indexes are biased by 1 (because 0 is reserved)
-    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
+    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
     return &metadata_begin()[index - 1];
   }
 
@@ -695,6 +698,8 @@
   void print_calls(outputStream* st)              PRODUCT_RETURN;
   void print_handler_table()                      PRODUCT_RETURN;
   void print_nul_chk_table()                      PRODUCT_RETURN;
+  void print_recorded_oops()                      PRODUCT_RETURN;
+  void print_recorded_metadata()                  PRODUCT_RETURN;
 
   void print_nmethod(bool print_code);
 
   // need to re-define this from CodeBlob else the overload hides it
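
The oops_count/metadata_count accessors exist because recorded-table indexes are biased by 1, with index 0 reserved for NULL; the adjusted asserts must therefore compare against a count of biased indexes rather than the raw section size. A standalone illustration of the convention (toy types, not the nmethod layout):

#include <cassert>
#include <vector>

// A table with N recorded entries accepts biased indexes 1..N; index 0 is
// reserved and reads as NULL, so count() reports N + 1 index values in all.
struct RecordedTable {
  std::vector<void*> slots;  // the N recorded entries

  int count() const { return (int)slots.size() + 1; }

  void* at(int index) const {
    if (index == 0) return nullptr;        // reserved index
    assert(index > 0 && index < count());  // valid biased range is 1..N
    return slots[index - 1];               // un-bias to reach the slot
  }
};

int main() {
  int dummy = 0;
  RecordedTable t{{&dummy}};
  assert(t.count() == 2);
  assert(t.at(0) == nullptr && t.at(1) == &dummy);
  return 0;
}
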
--- old/src/share/vm/code/relocInfo.cpp 2015-10-30 00:18:23.000000000 +0300
+++ new/src/share/vm/code/relocInfo.cpp 2015-10-30 00:18:22.000000000 +0300
@@ -581,13 +581,14 @@
   normalize_address(_cached_value, dest);
   jint x0 = scaled_offset_null_special(_cached_value, point);
-  p = pack_1_int_to(p, x0);
+  p = pack_2_ints_to(p, x0, _method_index);
   dest->set_locs_end((relocInfo*) p);
 }
 
 void virtual_call_Relocation::unpack_data() {
-  jint x0 = unpack_1_int();
+  jint x0 = 0;
+  unpack_2_ints(x0, _method_index);
   address point = addr();
   _cached_value = x0==0? NULL: address_from_scaled_offset(x0, point);
 }
@@ -793,6 +794,12 @@
   return _cached_value;
 }
 
+Method* virtual_call_Relocation::method_value() {
+  Metadata* m = code()->metadata_at(_method_index);
+  assert(m != NULL || _method_index == 0, "should be non-null for non-zero index");
+  assert(m == NULL || m->is_method(), "not a method");
+  return (Method*)m;
+}
 
 void virtual_call_Relocation::clear_inline_cache() {
   // No stubs for ICs
@@ -803,6 +810,23 @@
 }
 
 
+void opt_virtual_call_Relocation::pack_data_to(CodeSection* dest) {
+  short* p = (short*) dest->locs_end();
+  p = pack_1_int_to(p, _method_index);
+  dest->set_locs_end((relocInfo*) p);
+}
+
+void opt_virtual_call_Relocation::unpack_data() {
+  _method_index = unpack_1_int();
+}
+
+Method* opt_virtual_call_Relocation::method_value() {
+  Metadata* m = code()->metadata_at(_method_index);
+  assert(m != NULL || _method_index == 0, "should be non-null for non-zero index");
+  assert(m == NULL || m->is_method(), "not a method");
+  return (Method*)m;
+}
+
 void opt_virtual_call_Relocation::clear_inline_cache() {
   // No stubs for ICs
   // Clean IC
@@ -827,6 +851,22 @@
   return NULL;
 }
 
+Method* static_call_Relocation::method_value() {
+  Metadata* m = code()->metadata_at(_method_index);
+  assert(m != NULL || _method_index == 0, "should be non-null for non-zero index");
+  assert(m == NULL || m->is_method(), "not a method");
+  return (Method*)m;
+}
+
+void static_call_Relocation::pack_data_to(CodeSection* dest) {
+  short* p = (short*) dest->locs_end();
+  p = pack_1_int_to(p, _method_index);
+  dest->set_locs_end((relocInfo*) p);
+}
+
+void static_call_Relocation::unpack_data() {
+  _method_index = unpack_1_int();
+}
 
 void static_call_Relocation::clear_inline_cache() {
   // Safe call site info
@@ -1014,6 +1054,12 @@
       break;
     }
   case relocInfo::static_call_type:
+    {
+      static_call_Relocation* r = (static_call_Relocation*) reloc();
+      tty->print(" | [destination=" INTPTR_FORMAT " metadata=" INTPTR_FORMAT "]",
+                 p2i(r->destination()), p2i(r->method_value()));
+      break;
+    }
   case relocInfo::runtime_call_type:
     {
       CallRelocation* r = (CallRelocation*) reloc();
@@ -1023,8 +1069,8 @@
   case relocInfo::virtual_call_type:
     {
       virtual_call_Relocation* r = (virtual_call_Relocation*) reloc();
-      tty->print(" | [destination=" INTPTR_FORMAT " cached_value=" INTPTR_FORMAT "]",
-                 p2i(r->destination()), p2i(r->cached_value()));
+      tty->print(" | [destination=" INTPTR_FORMAT " cached_value=" INTPTR_FORMAT " metadata=" INTPTR_FORMAT "]",
+                 p2i(r->destination()), p2i(r->cached_value()), p2i(r->method_value()));
       break;
     }
   case relocInfo::static_stub_type:
@@ -1039,6 +1085,13 @@
       tty->print(" | [trampoline owner=" INTPTR_FORMAT "]", p2i(r->owner()));
       break;
     }
+  case relocInfo::opt_virtual_call_type:
+    {
+      opt_virtual_call_Relocation* r = (opt_virtual_call_Relocation*) reloc();
+      tty->print(" | [destination=" INTPTR_FORMAT " metadata=" INTPTR_FORMAT "]",
+                 p2i(r->destination()), p2i(r->method_value()));
+      break;
+    }
   }
   tty->cr();
 }
--- old/src/share/vm/code/relocInfo.hpp 2015-10-30 00:18:23.000000000 +0300
+++ new/src/share/vm/code/relocInfo.hpp 2015-10-30 00:18:23.000000000 +0300
@@ -1044,27 +1044,31 @@
   // "cached_value" points to the first associated set-oop.
   // The oop_limit helps find the last associated set-oop.
   // (See comments at the top of this file.)
-  static RelocationHolder spec(address cached_value) {
+  static RelocationHolder spec(address cached_value, jint method_index = 0) {
     RelocationHolder rh = newHolder();
-    new(rh) virtual_call_Relocation(cached_value);
+    new(rh) virtual_call_Relocation(cached_value, method_index);
     return rh;
   }
 
-  virtual_call_Relocation(address cached_value) {
+ private:
+  address _cached_value; // location of set-value instruction
+  jint    _method_index; // resolved method for a Java call
+
+  virtual_call_Relocation(address cached_value, int method_index) {
     _cached_value = cached_value;
+    _method_index = method_index;
     assert(cached_value != NULL, "first oop address must be specified");
   }
 
- private:
-  address _cached_value; // location of set-value instruction
-
   friend class RelocIterator;
   virtual_call_Relocation() { }
 
-
  public:
   address cached_value();
 
+  int     method_index() { return _method_index; }
+  Method* method_value();
+
   // data is packed as scaled offsets in "2_ints" format:  [f l] or [Ff Ll]
   // oop_limit is set to 0 if the limit falls somewhere within the call.
   // When unpacking, a zero oop_limit is taken to refer to the end of the call.
@@ -1080,17 +1084,29 @@
   relocInfo::relocType type() { return relocInfo::opt_virtual_call_type; }
 
  public:
-  static RelocationHolder spec() {
+  static RelocationHolder spec(int method_index = 0) {
     RelocationHolder rh = newHolder();
-    new(rh) opt_virtual_call_Relocation();
+    new(rh) opt_virtual_call_Relocation(method_index);
     return rh;
   }
 
 private:
+  jint _method_index; // resolved method for a Java call
+
+  opt_virtual_call_Relocation(int method_index) {
+    _method_index = method_index;
+  }
+
   friend class RelocIterator;
-  opt_virtual_call_Relocation() { }
+  opt_virtual_call_Relocation() {}
 
  public:
+  int     method_index() { return _method_index; }
+  Method* method_value();
+
+  void pack_data_to(CodeSection* dest);
+  void unpack_data();
+
   void clear_inline_cache();
 
   // find the matching static_stub
@@ -1102,17 +1118,29 @@
   relocInfo::relocType type() { return relocInfo::static_call_type; }
 
  public:
-  static RelocationHolder spec() {
+  static RelocationHolder spec(int method_index = 0) {
     RelocationHolder rh = newHolder();
-    new(rh) static_call_Relocation();
+    new(rh) static_call_Relocation(method_index);
     return rh;
   }
 
 private:
+  jint _method_index; // resolved method for a Java call
+
+  static_call_Relocation(int method_index) {
+    _method_index = method_index;
+  }
+
   friend class RelocIterator;
-  static_call_Relocation() { }
+  static_call_Relocation() {}
 
 public:
+  int     method_index() { return _method_index; }
+  Method* method_value();
+
+  void pack_data_to(CodeSection* dest);
+  void unpack_data();
+
   void clear_inline_cache();
 
   // find the matching static_stub
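
These constructors imply the payload changes seen earlier in relocInfo.cpp: virtual_call relocations now pack two ints (the scaled cached-value offset plus the method index) where they previously packed one, and opt_virtual_call/static_call gain a one-int payload. A toy round-trip of the two-int form (simplified; the real code streams through pack_2_ints_to/unpack_2_ints into the relocInfo section):

#include <cassert>
#include <utility>

// Toy packed form standing in for the relocation data stream.
using Packed = std::pair<int, int>;

static Packed pack(int cached_value_offset, int method_index) {
  return {cached_value_offset, method_index};
}

static void unpack(const Packed& p, int& cached_value_offset, int& method_index) {
  cached_value_offset = p.first;
  method_index = p.second;
}

int main() {
  int off = 0, idx = 0;
  unpack(pack(-8, 42), off, idx);
  assert(off == -8 && idx == 42);  // both values survive the round trip
  return 0;
}
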