hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp

rev 611 : Merge

*** 1,10 ****
- #ifdef USE_PRAGMA_IDENT_SRC
- #pragma ident "@(#)templateTable_x86_64.cpp 1.58 07/09/17 09:25:59 JVM"
- #endif
  /*
!  * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 26,35 ****
--- 23,34 ----
   */

  #include "incls/_precompiled.incl"
  #include "incls/_templateTable_x86_64.cpp.incl"

+ #ifndef CC_INTERP
+ #define __ _masm->

  // Platform-dependent initialization

  void TemplateTable::pd_initialize() {
*** 114,123 ****
--- 113,185 ----
    return Assembler::zero;
  }

  // Miscelaneous helper routines
+ // Store an oop (or NULL) at the address described by obj.
+ // If val == noreg this means store a NULL
+
+ static void do_oop_store(InterpreterMacroAssembler* _masm,
+                          Address obj,
+                          Register val,
+                          BarrierSet::Name barrier,
+                          bool precise) {
+   assert(val == noreg || val == rax, "parameter is just for looks");
+   switch (barrier) {
+ #ifndef SERIALGC
+     case BarrierSet::G1SATBCT:
+     case BarrierSet::G1SATBCTLogging:
+       {
+         // flatten object address if needed
+         if (obj.index() == noreg && obj.disp() == 0) {
+           if (obj.base() != rdx) {
+             __ movq(rdx, obj.base());
+           }
+         } else {
+           __ leaq(rdx, obj);
+         }
+         __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
+         if (val == noreg) {
+           __ store_heap_oop(Address(rdx, 0), NULL_WORD);
+         } else {
+           __ store_heap_oop(Address(rdx, 0), val);
+           __ g1_write_barrier_post(rdx, val, r8, rbx);
+         }
+
+       }
+       break;
+ #endif // SERIALGC
+     case BarrierSet::CardTableModRef:
+     case BarrierSet::CardTableExtension:
+       {
+         if (val == noreg) {
+           __ store_heap_oop(obj, NULL_WORD);
+         } else {
+           __ store_heap_oop(obj, val);
+           // flatten object address if needed
+           if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
+             __ store_check(obj.base());
+           } else {
+             __ leaq(rdx, obj);
+             __ store_check(rdx);
+           }
+         }
+       }
+       break;
+     case BarrierSet::ModRef:
+     case BarrierSet::Other:
+       if (val == noreg) {
+         __ store_heap_oop(obj, NULL_WORD);
+       } else {
+         __ store_heap_oop(obj, val);
+       }
+       break;
+     default :
+       ShouldNotReachHere();
+
+   }
+ }

  Address TemplateTable::at_bcp(int offset) {
    assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
    return Address(r13, offset);
  }
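Note on the do_oop_store helper added above: oop stores now funnel through one routine that selects the GC write barrier at code-generation time, so the G1 pre/post barriers and the card-table store_check no longer need to be open-coded at each aastore/putfield site. The `precise` flag distinguishes stores that must dirty the card of the exact destination word (array elements) from stores where the object base suffices (instance fields). A minimal, runnable C++ model of the card-table case follows; the 512-byte card size and every name in it are illustrative assumptions for this sketch, not HotSpot's actual layout:

    #include <cstdint>
    #include <cstring>

    // Hypothetical model of a card-table post-barrier: after storing a
    // pointer into the heap, mark the 512-byte "card" covering the
    // destination so the collector rescans it later.
    static const int     kCardShift = 9;          // 2^9 = 512-byte cards
    static const uint8_t kDirty     = 0;          // "dirty card" value
    static uint8_t       card_table[1 << 15];     // covers a 16 MB toy heap
    static uintptr_t     heap_base;               // lowest heap address

    static void post_write_barrier(void* field_addr) {
      uintptr_t offset = (uintptr_t)field_addr - heap_base;
      card_table[offset >> kCardShift] = kDirty;  // imprecise: whole card
    }

    static void oop_store(void** field, void* value) {
      *field = value;             // the actual store
      post_write_barrier(field);  // then dirty the covering card
    }

    int main() {
      static void* heap[1024];
      heap_base = (uintptr_t)heap;
      memset(card_table, 0xff, sizeof(card_table));  // all clean
      oop_store(&heap[10], &heap[0]);
      uintptr_t off = (uintptr_t)&heap[10] - heap_base;
      return card_table[off >> kCardShift] == kDirty ? 0 : 1;
    }

The real barriers do more (G1 also logs the old value before the store); the sketch only shows the dirty-on-store pattern that store_check emits.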
*** 318,328 ****
    __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
    __ push_i(rax);
    __ jmp(Done);

    __ bind(isOop);
!   __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
    __ push_ptr(rax);
    if (VerifyOops) {
      __ verify_oop(rax);
    }
--- 380,390 ----
    __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
    __ push_i(rax);
    __ jmp(Done);

    __ bind(isOop);
!   __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
    __ push_ptr(rax);
    if (VerifyOops) {
      __ verify_oop(rax);
    }
*** 356,367 ****
    __ bind(Done);
  }

  void TemplateTable::locals_index(Register reg, int offset) {
    __ load_unsigned_byte(reg, at_bcp(offset));
!   __ negq(reg);
!   if (TaggedStackInterpreter) __ shlq(reg, 1);  // index = index*2
  }

  void TemplateTable::iload() {
    transition(vtos, itos);
    if (RewriteFrequentPairs) {
--- 418,429 ----
    __ bind(Done);
  }

  void TemplateTable::locals_index(Register reg, int offset) {
    __ load_unsigned_byte(reg, at_bcp(offset));
!   __ negptr(reg);
!   if (TaggedStackInterpreter) __ shlptr(reg, 1);  // index = index*2
  }

  void TemplateTable::iload() {
    transition(vtos, itos);
    if (RewriteFrequentPairs) {
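The negptr/shlptr pair above implements locals addressing: locals live below the locals base register (r14), so the byte-loaded index is negated before scaled addressing, and doubled first when TaggedStackInterpreter gives every slot an extra tag word. A small runnable model (the layout here is illustrative):

    #include <cassert>
    #include <cstdint>

    // Toy model of interpreter local addressing: locals grow toward lower
    // addresses from a base pointer (r14 in the real code), so slot i lives
    // at base - i. Negating the index first lets the address be formed with
    // ordinary positive scaling; tagged-stack mode doubles the index because
    // each local then occupies two words.
    static int64_t* local_addr(int64_t* locals_base, int index, bool tagged) {
      int64_t reg = index;    // load_unsigned_byte(reg, at_bcp(offset))
      reg = -reg;             // negptr(reg)
      if (tagged) reg <<= 1;  // shlptr(reg, 1): index = index*2
      return locals_base + reg;
    }

    int main() {
      int64_t frame[16];
      int64_t* base = &frame[15];
      assert(local_addr(base, 3, false) == base - 3);
      assert(local_addr(base, 3, true)  == base - 6);
      return 0;
    }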
*** 444,463 ****
  }

  void TemplateTable::aload() {
    transition(vtos, atos);
    locals_index(rbx);
!   __ movq(rax, aaddress(rbx));
    debug_only(__ verify_local_tag(frame::TagReference, rbx));
  }

  void TemplateTable::locals_index_wide(Register reg) {
    __ movl(reg, at_bcp(2));
    __ bswapl(reg);
    __ shrl(reg, 16);
!   __ negq(reg);
!   if (TaggedStackInterpreter) __ shlq(reg, 1);  // index = index*2
  }

  void TemplateTable::wide_iload() {
    transition(vtos, itos);
    locals_index_wide(rbx);
--- 506,525 ----
  }

  void TemplateTable::aload() {
    transition(vtos, atos);
    locals_index(rbx);
!   __ movptr(rax, aaddress(rbx));
    debug_only(__ verify_local_tag(frame::TagReference, rbx));
  }

  void TemplateTable::locals_index_wide(Register reg) {
    __ movl(reg, at_bcp(2));
    __ bswapl(reg);
    __ shrl(reg, 16);
!   __ negptr(reg);
!   if (TaggedStackInterpreter) __ shlptr(reg, 1);  // index = index*2
  }

  void TemplateTable::wide_iload() {
    transition(vtos, itos);
    locals_index_wide(rbx);
*** 487,506 ****
  }

  void TemplateTable::wide_aload() {
    transition(vtos, atos);
    locals_index_wide(rbx);
!   __ movq(rax, aaddress(rbx));
    debug_only(__ verify_local_tag(frame::TagReference, rbx));
  }

  void TemplateTable::index_check(Register array, Register index) {
    // destroys rbx
    // check array
    __ null_check(array, arrayOopDesc::length_offset_in_bytes());
    // sign extend index for use by indexed load
!   __ movslq(index, index);
    // check index
    __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
    if (index != rbx) {
      // ??? convention: move aberrant index into ebx for exception message
      assert(rbx != array, "different registers");
--- 549,568 ----
  }

  void TemplateTable::wide_aload() {
    transition(vtos, atos);
    locals_index_wide(rbx);
!   __ movptr(rax, aaddress(rbx));
    debug_only(__ verify_local_tag(frame::TagReference, rbx));
  }

  void TemplateTable::index_check(Register array, Register index) {
    // destroys rbx
    // check array
    __ null_check(array, arrayOopDesc::length_offset_in_bytes());
    // sign extend index for use by indexed load
!   __ movl2ptr(index, index);
    // check index
    __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
    if (index != rbx) {
      // ??? convention: move aberrant index into ebx for exception message
      assert(rbx != array, "different registers");
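index_check above pairs a 32-bit unsigned bounds compare with a sign-extending widen (movl2ptr, i.e. movslq on x86_64) for the later address arithmetic; the unsigned compare is what makes a negative index fail the bounds test. A sketch of that arithmetic:

    #include <cassert>
    #include <cstdint>

    // Illustrative model of index_check: the bounds test is an unsigned
    // 32-bit compare (so index = -1 wraps to 0xFFFFFFFF and fails), while
    // the address computation afterwards uses the sign-extended 64-bit
    // index, which is what movl2ptr provides.
    static bool index_in_bounds(int32_t index, uint32_t length) {
      return (uint32_t)index < length;  // cmpl + jcc(aboveEqual) in the VM
    }

    int main() {
      assert(index_in_bounds(0, 4));
      assert(!index_in_bounds(4, 4));
      assert(!index_in_bounds(-1, 4));           // wraps to 4294967295u
      int64_t scaled = (int64_t)(int32_t)3 * 8;  // movslq, then scaled load
      assert(scaled == 24);
      return 0;
    }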
*** 558,569 ****
    transition(itos, atos);
    __ pop_ptr(rdx);
    // eax: index
    // rdx: array
    index_check(rdx, rax); // kills rbx
!   __ movq(rax, Address(rdx, rax,
!                        Address::times_8,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  }

  void TemplateTable::baload() {
    transition(itos, itos);
--- 620,631 ----
    transition(itos, atos);
    __ pop_ptr(rdx);
    // eax: index
    // rdx: array
    index_check(rdx, rax); // kills rbx
!   __ load_heap_oop(rax, Address(rdx, rax,
!                                 UseCompressedOops ? Address::times_4 : Address::times_8,
                                  arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  }

  void TemplateTable::baload() {
    transition(itos, itos);
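The aaload change above is the compressed-oops adaptation: with UseCompressedOops an object array holds 32-bit narrow oops, so the element stride drops from times_8 to times_4 and load_heap_oop performs the decode. A toy decode, with a base and shift chosen purely for illustration (assumes a 64-bit host):

    #include <cassert>
    #include <cstdint>

    // Why aaload scales by times_4 under UseCompressedOops: each element is
    // a 32-bit "narrow oop" that decodes to a full pointer as
    // base + (narrow << shift). Base and shift here are hypothetical.
    static uintptr_t heap_base = 0x800000000ULL;
    static const int oop_shift = 3;

    static uintptr_t decode_heap_oop(uint32_t narrow) {
      return narrow == 0 ? 0 : heap_base + ((uintptr_t)narrow << oop_shift);
    }

    int main() {
      uint32_t arr[4] = {0, 1, 2, 3};  // narrow oops, 4 bytes apart
      // element address: arr + index * 4 (Address::times_4 in the template)
      uint32_t narrow = arr[2];
      assert(decode_heap_oop(narrow) == heap_base + (2u << oop_shift));
      assert(decode_heap_oop(0) == 0);  // NULL stays NULL
      return 0;
    }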
*** 643,653 ****
    debug_only(__ verify_local_tag(frame::TagCategory2, n));
  }

  void TemplateTable::aload(int n) {
    transition(vtos, atos);
!   __ movq(rax, aaddress(n));
    debug_only(__ verify_local_tag(frame::TagReference, n));
  }

  void TemplateTable::aload_0() {
    transition(vtos, atos);
--- 705,715 ----
    debug_only(__ verify_local_tag(frame::TagCategory2, n));
  }

  void TemplateTable::aload(int n) {
    transition(vtos, atos);
!   __ movptr(rax, aaddress(n));
    debug_only(__ verify_local_tag(frame::TagReference, n));
  }

  void TemplateTable::aload_0() {
    transition(vtos, atos);
*** 758,768 ****
  void TemplateTable::astore() {
    transition(vtos, vtos);
    __ pop_ptr(rax, rdx);    // will need to pop tag too
    locals_index(rbx);
!   __ movq(aaddress(rbx), rax);
    __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
  }

  void TemplateTable::wide_istore() {
    transition(vtos, vtos);
--- 820,830 ----
  void TemplateTable::astore() {
    transition(vtos, vtos);
    __ pop_ptr(rax, rdx);    // will need to pop tag too
    locals_index(rbx);
!   __ movptr(aaddress(rbx), rax);
    __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
  }

  void TemplateTable::wide_istore() {
    transition(vtos, vtos);
*** 798,808 ****
  void TemplateTable::wide_astore() {
    transition(vtos, vtos);
    __ pop_ptr(rax, rdx);    // will need to pop tag too
    locals_index_wide(rbx);
!   __ movq(aaddress(rbx), rax);
    __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
  }

  void TemplateTable::iastore() {
    transition(itos, vtos);
--- 860,870 ----
  void TemplateTable::wide_astore() {
    transition(vtos, vtos);
    __ pop_ptr(rax, rdx);    // will need to pop tag too
    locals_index_wide(rbx);
!   __ movptr(aaddress(rbx), rax);
    __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
  }

  void TemplateTable::iastore() {
    transition(itos, vtos);
*** 862,890 ****
  void TemplateTable::aastore() {
    Label is_null, ok_is_subtype, done;
    transition(vtos, vtos);
    // stack: ..., array, index, value
!   __ movq(rax, at_tos());    // value
    __ movl(rcx, at_tos_p1()); // index
!   __ movq(rdx, at_tos_p2()); // array
    index_check(rdx, rcx);     // kills rbx
    // do array store check - check for NULL value first
!   __ testq(rax, rax);
    __ jcc(Assembler::zero, is_null);

    // Move subklass into rbx
!   __ movq(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
    // Move superklass into rax
!   __ movq(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
!   __ movq(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
!   // Compress array + index*8 + 12 into a single register. Frees rcx.
!   __ leaq(rdx, Address(rdx, rcx,
!                        Address::times_8,
!                        arrayOopDesc::base_offset_in_bytes(T_OBJECT)));

    // Generate subtype check. Blows rcx, rdi
    // Superklass in rax. Subklass in rbx.
    __ gen_subtype_check(rbx, ok_is_subtype);
--- 924,955 ----
  void TemplateTable::aastore() {
    Label is_null, ok_is_subtype, done;
    transition(vtos, vtos);
    // stack: ..., array, index, value
!   __ movptr(rax, at_tos());    // value
    __ movl(rcx, at_tos_p1()); // index
!   __ movptr(rdx, at_tos_p2()); // array
!
!   Address element_address(rdx, rcx,
!                           UseCompressedOops? Address::times_4 : Address::times_8,
!                           arrayOopDesc::base_offset_in_bytes(T_OBJECT));
!
    index_check(rdx, rcx);     // kills rbx
    // do array store check - check for NULL value first
!   __ testptr(rax, rax);
    __ jcc(Assembler::zero, is_null);

    // Move subklass into rbx
!   __ load_klass(rbx, rax);
    // Move superklass into rax
!   __ load_klass(rax, rdx);
!   __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
!   // Compress array + index*oopSize + 12 into a single register. Frees rcx.
!   __ lea(rdx, element_address);

    // Generate subtype check. Blows rcx, rdi
    // Superklass in rax. Subklass in rbx.
    __ gen_subtype_check(rbx, ok_is_subtype);
*** 892,917 ****
    // object is at TOS
    __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

    // Come here on success
    __ bind(ok_is_subtype);
!   __ movq(rax, at_tos()); // Value
!   __ movq(Address(rdx, 0), rax);
!   __ store_check(rdx);
    __ jmp(done);

    // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
    __ bind(is_null);
    __ profile_null_seen(rbx);
!   __ movq(Address(rdx, rcx,
!                   Address::times_8,
!                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
!           rax);

    // Pop stack arguments
    __ bind(done);
!   __ addq(rsp, 3 * Interpreter::stackElementSize());
  }

  void TemplateTable::bastore() {
    transition(itos, vtos);
    __ pop_i(rbx);
--- 957,983 ----
    // object is at TOS
    __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

    // Come here on success
    __ bind(ok_is_subtype);
!
!   // Get the value we will store
!   __ movptr(rax, at_tos());
!   // Now store using the appropriate barrier
!   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
    __ jmp(done);

    // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
    __ bind(is_null);
    __ profile_null_seen(rbx);
!
!   // Store a NULL
!   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

    // Pop stack arguments
    __ bind(done);
!   __ addptr(rsp, 3 * Interpreter::stackElementSize());
  }

  void TemplateTable::bastore() {
    transition(itos, vtos);
    __ pop_i(rbx);
*** 969,990 ****
  }

  void TemplateTable::astore(int n) {
    transition(vtos, vtos);
    __ pop_ptr(rax, rdx);
!   __ movq(aaddress(n), rax);
    __ tag_local(rdx, n);
  }

  void TemplateTable::pop() {
    transition(vtos, vtos);
!   __ addq(rsp, Interpreter::stackElementSize());
  }

  void TemplateTable::pop2() {
    transition(vtos, vtos);
!   __ addq(rsp, 2 * Interpreter::stackElementSize());
  }

  void TemplateTable::dup() {
    transition(vtos, vtos);
    __ load_ptr_and_tag(0, rax, rdx);
--- 1035,1056 ----
  }

  void TemplateTable::astore(int n) {
    transition(vtos, vtos);
    __ pop_ptr(rax, rdx);
!   __ movptr(aaddress(n), rax);
    __ tag_local(rdx, n);
  }

  void TemplateTable::pop() {
    transition(vtos, vtos);
!   __ addptr(rsp, Interpreter::stackElementSize());
  }

  void TemplateTable::pop2() {
    transition(vtos, vtos);
!   __ addptr(rsp, 2 * Interpreter::stackElementSize());
  }

  void TemplateTable::dup() {
    transition(vtos, vtos);
    __ load_ptr_and_tag(0, rax, rdx);
*** 1091,1105 ****
  }

  void TemplateTable::lop2(Operation op) {
    transition(ltos, ltos);
    switch (op) {
!   case add  :                    __ pop_l(rdx); __ addq (rax, rdx); break;
!   case sub  : __ movq(rdx, rax); __ pop_l(rax); __ subq (rax, rdx); break;
!   case _and :                    __ pop_l(rdx); __ andq (rax, rdx); break;
!   case _or  :                    __ pop_l(rdx); __ orq  (rax, rdx); break;
!   case _xor :                    __ pop_l(rdx); __ xorq (rax, rdx); break;
    default   : ShouldNotReachHere();
    }
  }

  void TemplateTable::idiv() {
--- 1157,1171 ----
  }

  void TemplateTable::lop2(Operation op) {
    transition(ltos, ltos);
    switch (op) {
!   case add  :                   __ pop_l(rdx); __ addptr (rax, rdx); break;
!   case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr (rax, rdx); break;
!   case _and :                   __ pop_l(rdx); __ andptr (rax, rdx); break;
!   case _or  :                   __ pop_l(rdx); __ orptr  (rax, rdx); break;
!   case _xor :                   __ pop_l(rdx); __ xorptr (rax, rdx); break;
    default   : ShouldNotReachHere();
    }
  }

  void TemplateTable::idiv() {
*** 1131,1141 ****
    __ imulq(rax, rdx);
  }

  void TemplateTable::ldiv() {
    transition(ltos, ltos);
!   __ movq(rcx, rax);
    __ pop_l(rax);
    // generate explicit div0 check
    __ testq(rcx, rcx);
    __ jump_cc(Assembler::zero,
               ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
--- 1197,1207 ----
    __ imulq(rax, rdx);
  }

  void TemplateTable::ldiv() {
    transition(ltos, ltos);
!   __ mov(rcx, rax);
    __ pop_l(rax);
    // generate explicit div0 check
    __ testq(rcx, rcx);
    __ jump_cc(Assembler::zero,
               ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
*** 1146,1166 ****
    __ corrected_idivq(rcx); // kills rbx
  }

  void TemplateTable::lrem() {
    transition(ltos, ltos);
!   __ movq(rcx, rax);
    __ pop_l(rax);
    __ testq(rcx, rcx);
    __ jump_cc(Assembler::zero,
               ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
    // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
    // they are not equal, one could do a normal division (no correction
    // needed), which may speed up this implementation for the common case.
    // (see also JVM spec., p.243 & p.271)
    __ corrected_idivq(rcx); // kills rbx
!   __ movq(rax, rdx);
  }

  void TemplateTable::lshl() {
    transition(itos, ltos);
    __ movl(rcx, rax); // get shift count
--- 1212,1232 ----
    __ corrected_idivq(rcx); // kills rbx
  }

  void TemplateTable::lrem() {
    transition(ltos, ltos);
!   __ mov(rcx, rax);
    __ pop_l(rax);
    __ testq(rcx, rcx);
    __ jump_cc(Assembler::zero,
               ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
    // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
    // they are not equal, one could do a normal division (no correction
    // needed), which may speed up this implementation for the common case.
    // (see also JVM spec., p.243 & p.271)
    __ corrected_idivq(rcx); // kills rbx
!   __ mov(rax, rdx);
  }

  void TemplateTable::lshl() {
    transition(itos, ltos);
    __ movl(rcx, rax); // get shift count
*** 1185,1204 ****
  void TemplateTable::fop2(Operation op) {
    transition(ftos, ftos);
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
!     __ addq(rsp, Interpreter::stackElementSize());
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
!     __ addq(rsp, Interpreter::stackElementSize());
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
--- 1251,1270 ----
  void TemplateTable::fop2(Operation op) {
    transition(ftos, ftos);
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
!     __ addptr(rsp, Interpreter::stackElementSize());
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
!     __ addptr(rsp, Interpreter::stackElementSize());
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
*** 1217,1236 ****
  void TemplateTable::dop2(Operation op) {
    transition(dtos, dtos);
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
!     __ addq(rsp, 2 * Interpreter::stackElementSize());
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
!     __ addq(rsp, 2 * Interpreter::stackElementSize());
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
--- 1283,1302 ----
  void TemplateTable::dop2(Operation op) {
    transition(dtos, dtos);
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
!     __ addptr(rsp, 2 * Interpreter::stackElementSize());
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
!     __ addptr(rsp, 2 * Interpreter::stackElementSize());
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
--- 
*** 1487,1521 ****
    __ bswapl(rdx);

    if (!is_wide) {
      __ sarl(rdx, 16);
    }
!   __ movslq(rdx, rdx);

    // Handle all the JSR stuff here, then exit.
    // It's much shorter and cleaner than intermingling with the non-JSR
    // normal-branch stuff occuring below.
    if (is_jsr) {
      // Pre-load the next target bytecode into rbx
      __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

      // compute return address as bci in rax
!     __ leaq(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
!     __ subq(rax, Address(rcx, methodOopDesc::const_offset()));
      // Adjust the bcp in r13 by the displacement in rdx
!     __ addq(r13, rdx);
      // jsr returns atos that is not an oop
      __ push_i(rax);
      __ dispatch_only(vtos);
      return;
    }

    // Normal (non-jsr) branch handling

    // Adjust the bcp in r13 by the displacement in rdx
!   __ addq(r13, rdx);

    assert(UseLoopCounter || !UseOnStackReplacement,
           "on-stack-replacement requires loop counters");

    Label backedge_counter_overflow;
    Label profile_method;
--- 1553,1587 ----
    __ bswapl(rdx);

    if (!is_wide) {
      __ sarl(rdx, 16);
    }
!   __ movl2ptr(rdx, rdx);

    // Handle all the JSR stuff here, then exit.
    // It's much shorter and cleaner than intermingling with the non-JSR
    // normal-branch stuff occuring below.
    if (is_jsr) {
      // Pre-load the next target bytecode into rbx
      __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

      // compute return address as bci in rax
!     __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
!     __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
      // Adjust the bcp in r13 by the displacement in rdx
!     __ addptr(r13, rdx);
      // jsr returns atos that is not an oop
      __ push_i(rax);
      __ dispatch_only(vtos);
      return;
    }

    // Normal (non-jsr) branch handling

    // Adjust the bcp in r13 by the displacement in rdx
!   __ addptr(r13, rdx);

    assert(UseLoopCounter || !UseOnStackReplacement,
           "on-stack-replacement requires loop counters");

    Label backedge_counter_overflow;
    Label profile_method;
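The bswapl/sarl/movl2ptr sequence above decodes the big-endian branch displacement from the bytecode stream: a 16-bit signed offset for plain branches, the full 32 bits for wide ones, then widened to pointer size for bcp arithmetic. A standalone model (assumes a little-endian host, like the x86_64 this template targets):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint32_t bswap32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
    }

    // The bytecode stream is big-endian; for a short branch the 16-bit
    // offset sits in the high half after the swap, and an arithmetic shift
    // right by 16 both extracts and sign-extends it, mirroring
    // bswapl/sarl(rdx, 16) before movl2ptr widens it.
    static int64_t branch_offset(const uint8_t* operand_bytes, bool is_wide) {
      uint32_t raw;
      memcpy(&raw, operand_bytes, 4);       // movl: little-endian 32-bit load
      int32_t off = (int32_t)bswap32(raw);  // bswapl
      if (!is_wide) off >>= 16;             // sarl: extract + sign-extend
      return (int64_t)off;                  // movl2ptr for bcp arithmetic
    }

    int main() {
      const uint8_t short_br[4] = {0xFF, 0xF0, 0x00, 0x00};  // 0xFFF0 == -16
      assert(branch_offset(short_br, false) == -16);
      const uint8_t wide_br[4] = {0xFF, 0xFF, 0xFF, 0xF0};   // goto_w: -16
      assert(branch_offset(wide_br, true) == -16);
      return 0;
    }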
*** 1595,1623 ****
      __ bind(profile_method);
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
                 r13);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
!     __ movq(rcx, Address(rbp, method_offset));
!     __ movq(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
!     __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
!     __ addq(rcx, in_bytes(methodDataOopDesc::data_offset()));
!     __ addq(rcx, rax);
!     __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
!     __ negq(rdx);
!     __ addq(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
--- 1661,1689 ----
      __ bind(profile_method);
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
                 r13);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
!     __ movptr(rcx, Address(rbp, method_offset));
!     __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
!     __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
!     __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
!     __ addptr(rcx, rax);
!     __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
!     __ negptr(rdx);
!     __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
*** 1626,1673 ****
      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
!     __ testq(rax, rax);                        // test result
      __ jcc(Assembler::zero, dispatch);         // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

!     __ movq(r13, rax);                         // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
!     __ movq(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
!     __ movq(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                                // remove frame anchor
!     __ popq(retaddr);                          // get return address
!     __ movq(rsp, sender_sp);                   // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
!     __ andq(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
!     __ pushq(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
--- 1692,1739 ----
      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
!     __ testptr(rax, rax);                      // test result
      __ jcc(Assembler::zero, dispatch);         // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

!     __ mov(r13, rax);                          // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
!     __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
!     __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                                // remove frame anchor
!     __ pop(retaddr);                           // get return address
!     __ mov(rsp, sender_sp);                    // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
!     __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
!     __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
*** 1699,1709 ****
  void TemplateTable::if_nullcmp(Condition cc) {
    transition(atos, vtos);
    // assume branch is more often taken than not (loops use backward branches)
    Label not_taken;
!   __ testq(rax, rax);
    __ jcc(j_not(cc), not_taken);
    branch(false, false);
    __ bind(not_taken);
    __ profile_not_taken_branch(rax);
  }
--- 1765,1775 ----
  void TemplateTable::if_nullcmp(Condition cc) {
    transition(atos, vtos);
    // assume branch is more often taken than not (loops use backward branches)
    Label not_taken;
!   __ testptr(rax, rax);
    __ jcc(j_not(cc), not_taken);
    branch(false, false);
    __ bind(not_taken);
    __ profile_not_taken_branch(rax);
  }
*** 1711,1756 ****
  void TemplateTable::if_acmp(Condition cc) {
    transition(atos, vtos);
    // assume branch is more often taken than not (loops use backward branches)
    Label not_taken;
    __ pop_ptr(rdx);
!   __ cmpq(rdx, rax);
    __ jcc(j_not(cc), not_taken);
    branch(false, false);
    __ bind(not_taken);
    __ profile_not_taken_branch(rax);
  }

  void TemplateTable::ret() {
    transition(vtos, vtos);
    locals_index(rbx);
!   __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp
    __ profile_ret(rbx, rcx);
    __ get_method(rax);
!   __ movq(r13, Address(rax, methodOopDesc::const_offset()));
!   __ leaq(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
    __ dispatch_next(vtos);
  }

  void TemplateTable::wide_ret() {
    transition(vtos, vtos);
    locals_index_wide(rbx);
!   __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp
    __ profile_ret(rbx, rcx);
    __ get_method(rax);
!   __ movq(r13, Address(rax, methodOopDesc::const_offset()));
!   __ leaq(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
    __ dispatch_next(vtos);
  }

  void TemplateTable::tableswitch() {
    Label default_case, continue_execution;
    transition(itos, vtos);
    // align r13
!   __ leaq(rbx, at_bcp(BytesPerInt));
!   __ andq(rbx, -BytesPerInt);
    // load lo & hi
    __ movl(rcx, Address(rbx, BytesPerInt));
    __ movl(rdx, Address(rbx, 2 * BytesPerInt));
    __ bswapl(rcx);
    __ bswapl(rdx);
--- 1777,1822 ----
  void TemplateTable::if_acmp(Condition cc) {
    transition(atos, vtos);
    // assume branch is more often taken than not (loops use backward branches)
    Label not_taken;
    __ pop_ptr(rdx);
!   __ cmpptr(rdx, rax);
    __ jcc(j_not(cc), not_taken);
    branch(false, false);
    __ bind(not_taken);
    __ profile_not_taken_branch(rax);
  }

  void TemplateTable::ret() {
    transition(vtos, vtos);
    locals_index(rbx);
!   __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
    __ profile_ret(rbx, rcx);
    __ get_method(rax);
!   __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
!   __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
    __ dispatch_next(vtos);
  }

  void TemplateTable::wide_ret() {
    transition(vtos, vtos);
    locals_index_wide(rbx);
!   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
    __ profile_ret(rbx, rcx);
    __ get_method(rax);
!   __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
!   __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
    __ dispatch_next(vtos);
  }

  void TemplateTable::tableswitch() {
    Label default_case, continue_execution;
    transition(itos, vtos);
    // align r13
!   __ lea(rbx, at_bcp(BytesPerInt));
!   __ andptr(rbx, -BytesPerInt);
    // load lo & hi
    __ movl(rcx, Address(rbx, BytesPerInt));
    __ movl(rdx, Address(rbx, 2 * BytesPerInt));
    __ bswapl(rcx);
    __ bswapl(rdx);
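The lea/andptr(-BytesPerInt) pair in tableswitch above rounds the bcp up to the 4-byte boundary where the switch operands start: (bcp + 4) & -4 is the same value as aligning bcp + 1 upward, since the padding follows the one-byte opcode. In plain arithmetic:

    #include <cassert>
    #include <cstdint>

    // Model of the tableswitch operand alignment: the operands begin at the
    // next 4-byte boundary after the opcode byte at bcp.
    static uintptr_t aligned_operands(uintptr_t bcp) {
      return (bcp + 4) & (uintptr_t)-4;  // lea(rbx, at_bcp(4)); andptr(rbx, -4)
    }

    int main() {
      assert(aligned_operands(100) == 104);  // opcode at 100 -> 3 pad bytes
      assert(aligned_operands(103) == 104);  // opcode at 103 -> no padding
      assert(aligned_operands(104) == 108);
      return 0;
    }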
*** 1764,1776 ****
    __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
    __ profile_switch_case(rax, rbx, rcx);
    // continue execution
    __ bind(continue_execution);
    __ bswapl(rdx);
!   __ movslq(rdx, rdx);
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
!   __ addq(r13, rdx);
    __ dispatch_only(vtos);
    // handle default
    __ bind(default_case);
    __ profile_switch_default(rax);
    __ movl(rdx, Address(rbx, 0));
--- 1830,1842 ----
    __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
    __ profile_switch_case(rax, rbx, rcx);
    // continue execution
    __ bind(continue_execution);
    __ bswapl(rdx);
!   __ movl2ptr(rdx, rdx);
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
!   __ addptr(r13, rdx);
    __ dispatch_only(vtos);
    // handle default
    __ bind(default_case);
    __ profile_switch_default(rax);
    __ movl(rdx, Address(rbx, 0));
*** 1786,1799 ****
    transition(itos, vtos);
    Label loop_entry, loop, found, continue_execution;
    // bswap rax so we can avoid bswapping the table entries
    __ bswapl(rax);
    // align r13
!   __ leaq(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                       // this instruction (change offsets
                                       // below)
!   __ andq(rbx, -BytesPerInt);
    // set counter
    __ movl(rcx, Address(rbx, BytesPerInt));
    __ bswapl(rcx);
    __ jmpb(loop_entry);
    // table search
--- 1852,1865 ----
    transition(itos, vtos);
    Label loop_entry, loop, found, continue_execution;
    // bswap rax so we can avoid bswapping the table entries
    __ bswapl(rax);
    // align r13
!   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                      // this instruction (change offsets
                                      // below)
!   __ andptr(rbx, -BytesPerInt);
    // set counter
    __ movl(rcx, Address(rbx, BytesPerInt));
    __ bswapl(rcx);
    __ jmpb(loop_entry);
    // table search
*** 1812,1824 ****
    __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
    __ profile_switch_case(rcx, rax, rbx);
    // continue execution
    __ bind(continue_execution);
    __ bswapl(rdx);
!   __ movslq(rdx, rdx);
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
!   __ addq(r13, rdx);
    __ dispatch_only(vtos);
  }

  void TemplateTable::fast_binaryswitch() {
    transition(itos, vtos);
--- 1878,1890 ----
    __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
    __ profile_switch_case(rcx, rax, rbx);
    // continue execution
    __ bind(continue_execution);
    __ bswapl(rdx);
!   __ movl2ptr(rdx, rdx);
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
!   __ addptr(r13, rdx);
    __ dispatch_only(vtos);
  }

  void TemplateTable::fast_binaryswitch() {
    transition(itos, vtos);
*** 1854,1868 ****
    const Register j     = rdx;
    const Register h     = rdi;
    const Register temp  = rsi;

    // Find array start
!   __ leaq(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                             // get rid of this
                                             // instruction (change
                                             // offsets below)
!   __ andq(array, -BytesPerInt);

    // Initialize i & j
    __ xorl(i, i);                            // i = 0;
    __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
--- 1920,1934 ----
    const Register j     = rdx;
    const Register h     = rdi;
    const Register temp  = rsi;

    // Find array start
!   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                            // get rid of this
                                            // instruction (change
                                            // offsets below)
!   __ andptr(array, -BytesPerInt);

    // Initialize i & j
    __ xorl(i, i);                            // i = 0;
    __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
*** 1910,1932 ****
    // entry found -> j = offset
    __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
    __ profile_switch_case(i, key, array);
    __ bswapl(j);
!   __ movslq(j, j);
    __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
!   __ addq(r13, j);
    __ dispatch_only(vtos);

    // default case -> j = default offset
    __ bind(default_case);
    __ profile_switch_default(i);
    __ movl(j, Address(array, -2 * BytesPerInt));
    __ bswapl(j);
!   __ movslq(j, j);
    __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
!   __ addq(r13, j);
    __ dispatch_only(vtos);
  }

  void TemplateTable::_return(TosState state) {
--- 1976,1998 ----
    // entry found -> j = offset
    __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
    __ profile_switch_case(i, key, array);
    __ bswapl(j);
!   __ movl2ptr(j, j);
    __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
!   __ addptr(r13, j);
    __ dispatch_only(vtos);

    // default case -> j = default offset
    __ bind(default_case);
    __ profile_switch_default(i);
    __ movl(j, Address(array, -2 * BytesPerInt));
    __ bswapl(j);
!   __ movl2ptr(j, j);
    __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
!   __ addptr(r13, j);
    __ dispatch_only(vtos);
  }

  void TemplateTable::_return(TosState state) {
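fast_binaryswitch above keeps the classic i/j halving over the sorted (match, offset) pairs of a lookupswitch. The same search in plain C++:

    #include <cassert>
    #include <cstdint>

    // Model of fast_binaryswitch's search: lookupswitch operands are a
    // sorted array of (match, offset) pairs; the loop mirrors the
    // template's invariant-based halving with h = (i + j) >> 1.
    struct Pair { int32_t match; int32_t offset; };

    static int32_t lookup(const Pair* a, int n, int32_t key, int32_t def_off) {
      int i = 0, j = n;                // i = 0; j = length(array);
      while (i + 1 < j) {              // while (i+1 < j)
        int h = (i + j) >> 1;          // h = (i + j) >> 1, so i <= h < j
        if (key < a[h].match) j = h;   // if (key < array[h].fast_match()) j = h
        else                  i = h;   // else i = h
      }
      return (n > 0 && a[i].match == key) ? a[i].offset : def_off;
    }

    int main() {
      Pair table[] = { {-5, 10}, {0, 20}, {7, 30}, {42, 40} };
      assert(lookup(table, 4, 7, -1) == 30);
      assert(lookup(table, 4, 8, -1) == -1);   // falls to the default offset
      assert(lookup(table, 4, -5, -1) == 10);
      return 0;
    }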
*** 1934,1945 ****
    assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation

    if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
      assert(state == vtos, "only valid state");
!     __ movq(c_rarg1, aaddress(0));
!     __ movq(rdi, Address(c_rarg1, oopDesc::klass_offset_in_bytes()));
      __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
      __ testl(rdi, JVM_ACC_HAS_FINALIZER);
      Label skip_register_finalizer;
      __ jcc(Assembler::zero, skip_register_finalizer);
--- 2000,2011 ----
    assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation

    if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
      assert(state == vtos, "only valid state");
!     __ movptr(c_rarg1, aaddress(0));
!     __ load_klass(rdi, c_rarg1);
      __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
      __ testl(rdi, JVM_ACC_HAS_FINALIZER);
      Label skip_register_finalizer;
      __ jcc(Assembler::zero, skip_register_finalizer);
*** 2045,2065 ****
                                               bool is_static = false) {
    assert_different_registers(cache, index, flags, off);

    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    // Field offset
!   __ movq(off, Address(cache, index, Address::times_8,
                         in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
    // Flags
    __ movl(flags, Address(cache, index, Address::times_8,
                           in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));

    // klass overwrite register
    if (is_static) {
!     __ movq(obj, Address(cache, index, Address::times_8,
                           in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
    }
  }
--- 2111,2131 ----
                                               bool is_static = false) {
    assert_different_registers(cache, index, flags, off);

    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    // Field offset
!   __ movptr(off, Address(cache, index, Address::times_8,
                           in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
    // Flags
    __ movl(flags, Address(cache, index, Address::times_8,
                           in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));

    // klass overwrite register
    if (is_static) {
!     __ movptr(obj, Address(cache, index, Address::times_8,
                             in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
    }
  }
*** 2089,2101 ****
                                      ConstantPoolCacheEntry::f2_offset());

    resolve_cache_and_index(byte_no, cache, index);

    assert(wordSize == 8, "adjust code below");
!   __ movq(method, Address(cache, index, Address::times_8, method_offset));
    if (itable_index != noreg) {
!     __ movq(itable_index, Address(cache, index, Address::times_8, index_offset));
    }
    __ movl(flags , Address(cache, index, Address::times_8, flags_offset));
  }
--- 2155,2167 ----
                                      ConstantPoolCacheEntry::f2_offset());

    resolve_cache_and_index(byte_no, cache, index);

    assert(wordSize == 8, "adjust code below");
!   __ movptr(method, Address(cache, index, Address::times_8, method_offset));
    if (itable_index != noreg) {
!     __ movptr(itable_index, Address(cache, index, Address::times_8, index_offset));
    }
    __ movl(flags , Address(cache, index, Address::times_8, flags_offset));
  }
*** 2117,2133 ****
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);

    // cache entry pointer
!   __ addq(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
    __ shll(c_rarg3, LogBytesPerWord);
!   __ addq(c_rarg2, c_rarg3);
    if (is_static) {
      __ xorl(c_rarg1, c_rarg1); // NULL object reference
    } else {
!     __ movq(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or NULL
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
--- 2183,2199 ----
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);

    // cache entry pointer
!   __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
    __ shll(c_rarg3, LogBytesPerWord);
!   __ addptr(c_rarg2, c_rarg3);
    if (is_static) {
      __ xorl(c_rarg1, c_rarg1); // NULL object reference
    } else {
!     __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or NULL
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
*** 2185,2195 ****
    __ bind(notByte);
    __ cmpl(flags, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
!   __ movq(rax, field);
    __ push(atos);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
    }
    __ jmp(Done);
--- 2251,2261 ----
    __ bind(notByte);
    __ cmpl(flags, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
!   __ load_heap_oop(rax, field);
    __ push(atos);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
    }
    __ jmp(Done);
*** 2320,2343 ****
                                             ConstantPoolCacheEntry::flags_offset())));
      __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
      // Make sure we don't need to mask rcx for tosBits after the
      // above shift
      ConstantPoolCacheEntry::verify_tosBits();
!     __ movq(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
      __ cmpl(c_rarg3, ltos);
!     __ cmovq(Assembler::equal, c_rarg1, at_tos_p2()); // ltos (two word jvalue)
      __ cmpl(c_rarg3, dtos);
!     __ cmovq(Assembler::equal, c_rarg1, at_tos_p2()); // dtos (two word jvalue)
    }
    // cache entry pointer
!   __ addq(c_rarg2, in_bytes(cp_base_offset));
    __ shll(rscratch1, LogBytesPerWord);
!   __ addq(c_rarg2, rscratch1);
    // object (tos)
!   __ movq(c_rarg3, rsp);
    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
--- 2386,2409 ----
                                             ConstantPoolCacheEntry::flags_offset())));
      __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
      // Make sure we don't need to mask rcx for tosBits after the
      // above shift
      ConstantPoolCacheEntry::verify_tosBits();
!     __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
      __ cmpl(c_rarg3, ltos);
!     __ cmovptr(Assembler::equal, c_rarg1, at_tos_p2()); // ltos (two word jvalue)
      __ cmpl(c_rarg3, dtos);
!     __ cmovptr(Assembler::equal, c_rarg1, at_tos_p2()); // dtos (two word jvalue)
    }
    // cache entry pointer
!   __ addptr(c_rarg2, in_bytes(cp_base_offset));
    __ shll(rscratch1, LogBytesPerWord);
!   __ addptr(c_rarg2, rscratch1);
    // object (tos)
!   __ mov(c_rarg3, rsp);
    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
*** 2395,2406 ****
    __ cmpl(flags, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
    __ pop(atos);
    if (!is_static) pop_and_check_object(obj);
!   __ movq(field, rax);
!   __ store_check(obj, field); // Need to mark card
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
    }
    __ jmp(Done);
--- 2461,2474 ----
    __ cmpl(flags, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
    __ pop(atos);
    if (!is_static) pop_and_check_object(obj);
!
!   // Store into the field
!   do_oop_store(_masm, field, rax, _bs->kind(), false);
!
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
    }
    __ jmp(Done);
*** 2511,2526 ****
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, L2);
    __ pop_ptr(rbx);                  // copy the object pointer from tos
    __ verify_oop(rbx);
    __ push_ptr(rbx);                 // put the object pointer back on tos
!   __ subq(rsp, sizeof(jvalue));  // add space for a jvalue object
!   __ movq(c_rarg3, rsp);
    const Address field(c_rarg3, 0);

    switch (bytecode()) {          // load values into the jvalue object
!   case Bytecodes::_fast_aputfield: // fall through
    case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
    case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
    case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
--- 2579,2594 ----
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, L2);
    __ pop_ptr(rbx);                  // copy the object pointer from tos
    __ verify_oop(rbx);
    __ push_ptr(rbx);                 // put the object pointer back on tos
!   __ subptr(rsp, sizeof(jvalue));  // add space for a jvalue object
!   __ mov(c_rarg3, rsp);
    const Address field(c_rarg3, 0);

    switch (bytecode()) {          // load values into the jvalue object
!   case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
    case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
    case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
    case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
*** 2530,2552 ****
      ShouldNotReachHere();
    }

    // Save rax because call_VM() will clobber it, then use it for
    // JVMTI purposes
!   __ pushq(rax);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
    __ verify_oop(rbx);
    // rbx: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               rbx, c_rarg2, c_rarg3);
!   __ popq(rax);     // restore lower value
!   __ addq(rsp, sizeof(jvalue));  // release jvalue object space
    __ bind(L2);
  }
  }

  void TemplateTable::fast_storefield(TosState state) {
--- 2598,2620 ----
      ShouldNotReachHere();
    }

    // Save rax because call_VM() will clobber it, then use it for
    // JVMTI purposes
!   __ push(rax);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
    __ verify_oop(rbx);
    // rbx: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               rbx, c_rarg2, c_rarg3);
!   __ pop(rax);     // restore lower value
!   __ addptr(rsp, sizeof(jvalue));  // release jvalue object space
    __ bind(L2);
  }
  }

  void TemplateTable::fast_storefield(TosState state) {
*** 2563,2573 ****
    __ movl(rdx, Address(rcx, rbx, Address::times_8,
                         in_bytes(base + ConstantPoolCacheEntry::flags_offset())));

    // replace index with field offset from cache entry
!   __ movq(rbx, Address(rcx, rbx, Address::times_8,
                         in_bytes(base + ConstantPoolCacheEntry::f2_offset())));

    // [jk] not needed currently
    // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
    //                                              Assembler::StoreStore));
--- 2631,2641 ----
    __ movl(rdx, Address(rcx, rbx, Address::times_8,
                         in_bytes(base + ConstantPoolCacheEntry::flags_offset())));

    // replace index with field offset from cache entry
!   __ movptr(rbx, Address(rcx, rbx, Address::times_8,
                           in_bytes(base + ConstantPoolCacheEntry::f2_offset())));

    // [jk] not needed currently
    // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
    //                                              Assembler::StoreStore));
*** 2583,2594 ****
    const Address field(rcx, rbx, Address::times_1);

    // access field
    switch (bytecode()) {
    case Bytecodes::_fast_aputfield:
!     __ movq(field, rax);
!     __ store_check(rcx, field);
      break;
    case Bytecodes::_fast_lputfield:
      __ movq(field, rax);
      break;
    case Bytecodes::_fast_iputfield:
--- 2651,2661 ----
    const Address field(rcx, rbx, Address::times_1);

    // access field
    switch (bytecode()) {
    case Bytecodes::_fast_aputfield:
!     do_oop_store(_masm, field, rax, _bs->kind(), false);
      break;
    case Bytecodes::_fast_lputfield:
      __ movq(field, rax);
      break;
    case Bytecodes::_fast_iputfield:
*** 2632,2651 ****
    __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, L1);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
-   __ movq(r12, rax);  // save object pointer before call_VM() clobbers it
    __ verify_oop(rax);
!   __ movq(c_rarg1, rax);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
!   __ movq(rax, r12); // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
--- 2699,2719 ----
    __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, L1);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
    __ verify_oop(rax);
!   __ mov(r12, rax);  // save object pointer before call_VM() clobbers it
!   __ mov(c_rarg1, rax);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
!   __ mov(rax, r12); // restore object pointer
!   __ reinit_heapbase();
    __ bind(L1);
  }

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
*** 2656,2666 ****
    //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
    //                                 ConstantPoolCacheEntry::flags_offset())));
    //   __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
    //   __ andl(rdx, 0x1);
    // }
!   __ movq(rbx, Address(rcx, rbx, Address::times_8,
                         in_bytes(constantPoolCacheOopDesc::base_offset() +
                                  ConstantPoolCacheEntry::f2_offset())));

    // rax: object
    __ verify_oop(rax);
--- 2724,2734 ----
    //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
    //                                 ConstantPoolCacheEntry::flags_offset())));
    //   __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
    //   __ andl(rdx, 0x1);
    // }
!   __ movptr(rbx, Address(rcx, rbx, Address::times_8,
                           in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset())));

    // rax: object
    __ verify_oop(rax);
*** 2668,2678 ****
    Address field(rax, rbx, Address::times_1);

    // access field
    switch (bytecode()) {
    case Bytecodes::_fast_agetfield:
!     __ movq(rax, field);
      __ verify_oop(rax);
      break;
    case Bytecodes::_fast_lgetfield:
      __ movq(rax, field);
      break;
--- 2736,2746 ----
    Address field(rax, rbx, Address::times_1);

    // access field
    switch (bytecode()) {
    case Bytecodes::_fast_agetfield:
!     __ load_heap_oop(rax, field);
      __ verify_oop(rax);
      break;
    case Bytecodes::_fast_lgetfield:
      __ movq(rax, field);
      break;
*** 2709,2736 ****
  void TemplateTable::fast_xaccess(TosState state) {
    transition(vtos, state);

    // get receiver
!   __ movq(rax, aaddress(0));
    debug_only(__ verify_local_tag(frame::TagReference, 0));
    // access constant pool cache
    __ get_cache_and_index_at_bcp(rcx, rdx, 2);
!   __ movq(rbx, Address(rcx, rdx, Address::times_8,
                         in_bytes(constantPoolCacheOopDesc::base_offset() +
                                  ConstantPoolCacheEntry::f2_offset())));
    // make sure exception is reported in correct bcp range (getfield is
    // next instruction)
!   __ incrementq(r13);
    __ null_check(rax);
    switch (state) {
    case itos:
      __ movl(rax, Address(rax, rbx, Address::times_1));
      break;
    case atos:
!     __ movq(rax, Address(rax, rbx, Address::times_1));
      __ verify_oop(rax);
      break;
    case ftos:
      __ movflt(xmm0, Address(rax, rbx, Address::times_1));
      break;
--- 2777,2804 ----
  void TemplateTable::fast_xaccess(TosState state) {
    transition(vtos, state);

    // get receiver
!   __ movptr(rax, aaddress(0));
    debug_only(__ verify_local_tag(frame::TagReference, 0));
    // access constant pool cache
    __ get_cache_and_index_at_bcp(rcx, rdx, 2);
!   __ movptr(rbx, Address(rcx, rdx, Address::times_8,
                           in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset())));
    // make sure exception is reported in correct bcp range (getfield is
    // next instruction)
!   __ increment(r13);
    __ null_check(rax);
    switch (state) {
    case itos:
      __ movl(rax, Address(rax, rbx, Address::times_1));
      break;
    case atos:
!     __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
      __ verify_oop(rax);
      break;
    case ftos:
      __ movflt(xmm0, Address(rax, rbx, Address::times_1));
      break;
*** 2749,2759 ****
    //   __ jcc(Assembler::zero, notVolatile);
    //   __ membar(Assembler::LoadLoad);
    //   __ bind(notVolatile);
    // }

!   __ decrementq(r13);
  }

  //-----------------------------------------------------------------------------
--- 2817,2827 ----
    //   __ jcc(Assembler::zero, notVolatile);
    //   __ membar(Assembler::LoadLoad);
    //   __ bind(notVolatile);
    // }

!   __ decrement(r13);
  }

  //-----------------------------------------------------------------------------
*** 2788,2798 ****
    // load receiver if needed (note: no return address pushed yet)
    if (load_receiver) {
      __ movl(recv, flags);
      __ andl(recv, 0xFF);
      if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
!     __ movq(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)));
      __ verify_oop(recv);
    }

    // do null check if needed
    if (receiver_null_check) {
--- 2856,2867 ----
    // load receiver if needed (note: no return address pushed yet)
    if (load_receiver) {
      __ movl(recv, flags);
      __ andl(recv, 0xFF);
      if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
!     __ movptr(recv, Address(rsp, recv, Address::times_8,
!                             -Interpreter::expr_offset_in_bytes(1)));
      __ verify_oop(recv);
    }

    // do null check if needed
    if (receiver_null_check) {
*** 2810,2824 ****
    // load return address
    {
      ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
      ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
      __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
!     __ movq(flags, Address(rscratch1, flags, Address::times_8));
    }

    // push return address
!   __ pushq(flags);

    // Restore flag field from the constant pool cache, and restore esi
    // for later null checks. r13 is the bytecode pointer
    if (save_flags) {
      __ movl(flags, r13);
--- 2879,2893 ----
    // load return address
    {
      ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
      ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
      __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
!     __ movptr(flags, Address(rscratch1, flags, Address::times_8));
    }

    // push return address
!   __ push(flags);

    // Restore flag field from the constant pool cache, and restore esi
    // for later null checks. r13 is the bytecode pointer
    if (save_flags) {
      __ movl(flags, r13);
*** 2855,2879 ****
    __ bind(notFinal);

    // get receiver klass
    __ null_check(recv, oopDesc::klass_offset_in_bytes());
!   __ movq(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
    __ verify_oop(rax);

    // profile this call
    __ profile_virtual_call(rax, r14, rdx);

    // get target methodOop & entry point
    const int base = instanceKlass::vtable_start_offset() * wordSize;
    assert(vtableEntry::size() * wordSize == 8,
           "adjust the scaling in the code below");
!   __ movq(method, Address(rax, index, Address::times_8, base + vtableEntry::method_offset_in_bytes()));
!   __ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
    __ jump_from_interpreted(method, rdx);
  }

  void TemplateTable::invokevirtual(int byte_no) {
--- 2924,2948 ----
    __ bind(notFinal);

    // get receiver klass
    __ null_check(recv, oopDesc::klass_offset_in_bytes());
!   __ load_klass(rax, recv);
    __ verify_oop(rax);

    // profile this call
    __ profile_virtual_call(rax, r14, rdx);

    // get target methodOop & entry point
    const int base = instanceKlass::vtable_start_offset() * wordSize;
    assert(vtableEntry::size() * wordSize == 8,
           "adjust the scaling in the code below");
!   __ movptr(method, Address(rax, index, Address::times_8, base + vtableEntry::method_offset_in_bytes()));
!   __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
    __ jump_from_interpreted(method, rdx);
  }

  void TemplateTable::invokevirtual(int byte_no) {
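invokevirtual_helper above resolves the target with a single scaled-index load: the methodOop sits at a fixed vtable slot inside the receiver's klass, so no search is needed. A structural model (the field layout here is illustrative, not the real klass layout):

    #include <cassert>
    #include <cstdint>

    // Model of vtable dispatch: the target method is found at
    //   klass + vtable_start + index * wordSize
    // which the template forms as one
    //   movptr(method, Address(rax, index, times_8, base + ...)).
    struct Method { int id; };

    struct Klass {
      int64_t header[4];   // stand-in for klass fields before the vtable
      Method* vtable[8];   // vtable_start_offset in the real layout
    };

    static Method* vtable_lookup(Klass* k, int index) {
      return k->vtable[index];  // one scaled-index load, no search
    }

    int main() {
      Method m0 = {0}, m1 = {1};
      Klass k = {};
      k.vtable[0] = &m0;
      k.vtable[3] = &m1;
      assert(vtable_lookup(&k, 3)->id == 1);
      return 0;
    }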
*** 2933,3019 ****
    invokevirtual_helper(rbx, rcx, rdx);
    __ bind(notMethod);

    // Get receiver klass into rdx - also a null check
    __ restore_locals(); // restore r14
!   __ movq(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
    __ verify_oop(rdx);

    // profile this call
    __ profile_virtual_call(rdx, r13, r14);

!   __ movq(r14, rdx); // Save klassOop in r14

    // Compute start of first itableOffsetEntry (which is at the end of
    // the vtable)
    const int base = instanceKlass::vtable_start_offset() * wordSize;
    // Get length of vtable
    assert(vtableEntry::size() * wordSize == 8,
           "adjust the scaling in the code below");
    __ movl(r13, Address(rdx, instanceKlass::vtable_length_offset() * wordSize));
!   __ leaq(rdx, Address(rdx, r13, Address::times_8, base));

    if (HeapWordsPerLong > 1) {
      // Round up to align_object_offset boundary
!     __ round_to_q(rdx, BytesPerLong);
    }

    Label entry, search, interface_ok;

    __ jmpb(entry);
    __ bind(search);
!   __ addq(rdx, itableOffsetEntry::size() * wordSize);

    __ bind(entry);

    // Check that the entry is non-null. A null entry means that the
    // receiver class doesn't implement the interface, and wasn't the
    // same as the receiver class checked when the interface was
    // resolved.
!   __ pushq(rdx);
!   __ movq(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
!   __ testq(rdx, rdx);
    __ jcc(Assembler::notZero, interface_ok);
    // throw exception
!   __ popq(rdx); // pop saved register first.
!   __ popq(rbx); // pop return address (pushed by prepare_invoke)
    __ restore_bcp(); // r13 must be correct for exception handler (was
                      // destroyed)
    __ restore_locals(); // make sure locals pointer is correct as well
                         // (was destroyed)
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    __ bind(interface_ok);

!   __ popq(rdx);
!   __ cmpq(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
    __ jcc(Assembler::notEqual, search);

    __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes()));

!   __ addq(rdx, r14); // Add offset to klassOop
    assert(itableMethodEntry::size() * wordSize == 8,
           "adjust the scaling in the code below");
!   __ movq(rbx, Address(rdx, rbx, Address::times_8));
    // rbx: methodOop to call
    // rcx: receiver
    // Check for abstract method error
    // Note: This should be done more efficiently via a
    // throw_abstract_method_error interpreter entry point and a
    // conditional jump to it in case of a null method.
    {
      Label L;
!     __ testq(rbx, rbx);
      __ jcc(Assembler::notZero, L);
      // throw exception
      // note: must restore interpreter registers to canonical
      // state for exception handling to work correctly!
!     __ popq(rbx); // pop return address (pushed by prepare_invoke)
      __ restore_bcp(); // r13 must be correct for exception handler
                        // (was destroyed)
      __ restore_locals(); // make sure locals pointer is correct as
                           // well (was destroyed)
      __ call_VM(noreg,
--- 3002,3088 ----
    invokevirtual_helper(rbx, rcx, rdx);
    __ bind(notMethod);

    // Get receiver klass into rdx - also a null check
    __ restore_locals(); // restore r14
!   __ load_klass(rdx, rcx);
    __ verify_oop(rdx);

    // profile this call
    __ profile_virtual_call(rdx, r13, r14);

!   __ mov(r14, rdx); // Save klassOop in r14

    // Compute start of first itableOffsetEntry (which is at the end of
    // the vtable)
    const int base = instanceKlass::vtable_start_offset() * wordSize;
    // Get length of vtable
    assert(vtableEntry::size() * wordSize == 8,
           "adjust the scaling in the code below");
    __ movl(r13, Address(rdx, instanceKlass::vtable_length_offset() * wordSize));
!   __ lea(rdx, Address(rdx, r13, Address::times_8, base));

    if (HeapWordsPerLong > 1) {
      // Round up to align_object_offset boundary
!     __ round_to(rdx, BytesPerLong);
    }

    Label entry, search, interface_ok;

    __ jmpb(entry);
    __ bind(search);
!   __ addptr(rdx, itableOffsetEntry::size() * wordSize);

    __ bind(entry);

    // Check that the entry is non-null. A null entry means that the
    // receiver class doesn't implement the interface, and wasn't the
    // same as the receiver class checked when the interface was
    // resolved.
!   __ push(rdx);
!   __ movptr(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
!   __ testptr(rdx, rdx);
    __ jcc(Assembler::notZero, interface_ok);
    // throw exception
!   __ pop(rdx); // pop saved register first.
!   __ pop(rbx); // pop return address (pushed by prepare_invoke)
    __ restore_bcp(); // r13 must be correct for exception handler (was
                      // destroyed)
    __ restore_locals(); // make sure locals pointer is correct as well
                         // (was destroyed)
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    __ bind(interface_ok);

!   __ pop(rdx);
!   __ cmpptr(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
    __ jcc(Assembler::notEqual, search);

    __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes()));

!   __ addptr(rdx, r14); // Add offset to klassOop
    assert(itableMethodEntry::size() * wordSize == 8,
           "adjust the scaling in the code below");
!   __ movptr(rbx, Address(rdx, rbx, Address::times_8));
    // rbx: methodOop to call
    // rcx: receiver
    // Check for abstract method error
    // Note: This should be done more efficiently via a
    // throw_abstract_method_error interpreter entry point and a
    // conditional jump to it in case of a null method.
    {
      Label L;
!     __ testptr(rbx, rbx);
      __ jcc(Assembler::notZero, L);
      // throw exception
      // note: must restore interpreter registers to canonical
      // state for exception handling to work correctly!
!     __ pop(rbx); // pop return address (pushed by prepare_invoke)
      __ restore_bcp(); // r13 must be correct for exception handler
                        // (was destroyed)
      __ restore_locals(); // make sure locals pointer is correct as
                           // well (was destroyed)
      __ call_VM(noreg,
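The itable walk above is, by contrast with the vtable case, a linear scan of (interface, offset) entries placed after the vtable, terminated by a null interface slot, which signals IncompatibleClassChangeError. Modeled in plain C++ (the layout is illustrative):

    #include <cassert>
    #include <cstddef>

    // Model of the invokeinterface itable scan: walk (interface, offset)
    // entries until the requested interface is found; a NULL interface
    // slot means the receiver class does not implement it.
    struct ItableEntry { const void* interface; int offset; };

    static int find_itable_offset(const ItableEntry* entries,
                                  const void* wanted, bool* found) {
      for (const ItableEntry* e = entries; e->interface != NULL; ++e) {
        if (e->interface == wanted) { *found = true; return e->offset; }
      }
      *found = false;  // the VM throws IncompatibleClassChangeError here
      return -1;
    }

    int main() {
      int ifaceA, ifaceB, ifaceC;  // addresses stand in for interface klassOops
      ItableEntry itable[] = { {&ifaceA, 64}, {&ifaceB, 96}, {NULL, 0} };
      bool found;
      assert(find_itable_offset(itable, &ifaceB, &found) == 96 && found);
      assert(find_itable_offset(itable, &ifaceC, &found) == -1 && !found);
      return 0;
    }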
*** 3022,3032 **** // the call_VM checks for exception, so we should never return here. __ should_not_reach_here(); __ bind(L); } ! __ movq(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); // do the call // rcx: receiver // rbx: methodOop __ jump_from_interpreted(rbx, rdx); --- 3091,3101 ---- // the call_VM checks for exception, so we should never return here. __ should_not_reach_here(); __ bind(L); } ! __ movptr(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); // do the call // rcx: receiver // rbx: methodOop __ jump_from_interpreted(rbx, rdx);
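The invokeinterface path above boils down to a linear scan of the itable that hangs off the receiver's klass. As a rough C++ model of what the emitted code does (lookup_itable and the ItableOffsetEntry layout are illustrative stand-ins for itableOffsetEntry/itableMethodEntry, not the VM's actual declarations):

    struct Klass;   // stands in for the interface's klassOop
    struct Method;  // stands in for a methodOop

    struct ItableOffsetEntry {   // one per implemented interface
      Klass* itf;                // a NULL entry terminates the table
      int    offset;             // byte offset of this interface's methods
    };

    // recv_klass: the receiver's klass; tbl starts right after the vtable,
    // exactly as the base/vtable-length computation above locates it.
    static Method* lookup_itable(char* recv_klass, ItableOffsetEntry* tbl,
                                 Klass* itf, int index) {
      for (ItableOffsetEntry* e = tbl; ; e++) {
        if (e->itf == NULL)
          return NULL;           // throw_IncompatibleClassChangeError
        if (e->itf == itf) {
          Method** methods = (Method**)(recv_klass + e->offset);
          return methods[index]; // NULL here -> the abstract-method check
        }
      }
    }

A NULL interface entry maps to the IncompatibleClassChangeError throw; a NULL method slot maps to the abstract-method test at label L.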
*** 3041,3056 **** Label slow_case; Label done; Label initialize_header; Label initialize_object; // including clearing the fields Label allocate_shared; - ExternalAddress top((address)Universe::heap()->top_addr()); - ExternalAddress end((address)Universe::heap()->end_addr()); __ get_cpool_and_tags(rsi, rax); // get instanceKlass ! __ movq(rsi, Address(rsi, rdx, Address::times_8, sizeof(constantPoolOopDesc))); // make sure the class we're about to instantiate has been // resolved. Note: slow_case does a pop of stack, which is why we // loaded class/pushed above --- 3110,3123 ---- Label slow_case; Label done; Label initialize_header; Label initialize_object; // including clearing the fields Label allocate_shared; __ get_cpool_and_tags(rsi, rax); // get instanceKlass ! __ movptr(rsi, Address(rsi, rdx, Address::times_8, sizeof(constantPoolOopDesc))); // make sure the class we're about to instantiate has been // resolved. Note: slow_case does a pop of stack, which is why we // loaded class/pushed above
*** 3083,3097 ****
  const bool allow_shared_alloc =
  Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
 
  if (UseTLAB) {
! __ movq(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
! __ leaq(rbx, Address(rax, rdx, Address::times_1));
! __ cmpq(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
  __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
! __ movq(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
  if (ZeroTLAB) {
  // the fields have already been cleared
  __ jmp(initialize_header);
  } else {
  // initialize both the header and fields
--- 3150,3164 ----
  const bool allow_shared_alloc =
  Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
 
  if (UseTLAB) {
! __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
! __ lea(rbx, Address(rax, rdx, Address::times_1));
! __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
  __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
! __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
  if (ZeroTLAB) {
  // the fields have already been cleared
  __ jmp(initialize_header);
  } else {
  // initialize both the header and fields
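The TLAB fast path above is a plain bump of the thread-local top pointer; no atomics are needed because each thread owns its TLAB. A minimal sketch (the Tlab struct is a stand-in for the tlab_top/tlab_end fields reached through r15_thread):

    #include <cstddef>

    struct Tlab { char* top; char* end; };  // illustrative, per-thread

    static char* tlab_allocate(Tlab& tlab, size_t size_in_bytes) {
      char* obj     = tlab.top;             // rax
      char* new_top = obj + size_in_bytes;  // rbx
      if (new_top > tlab.end)
        return NULL;                        // allocate_shared or slow_case
      tlab.top = new_top;                   // plain store: no other thread
      return obj;                           // touches this TLAB
    }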
*** 3103,3124 ****
  //
  // rdx: instance size in bytes
  if (allow_shared_alloc) {
  __ bind(allocate_shared);
 
  const Register RtopAddr = rscratch1;
  const Register RendAddr = rscratch2;
 
  __ lea(RtopAddr, top);
  __ lea(RendAddr, end);
! __ movq(rax, Address(RtopAddr, 0));
 
  // For retries rax gets set by cmpxchgq
  Label retry;
  __ bind(retry);
! __ leaq(rbx, Address(rax, rdx, Address::times_1));
! __ cmpq(rbx, Address(RendAddr, 0));
  __ jcc(Assembler::above, slow_case);
 
  // Compare rax with the top addr, and if still equal, store the new
  // top addr (in rbx) at the address of the top addr pointer. Sets ZF if it
  // was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
--- 3170,3194 ----
  //
  // rdx: instance size in bytes
  if (allow_shared_alloc) {
  __ bind(allocate_shared);
+ ExternalAddress top((address)Universe::heap()->top_addr());
+ ExternalAddress end((address)Universe::heap()->end_addr());
+ 
  const Register RtopAddr = rscratch1;
  const Register RendAddr = rscratch2;
 
  __ lea(RtopAddr, top);
  __ lea(RendAddr, end);
! __ movptr(rax, Address(RtopAddr, 0));
 
  // For retries rax gets set by cmpxchgq
  Label retry;
  __ bind(retry);
! __ lea(rbx, Address(rax, rdx, Address::times_1));
! __ cmpptr(rbx, Address(RendAddr, 0));
  __ jcc(Assembler::above, slow_case);
 
  // Compare rax with the top addr, and if still equal, store the new
  // top addr (in rbx) at the address of the top addr pointer. Sets ZF if it
  // was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
*** 3127,3137 **** // rbx: object end // rdx: instance size in bytes if (os::is_MP()) { __ lock(); } ! __ cmpxchgq(rbx, Address(RtopAddr, 0)); // if someone beat us on the allocation, try again, otherwise continue __ jcc(Assembler::notEqual, retry); } --- 3197,3207 ---- // rbx: object end // rdx: instance size in bytes if (os::is_MP()) { __ lock(); } ! __ cmpxchgptr(rbx, Address(RtopAddr, 0)); // if someone beat us on the allocation, try again, otherwise continue __ jcc(Assembler::notEqual, retry); }
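Unlike the TLAB path, the shared-eden path races with other threads, so it loops on a lock-prefixed cmpxchg of the heap top. Roughly, with std::atomic as a stand-in for the emitted lock cmpxchgptr ('top' and 'end' play the roles of Universe::heap()->top_addr()/end_addr()):

    #include <atomic>
    #include <cstddef>

    static char* shared_eden_allocate(std::atomic<char*>& top, char* end,
                                      size_t size_in_bytes) {
      char* old_top = top.load();                 // rax: observed top
      for (;;) {                                  // retry:
        char* new_top = old_top + size_in_bytes;  // rbx: object end
        if (new_top > end)
          return nullptr;                         // slow_case
        // On failure old_top is refreshed with the current top, just as
        // cmpxchgptr leaves the observed memory value in rax for retries.
        if (top.compare_exchange_weak(old_top, new_top))
          return old_top;                         // we own [old_top, new_top)
      }
    }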
*** 3156,3172 **** } // initialize object header only. __ bind(initialize_header); if (UseBiasedLocking) { ! __ movq(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); ! __ movq(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); } else { __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), (intptr_t) markOopDesc::prototype()); // header (address 0x1) } ! __ movq(Address(rax, oopDesc::klass_offset_in_bytes()), rsi); // klass __ jmp(done); } { SkipIfEqual skip(_masm, &DTraceAllocProbes, false); --- 3226,3244 ---- } // initialize object header only. __ bind(initialize_header); if (UseBiasedLocking) { ! __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); ! __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); } else { __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), (intptr_t) markOopDesc::prototype()); // header (address 0x1) } ! __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) ! __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops ! __ store_klass(rax, rsi); // store klass last __ jmp(done); } { SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
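The new header-initialization sequence stores the mark word, zeroes the 32-bit klass gap, and writes the (possibly compressed) klass last. A sketch of the 64-bit object header this assumes under UseCompressedOops (field names and offsets here are illustrative, not the VM's declarations):

    #include <stdint.h>

    struct OopHeader {          // 64-bit word size, compressed oops
      uintptr_t mark;           // mark word at offset 0
      uint32_t  narrow_klass;   // compressed klass pointer
      uint32_t  klass_gap;      // 4-byte pad that must be zeroed
    };

    static void init_header(OopHeader* obj, uintptr_t mark_proto,
                            uint32_t narrow_klass) {
      obj->mark       = mark_proto;    // prototype header (biased or not)
      obj->klass_gap  = 0;             // store_klass_gap: clear the pad
      obj->narrow_klass = narrow_klass; // store_klass: klass goes in last
    }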
*** 3212,3222 **** } void TemplateTable::checkcast() { transition(atos, atos); Label done, is_null, ok_is_subtype, quicked, resolved; ! __ testq(rax, rax); // object is in rax __ jcc(Assembler::zero, is_null); // Get cpool & tags index __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index --- 3284,3294 ---- } void TemplateTable::checkcast() { transition(atos, atos); Label done, is_null, ok_is_subtype, quicked, resolved; ! __ testptr(rax, rax); // object is in rax __ jcc(Assembler::zero, is_null); // Get cpool & tags index __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
*** 3224,3249 **** __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); - - __ movq(r12, rcx); // save rcx XXX __ push(atos); // save receiver for result, and for GC call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); - __ pop_ptr(rdx); // restore receiver __ movq(rcx, r12); // restore rcx XXX __ jmpb(resolved); // Get superklass in rax and subklass in rbx __ bind(quicked); ! __ movq(rdx, rax); // Save object in rdx; rax needed for subtype check ! __ movq(rax, Address(rcx, rbx, Address::times_8, sizeof(constantPoolOopDesc))); __ bind(resolved); ! __ movq(rbx, Address(rdx, oopDesc::klass_offset_in_bytes())); // Generate subtype check. Blows rcx, rdi. Object in rdx. // Superklass in rax. Subklass in rbx. __ gen_subtype_check(rbx, ok_is_subtype); --- 3296,3321 ---- __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); __ push(atos); // save receiver for result, and for GC + __ mov(r12, rcx); // save rcx XXX call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); __ movq(rcx, r12); // restore rcx XXX + __ reinit_heapbase(); + __ pop_ptr(rdx); // restore receiver __ jmpb(resolved); // Get superklass in rax and subklass in rbx __ bind(quicked); ! __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check ! __ movptr(rax, Address(rcx, rbx, Address::times_8, sizeof(constantPoolOopDesc))); __ bind(resolved); ! __ load_klass(rbx, rdx); // Generate subtype check. Blows rcx, rdi. Object in rdx. // Superklass in rax. Subklass in rbx. __ gen_subtype_check(rbx, ok_is_subtype);
*** 3252,3262 **** // object is at TOS __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); // Come here on success __ bind(ok_is_subtype); ! __ movq(rax, rdx); // Restore object in rdx // Collect counts on whether this check-cast sees NULLs a lot or not. if (ProfileInterpreter) { __ jmp(done); __ bind(is_null); --- 3324,3334 ---- // object is at TOS __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); // Come here on success __ bind(ok_is_subtype); ! __ mov(rax, rdx); // Restore object in rdx // Collect counts on whether this check-cast sees NULLs a lot or not. if (ProfileInterpreter) { __ jmp(done); __ bind(is_null);
*** 3268,3278 **** } void TemplateTable::instanceof() { transition(atos, itos); Label done, is_null, ok_is_subtype, quicked, resolved; ! __ testq(rax, rax); __ jcc(Assembler::zero, is_null); // Get cpool & tags index __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index --- 3340,3350 ---- } void TemplateTable::instanceof() { transition(atos, itos); Label done, is_null, ok_is_subtype, quicked, resolved; ! __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); // Get cpool & tags index __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
*** 3281,3302 **** Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); - __ movq(r12, rcx); // save rcx __ push(atos); // save receiver for result, and for GC call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); - __ pop_ptr(rdx); // restore receiver - __ movq(rdx, Address(rdx, oopDesc::klass_offset_in_bytes())); __ movq(rcx, r12); // restore rcx __ jmpb(resolved); // Get superklass in rax and subklass in rdx __ bind(quicked); ! __ movq(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); ! __ movq(rax, Address(rcx, rbx, Address::times_8, sizeof(constantPoolOopDesc))); __ bind(resolved); // Generate subtype check. Blows rcx, rdi --- 3353,3375 ---- Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); __ push(atos); // save receiver for result, and for GC + __ mov(r12, rcx); // save rcx call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); __ movq(rcx, r12); // restore rcx + __ reinit_heapbase(); + __ pop_ptr(rdx); // restore receiver + __ load_klass(rdx, rdx); __ jmpb(resolved); // Get superklass in rax and subklass in rdx __ bind(quicked); ! __ load_klass(rdx, rax); ! __ movptr(rax, Address(rcx, rbx, Address::times_8, sizeof(constantPoolOopDesc))); __ bind(resolved); // Generate subtype check. Blows rcx, rdi
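checkcast and instanceof share the quickening and subtype-check machinery above and differ only in how the result is delivered. In rough C++ terms (is_subtype_of stands in for gen_subtype_check and is left undefined in this sketch):

    struct Klass {
      bool is_subtype_of(const Klass* super) const;  // gen_subtype_check
    };
    struct Oop { Klass* klass; };

    // checkcast: NULL passes through unchanged; a failed check throws.
    static Oop* checkcast(Oop* obj, Klass* super) {
      if (obj != nullptr && !obj->klass->is_subtype_of(super))
        throw "ClassCastException";  // _throw_ClassCastException_entry
      return obj;                    // atos in, atos out
    }

    // instanceof: NULL and failed checks both yield 0.
    static int is_instance(Oop* obj, Klass* super) {
      return obj != nullptr && obj->klass->is_subtype_of(super);  // itos out
    }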
*** 3336,3346 **** __ get_method(c_rarg1); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), c_rarg1, r13); ! __ movq(rbx, rax); // post the breakpoint event __ get_method(c_rarg1); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), --- 3409,3419 ---- __ get_method(c_rarg1); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), c_rarg1, r13); ! __ mov(rbx, rax); // post the breakpoint event __ get_method(c_rarg1); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
*** 3394,3449 **** __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL // find a free slot in the monitor block (result in c_rarg1) { Label entry, loop, exit; ! __ movq(c_rarg3, monitor_block_top); // points to current entry, // starting with top-most entry ! __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom // of monitor block __ jmpb(entry); __ bind(loop); // check if current entry is used ! __ cmpq(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); // if not used then remember entry in c_rarg1 ! __ cmovq(Assembler::equal, c_rarg1, c_rarg3); // check if current entry is for same object ! __ cmpq(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); // if same object then stop searching __ jccb(Assembler::equal, exit); // otherwise advance to next entry ! __ addq(c_rarg3, entry_size); __ bind(entry); // check if bottom reached ! __ cmpq(c_rarg3, c_rarg2); // if not at bottom then check this entry __ jcc(Assembler::notEqual, loop); __ bind(exit); } ! __ testq(c_rarg1, c_rarg1); // check if a slot has been found __ jcc(Assembler::notZero, allocated); // if found, continue with that one // allocate one if there's no free slot { Label entry, loop; // 1. compute new pointers // rsp: old expression stack top ! __ movq(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom ! __ subq(rsp, entry_size); // move expression stack top ! __ subq(c_rarg1, entry_size); // move expression stack bottom ! __ movq(c_rarg3, rsp); // set start value for copy loop ! __ movq(monitor_block_bot, c_rarg1); // set new monitor block bottom __ jmp(entry); // 2. move expression stack contents __ bind(loop); ! __ movq(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack // word from old location ! __ movq(Address(c_rarg3, 0), c_rarg2); // and store it at new location ! __ addq(c_rarg3, wordSize); // advance to next word __ bind(entry); ! __ cmpq(c_rarg3, c_rarg1); // check if bottom reached __ jcc(Assembler::notEqual, loop); // if not at bottom then // copy next word } // call run-time routine --- 3467,3522 ---- __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL // find a free slot in the monitor block (result in c_rarg1) { Label entry, loop, exit; ! __ movptr(c_rarg3, monitor_block_top); // points to current entry, // starting with top-most entry ! __ lea(c_rarg2, monitor_block_bot); // points to word before bottom // of monitor block __ jmpb(entry); __ bind(loop); // check if current entry is used ! __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); // if not used then remember entry in c_rarg1 ! __ cmov(Assembler::equal, c_rarg1, c_rarg3); // check if current entry is for same object ! __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); // if same object then stop searching __ jccb(Assembler::equal, exit); // otherwise advance to next entry ! __ addptr(c_rarg3, entry_size); __ bind(entry); // check if bottom reached ! __ cmpptr(c_rarg3, c_rarg2); // if not at bottom then check this entry __ jcc(Assembler::notEqual, loop); __ bind(exit); } ! __ testptr(c_rarg1, c_rarg1); // check if a slot has been found __ jcc(Assembler::notZero, allocated); // if found, continue with that one // allocate one if there's no free slot { Label entry, loop; // 1. compute new pointers // rsp: old expression stack top ! __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom ! __ subptr(rsp, entry_size); // move expression stack top ! 
__ subptr(c_rarg1, entry_size); // move expression stack bottom ! __ mov(c_rarg3, rsp); // set start value for copy loop ! __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom __ jmp(entry); // 2. move expression stack contents __ bind(loop); ! __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack // word from old location ! __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location ! __ addptr(c_rarg3, wordSize); // advance to next word __ bind(entry); ! __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached __ jcc(Assembler::notEqual, loop); // if not at bottom then // copy next word } // call run-time routine
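The slot search above remembers the most recently seen free entry (the cmov) and stops early once it finds an entry for the same object; only if nothing is free does the code grow the monitor block by sliding the expression stack down one entry_size. A compact model of the search (BasicObjectLock here is illustrative, not the VM's layout):

    #include <vector>
    #include <cstddef>

    struct BasicObjectLock { void* obj; };  // + displaced header in the VM

    static BasicObjectLock* find_free_slot(
        std::vector<BasicObjectLock>& monitors, void* obj) {
      BasicObjectLock* free_slot = nullptr;
      for (size_t i = 0; i < monitors.size(); i++) { // top-most entry first
        if (monitors[i].obj == nullptr)
          free_slot = &monitors[i];        // remember a free entry (cmov)
        if (monitors[i].obj == obj)
          break;                           // same object: stop searching
      }
      return free_slot;                    // nullptr -> grow the block
    }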
*** 3452,3465 ****
  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
! __ incrementq(r13);
 
  // store object
! __ movq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(c_rarg1);
 
  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);
--- 3525,3538 ----
  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
! __ increment(r13);
 
  // store object
! __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(c_rarg1);
 
  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);
*** 3485,3510 **** Label found; // find matching slot { Label entry, loop; ! __ movq(c_rarg1, monitor_block_top); // points to current entry, // starting with top-most entry ! __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom // of monitor block __ jmpb(entry); __ bind(loop); // check if current entry is for same object ! __ cmpq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); // if same object then stop searching __ jcc(Assembler::equal, found); // otherwise advance to next entry ! __ addq(c_rarg1, entry_size); __ bind(entry); // check if bottom reached ! __ cmpq(c_rarg1, c_rarg2); // if not at bottom then check this entry __ jcc(Assembler::notEqual, loop); } // error handling. Unlocking was not block-structured --- 3558,3583 ---- Label found; // find matching slot { Label entry, loop; ! __ movptr(c_rarg1, monitor_block_top); // points to current entry, // starting with top-most entry ! __ lea(c_rarg2, monitor_block_bot); // points to word before bottom // of monitor block __ jmpb(entry); __ bind(loop); // check if current entry is for same object ! __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); // if same object then stop searching __ jcc(Assembler::equal, found); // otherwise advance to next entry ! __ addptr(c_rarg1, entry_size); __ bind(entry); // check if bottom reached ! __ cmpptr(c_rarg1, c_rarg2); // if not at bottom then check this entry __ jcc(Assembler::notEqual, loop); } // error handling. Unlocking was not block-structured
*** 3537,3549 **** transition(vtos, atos); __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions // last dim is on top of stack; we want address of first one: // first_addr = last_addr + (ndims - 1) * wordSize if (TaggedStackInterpreter) __ shll(rax, 1); // index*2 ! __ leaq(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), c_rarg1); __ load_unsigned_byte(rbx, at_bcp(3)); if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2 ! __ leaq(rsp, Address(rsp, rbx, Address::times_8)); } --- 3610,3623 ---- transition(vtos, atos); __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions // last dim is on top of stack; we want address of first one: // first_addr = last_addr + (ndims - 1) * wordSize if (TaggedStackInterpreter) __ shll(rax, 1); // index*2 ! __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), c_rarg1); __ load_unsigned_byte(rbx, at_bcp(3)); if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2 ! __ lea(rsp, Address(rsp, rbx, Address::times_8)); } + #endif // !CC_INTERP
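For multianewarray, the lea computes the address of the first dimension from the stack pointer, per the comment above: first_addr = last_addr + (ndims - 1) * wordSize, with the slot count doubled under TaggedStackInterpreter. In plain C++, as a sketch over intptr_t-sized slots:

    #include <stdint.h>

    // rsp points at the last (top-of-stack) dimension; each dimension
    // occupies one slot, or two when the stack is tagged.
    static intptr_t* first_dim_addr(intptr_t* rsp, int ndims, bool tagged) {
      int slots = tagged ? ndims * 2 : ndims;  // shll(rax, 1)
      return rsp + slots - 1;                  // lea(..., times_8, -wordSize)
    }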