--- old/src/cpu/sparc/vm/assembler_sparc.hpp 2010-04-26 03:23:52.406617912 -0700
+++ new/src/cpu/sparc/vm/assembler_sparc.hpp 2010-04-26 03:23:52.010516611 -0700
@@ -87,6 +87,7 @@
 // JSR 292 fixed register usages:
 REGISTER_DECLARATION(Register, G5_method_type , G5);
 REGISTER_DECLARATION(Register, G3_method_handle , G3);
+REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
 
 // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
 // because a single patchable "set" instruction (NativeMovConstReg,
--- old/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp 2010-04-26 03:23:54.042375124 -0700
+++ new/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp 2010-04-26 03:23:53.658989438 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -345,6 +345,13 @@
 }
 
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+  assert(L7 == L7_mh_SP_save, "must be same register");
+  return L7_opr;
+}
+
+
 bool FrameMap::validate_frame() {
   int max_offset = in_bytes(framesize_in_bytes());
   int java_index = 0;
--- old/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp 2010-04-26 03:23:55.532972702 -0700
+++ new/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp 2010-04-26 03:23:55.156470797 -0700
@@ -143,6 +143,3 @@
 
   static bool is_caller_save_register (LIR_Opr reg);
   static bool is_caller_save_register (Register r);
-
-  // JSR 292
-  static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }
--- old/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp 2010-04-26 03:23:57.137541018 -0700
+++ new/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp 2010-04-26 03:23:56.675216403 -0700
@@ -736,7 +736,8 @@
 
 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
   __ call(op->addr(), rtype);
-  // the peephole pass fills the delay slot
+  // The peephole pass fills the delay slot, add_call_info is done in
+  // LIR_Assembler::emit_delay.
 }
 
 
@@ -745,7 +746,8 @@
   __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
   __ relocate(rspec);
   __ call(op->addr(), relocInfo::none);
-  // the peephole pass fills the delay slot
+  // The peephole pass fills the delay slot, add_call_info is done in
+  // LIR_Assembler::emit_delay.
 }
 
 
@@ -766,16 +768,6 @@
 }
 
 
-void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
-  Unimplemented();
-}
-
-
-void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
-  Unimplemented();
-}
-
-
 // load with 32-bit displacement
 int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
   int load_offset = code_offset();
@@ -2934,7 +2926,7 @@
   // we may also be emitting the call info for the instruction
   // which we are the delay slot of.
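
For review context, here is a small self-contained sketch of the convention the rest of this patch wires up: before a MethodHandle invoke the compiled caller copies its SP into a dedicated save location (L7 on SPARC, RBP on x86), and the unwind paths restore SP from that location only when the thread's is_method_handle_return flag is set. All types and names below are illustrative stand-ins, not HotSpot code.

#include <cstdint>
#include <cstdio>

// Invented stand-ins; the real code uses JavaThread, frame and fixed registers.
struct FakeThread {
  bool is_method_handle_return;   // models JavaThread::_is_method_handle_return
};

struct FakeFrame {
  intptr_t* sp;                   // current stack pointer
  intptr_t* mh_sp_save;           // models L7_mh_SP_save / rbp_mh_SP_save
};

// Before the call: the caller parks its SP in the save slot (the patch emits
// __ move(stack_pointer(), method_handle_invoke_SP_save_opr()) for this).
void preserve_sp_for_mh_call(FakeFrame* f) {
  f->mh_sp_save = f->sp;
}

// On the unwind path: restore SP only when the runtime marked the return as
// coming from a MethodHandle call site.
void restore_sp_if_mh_return(FakeFrame* f, const FakeThread* t) {
  if (t->is_method_handle_return) {
    f->sp = f->mh_sp_save;
  }
}

int main() {
  intptr_t stack[8];
  FakeFrame f = { &stack[4], 0 };
  FakeThread t = { true };
  preserve_sp_for_mh_call(&f);
  f.sp -= 2;                      // the MethodHandle adapter extends the frame
  restore_sp_if_mh_return(&f, &t);
  std::printf("sp restored: %s\n", f.sp == &stack[4] ? "yes" : "no");
  return 0;
}
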
-  CodeEmitInfo * call_info = op->call_info();
+  CodeEmitInfo* call_info = op->call_info();
   if (call_info) {
     add_call_info(code_offset(), call_info);
   }
@@ -3159,6 +3151,7 @@
           tty->print_cr("delayed");
           inst->at(i - 1)->print();
           inst->at(i)->print();
+          tty->cr();
         }
 #endif
         continue;
@@ -3174,8 +3167,8 @@
     case lir_static_call:
     case lir_virtual_call:
     case lir_icvirtual_call:
-    case lir_optvirtual_call: {
-      LIR_Op* delay_op = NULL;
+    case lir_optvirtual_call:
+    case lir_dynamic_call: {
       LIR_Op* prev = inst->at(i - 1);
       if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
           (op->code() != lir_virtual_call ||
@@ -3192,15 +3185,14 @@
           tty->print_cr("delayed");
           inst->at(i - 1)->print();
           inst->at(i)->print();
+          tty->cr();
         }
 #endif
         continue;
       }
 
-      if (!delay_op) {
-        delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
-        inst->insert_before(i + 1, delay_op);
-      }
+      LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
+      inst->insert_before(i + 1, delay_op);
       break;
     }
   }
--- old/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp 2010-04-26 03:23:58.760802717 -0700
+++ new/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp 2010-04-26 03:23:58.377473839 -0700
@@ -679,8 +679,15 @@
       __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Oissuing_pc->after_save());
       __ verify_not_null_oop(Oexception->after_save());
-      __ jmp(O0, 0);
-      __ delayed()->restore();
+
+      // Restore SP from L7 if the exception PC is a MethodHandle call site.
+      __ mov(O0, G5);  // Save the target address.
+      __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
+      __ tst(L0);  // Condition codes are preserved over the restore.
+      __ restore();
+
+      __ jmp(G5, 0);
+      __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
     }
     break;
--- old/src/cpu/sparc/vm/frame_sparc.cpp 2010-04-26 03:24:00.202612245 -0700
+++ new/src/cpu/sparc/vm/frame_sparc.cpp 2010-04-26 03:23:59.827551783 -0700
@@ -336,9 +336,11 @@
 #endif // ASSERT
 }
 
-frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_stack) {
-  _sp = sp;
-  _younger_sp = younger_sp;
+frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
+  _sp(sp),
+  _younger_sp(younger_sp),
+  _deopt_state(unknown),
+  _sp_adjustment_by_callee(0) {
   if (younger_sp == NULL) {
     // make a deficient frame which doesn't know where its PC is
     _pc = NULL;
@@ -352,20 +354,32 @@
     // wrong. (the _last_native_pc will have the right value)
     // So do not put add any asserts on the _pc here.
   }
-  if (younger_frame_adjusted_stack) {
-    // compute adjustment to this frame's SP made by its interpreted callee
-    _sp_adjustment_by_callee = (intptr_t*)((intptr_t)younger_sp[I5_savedSP->sp_offset_in_saved_window()] +
-                                           STACK_BIAS) - sp;
-  } else {
-    _sp_adjustment_by_callee = 0;
+
+  if (_pc != NULL)
+    _cb = CodeCache::find_blob(_pc);
+
+  // Check for MethodHandle call sites.
+  if (_cb != NULL) {
+    nmethod* nm = _cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
+        _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
+        // The SP is already adjusted by this MH call site, don't
+        // overwrite this value with the wrong interpreter value.
+        younger_frame_is_interpreted = false;
+      }
+    }
   }
 
-  _deopt_state = unknown;
+  if (younger_frame_is_interpreted) {
+    // compute adjustment to this frame's SP made by its interpreted callee
+    _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
+  }
 
-  // It is important that frame be fully construct when we do this lookup
-  // as get_original_pc() needs correct value for unextended_sp()
+  // It is important that the frame is fully constructed when we do
+  // this lookup as get_deopt_original_pc() needs a correct value for
+  // unextended_sp() which uses _sp_adjustment_by_callee.
   if (_pc != NULL) {
-    _cb = CodeCache::find_blob(_pc);
     address original_pc = nmethod::get_deopt_original_pc(this);
     if (original_pc != NULL) {
       _pc = original_pc;
@@ -462,9 +476,8 @@
   if (is_entry_frame()) return sender_for_entry_frame(map);
 
-  intptr_t* younger_sp = sp();
-  intptr_t* sp = sender_sp();
-  bool adjusted_stack = false;
+  intptr_t* younger_sp = sp();
+  intptr_t* sp = sender_sp();
 
   // Note: The version of this operation on any platform with callee-save
   // registers must update the register map (if not null).
@@ -483,8 +496,8 @@
   // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
   // explicitly recognized.
 
-  adjusted_stack = is_interpreted_frame();
-  if (adjusted_stack) {
+  bool frame_is_interpreted = is_interpreted_frame();
+  if (frame_is_interpreted) {
     map->make_integer_regs_unsaved();
     map->shift_window(sp, younger_sp);
   } else if (_cb != NULL) {
@@ -503,7 +516,7 @@
       }
     }
   }
-  return frame(sp, younger_sp, adjusted_stack);
+  return frame(sp, younger_sp, frame_is_interpreted);
 }
--- old/src/cpu/sparc/vm/register_definitions_sparc.cpp 2010-04-26 03:24:01.677609729 -0700
+++ new/src/cpu/sparc/vm/register_definitions_sparc.cpp 2010-04-26 03:24:01.250862751 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -142,9 +142,12 @@
 REGISTER_DEFINITION(Register, G3_scratch);
 REGISTER_DEFINITION(Register, G4_scratch);
 REGISTER_DEFINITION(Register, Gtemp);
+REGISTER_DEFINITION(Register, Lentry_args);
+
+// JSR 292
 REGISTER_DEFINITION(Register, G5_method_type);
 REGISTER_DEFINITION(Register, G3_method_handle);
-REGISTER_DEFINITION(Register, Lentry_args);
+REGISTER_DEFINITION(Register, L7_mh_SP_save);
 
 #ifdef CC_INTERP
 REGISTER_DEFINITION(Register, Lstate);
--- old/src/cpu/sparc/vm/sharedRuntime_sparc.cpp 2010-04-26 03:24:03.299400522 -0700
+++ new/src/cpu/sparc/vm/sharedRuntime_sparc.cpp 2010-04-26 03:24:02.847692330 -0700
@@ -950,26 +950,13 @@
   // O0-O5 - Outgoing args in compiled layout
   // O6 - Adjusted or restored SP
   // O7 - Valid return address
-  // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
+  // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
   // F0-F7 - more outgoing args
 
   // Gargs is the incoming argument base, and also an outgoing argument.
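
As a reviewer's aid, the following self-contained model shows how the constructor above picks _sp_adjustment_by_callee — from this frame's L7_mh_SP_save slot when the PC is a MethodHandle call site, otherwise from the interpreted callee's I5_savedSP — and how an unextended-SP computation consumes it. The struct, the helper and the plain word arithmetic are simplifying assumptions, not the real frame code (which also applies STACK_BIAS).

#include <cstdint>
#include <cstdio>

// Simplified model of the SPARC frame constructor logic above.
struct ModelFrame {
  intptr_t* sp;
  intptr_t  sp_adjustment_by_callee;   // in words, 0 if nobody extended our frame

  intptr_t* unextended_sp() const { return sp + sp_adjustment_by_callee; }
};

ModelFrame make_frame(intptr_t* sp,
                      bool pc_is_mh_call_site,     intptr_t* mh_saved_sp,
                      bool younger_is_interpreted, intptr_t* i5_saved_sp) {
  ModelFrame f = { sp, 0 };
  if (pc_is_mh_call_site) {
    f.sp_adjustment_by_callee = mh_saved_sp - sp;   // from L7_mh_SP_save
  } else if (younger_is_interpreted) {
    f.sp_adjustment_by_callee = i5_saved_sp - sp;   // from I5_savedSP
  }
  return f;
}

int main() {
  intptr_t stack[16];
  ModelFrame f = make_frame(&stack[4], true, &stack[8], true, &stack[6]);
  std::printf("adjustment = %ld words, unextended_sp = stack + %ld\n",
              (long) f.sp_adjustment_by_callee, (long) (f.unextended_sp() - stack));
  return 0;
}
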
   __ sub(Gargs, BytesPerWord, Gargs);
 
-#ifdef ASSERT
-  {
-    // on entry OsavedSP and SP should be equal
-    Label ok;
-    __ cmp(O5_savedSP, SP);
-    __ br(Assembler::equal, false, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("I5_savedSP not set");
-    __ should_not_reach_here();
-    __ bind(ok);
-  }
-#endif
-
   // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
   // WITH O7 HOLDING A VALID RETURN PC
   //
--- old/src/cpu/x86/vm/assembler_x86.hpp 2010-04-26 03:24:05.045657990 -0700
+++ new/src/cpu/x86/vm/assembler_x86.hpp 2010-04-26 03:24:04.583651217 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,6 +135,9 @@
 
 #endif // _LP64
 
+// JSR 292 fixed register usages:
+REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
+
 // Address is an abstraction used to represent a memory location
 // using any of the amd64 addressing modes with one object.
 //
--- old/src/cpu/x86/vm/c1_FrameMap_x86.cpp 2010-04-26 03:24:06.655339090 -0700
+++ new/src/cpu/x86/vm/c1_FrameMap_x86.cpp 2010-04-26 03:24:06.278187207 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -309,6 +309,13 @@
 }
 
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+  assert(rbp == rbp_mh_SP_save, "must be same register");
+  return rbp_opr;
+}
+
+
 bool FrameMap::validate_frame() {
   return true;
 }
--- old/src/cpu/x86/vm/c1_FrameMap_x86.hpp 2010-04-26 03:24:08.042015517 -0700
+++ new/src/cpu/x86/vm/c1_FrameMap_x86.hpp 2010-04-26 03:24:07.665925371 -0700
@@ -126,6 +126,3 @@
     assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
     return _caller_save_xmm_regs[i];
   }
-
-  // JSR 292
-  static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }
--- old/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2010-04-26 03:24:09.440441505 -0700
+++ new/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2010-04-26 03:24:09.063104987 -0700
@@ -2790,7 +2790,7 @@
   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rtype));
-  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
+  add_call_info(code_offset(), op->info());
 }
 
 
@@ -2801,7 +2801,7 @@
          (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rh));
-  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
+  add_call_info(code_offset(), op->info());
 }
 
 
@@ -2811,16 +2811,6 @@
 }
 
 
-void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
-  __ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
-}
-
-
-void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
-  __ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
-}
-
-
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   address stub = __ start_a_stub(call_stub_size);
--- old/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2010-04-26 03:24:10.957328216 -0700
+++ new/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2010-04-26 03:24:10.580997209 -0700
@@ -782,7 +782,7 @@
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   NOT_LP64(__ get_thread(thread);)
   __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
-  __ cmovptr(Assembler::notEqual, rsp, rbp);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
 
   // continue at exception handler (return address removed)
   // note: do *not* remove arguments when unwinding the
--- old/src/cpu/x86/vm/register_definitions_x86.cpp 2010-04-26 03:24:12.406482911 -0700
+++ new/src/cpu/x86/vm/register_definitions_x86.cpp 2010-04-26 03:24:12.033609786 -0700
@@ -115,3 +115,6 @@
 REGISTER_DEFINITION(MMXRegister, mmx5 );
 REGISTER_DEFINITION(MMXRegister, mmx6 );
 REGISTER_DEFINITION(MMXRegister, mmx7 );
+
+// JSR 292
+REGISTER_DEFINITION(Register, rbp_mh_SP_save);
--- old/src/share/vm/c1/c1_FrameMap.hpp 2010-04-26 03:24:13.931312822 -0700
+++ new/src/share/vm/c1/c1_FrameMap.hpp 2010-04-26 03:24:13.507590358 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,9 @@
   // Opr representing the stack_pointer on this platform
   static LIR_Opr stack_pointer();
 
+  // JSR 292
+  static LIR_Opr method_handle_invoke_SP_save_opr();
+
   static BasicTypeArray* signature_type_array_for(const ciMethod* method);
   static BasicTypeArray* signature_type_array_for(const char * signature);
--- old/src/share/vm/c1/c1_IR.cpp 2010-04-26 03:24:15.553824367 -0700
+++ new/src/share/vm/c1/c1_IR.cpp 2010-04-26 03:24:15.071705029 -0700
@@ -230,7 +230,8 @@
   , _stack(stack)
   , _exception_handlers(exception_handlers)
   , _next(NULL)
-  , _id(-1) {
+  , _id(-1)
+  , _is_method_handle_invoke(false) {
   assert(_stack != NULL, "must be non null");
   assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
 }
@@ -241,7 +242,8 @@
   , _exception_handlers(NULL)
   , _bci(info->_bci)
   , _scope_debug_info(NULL)
-  , _oop_map(NULL) {
+  , _oop_map(NULL)
+  , _is_method_handle_invoke(info->_is_method_handle_invoke) {
   if (lock_stack_only) {
     if (info->_stack != NULL) {
       _stack = info->_stack->copy_locks();
@@ -259,10 +261,10 @@
 }
 
 
-void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
+void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
   // record the safepoint before recording the debug info for enclosing scopes
   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
-  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
+  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
   recorder->end_safepoint(pc_offset);
 }
 
--- old/src/share/vm/c1/c1_IR.hpp 2010-04-26 03:24:17.097620689 -0700
+++ new/src/share/vm/c1/c1_IR.hpp 2010-04-26 03:24:16.695795710 -0700
@@ -269,6 +269,7 @@
   int _bci;
   CodeEmitInfo* _next;
   int _id;
+  bool _is_method_handle_invoke;   // true if the associated call site is a MethodHandle call site.
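
A minimal model of the CodeEmitInfo plumbing introduced above, assuming nothing beyond standard C++: the MethodHandle-invoke fact is stored on the per-call-site info object when the call is generated and read back when its debug info is recorded, instead of being threaded through add_call_info() as an extra parameter. The classes are invented stand-ins for the HotSpot types.

#include <cstdio>

class FakeRecorder {
public:
  void add_safepoint(int pc_offset, bool is_mh_invoke) {
    std::printf("pc %d: mh_invoke=%d\n", pc_offset, (int) is_mh_invoke);
  }
};

class FakeCodeEmitInfo {
  bool _is_method_handle_invoke;
public:
  FakeCodeEmitInfo() : _is_method_handle_invoke(false) {}
  void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
  bool is_method_handle_invoke() const     { return _is_method_handle_invoke; }

  // Mirrors CodeEmitInfo::record_debug_info(recorder, pc_offset) reading the
  // stored flag instead of taking it as an argument.
  void record_debug_info(FakeRecorder* recorder, int pc_offset) {
    recorder->add_safepoint(pc_offset, _is_method_handle_invoke);
  }
};

int main() {
  FakeRecorder recorder;
  FakeCodeEmitInfo info;
  info.set_is_method_handle_invoke(true);   // done by the LIR generator at the call site
  info.record_debug_info(&recorder, 42);    // done later by the LIR assembler
  return 0;
}
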
 
   FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
   Compilation* compilation() const { return scope()->compilation(); }
@@ -287,7 +288,8 @@
     , _stack(NULL)
     , _exception_handlers(NULL)
     , _next(NULL)
-    , _id(-1) {
+    , _id(-1)
+    , _is_method_handle_invoke(false) {
   }
 
   // make a copy
@@ -302,13 +304,16 @@
   int bci() const { return _bci; }
 
   void add_register_oop(LIR_Opr opr);
-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
 
   CodeEmitInfo* next() const { return _next; }
   void set_next(CodeEmitInfo* next) { _next = next; }
 
   int id() const { return _id; }
   void set_id(int id) { _id = id; }
+
+  bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
+  void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
 };
--- old/src/share/vm/c1/c1_LIR.cpp 2010-04-26 03:24:18.659088388 -0700
+++ new/src/share/vm/c1/c1_LIR.cpp 2010-04-26 03:24:18.228007983 -0700
@@ -715,7 +715,10 @@
       }
 
       if (opJavaCall->_info) do_info(opJavaCall->_info);
-      if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
+      if (opJavaCall->is_method_handle_invoke()) {
+        LIR_Opr method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
+        do_temp(method_handle_invoke_SP_save_opr);
+      }
       do_call();
       if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);
--- old/src/share/vm/c1/c1_LIRAssembler.cpp 2010-04-26 03:24:20.274422129 -0700
+++ new/src/share/vm/c1/c1_LIRAssembler.cpp 2010-04-26 03:24:19.840675275 -0700
@@ -301,9 +301,9 @@
 }
 
 
-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
   flush_debug_info(pc_offset);
-  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
+  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   if (cinfo->exception_handlers() != NULL) {
     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   }
@@ -413,12 +413,6 @@
 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   verify_oop_map(op->info());
 
-  // JSR 292
-  // Preserve the SP over MethodHandle call sites.
-  if (op->is_method_handle_invoke()) {
-    preserve_SP(op);
-  }
-
   if (os::is_MP()) {
     // must align calls sites, otherwise they can't be updated atomically on MP hardware
     align_call(op->code());
   }
@@ -444,10 +438,6 @@
   default: ShouldNotReachHere();
   }
 
-  if (op->is_method_handle_invoke()) {
-    restore_SP(op);
-  }
-
 #if defined(X86) && defined(TIERED)
   // C2 leave fpu stack dirty clean it
   if (UseSSE < 2) {
--- old/src/share/vm/c1/c1_LIRAssembler.hpp 2010-04-26 03:24:21.831670756 -0700
+++ new/src/share/vm/c1/c1_LIRAssembler.hpp 2010-04-26 03:24:21.414873442 -0700
@@ -84,7 +84,7 @@
   Address as_Address_hi(LIR_Address* addr);
 
   // debug information
-  void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
+  void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
   void add_debug_info_for_branch(CodeEmitInfo* info);
   void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
   void add_debug_info_for_div0_here(CodeEmitInfo* info);
@@ -212,10 +212,6 @@
   void ic_call( LIR_OpJavaCall* op);
   void vtable_call( LIR_OpJavaCall* op);
 
-  // JSR 292
-  void preserve_SP(LIR_OpJavaCall* op);
-  void restore_SP( LIR_OpJavaCall* op);
-
   void osr_entry();
 
   void build_frame();
--- old/src/share/vm/c1/c1_LIRGenerator.cpp 2010-04-26 03:24:23.333608822 -0700
+++ new/src/share/vm/c1/c1_LIRGenerator.cpp 2010-04-26 03:24:22.890330844 -0700
@@ -2371,9 +2371,17 @@
   bool optimized = x->target_is_loaded() && x->target_is_final();
   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
 
+  // JSR 292
+  // Preserve the SP over MethodHandle call sites.
+  ciMethod* target = x->target();
+  if (target->is_method_handle_invoke()) {
+    info->set_is_method_handle_invoke(true);
+    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+  }
+
   switch (x->code()) {
   case Bytecodes::_invokestatic:
-    __ call_static(x->target(), result_register,
+    __ call_static(target, result_register,
                    SharedRuntime::get_resolve_static_call_stub(),
                    arg_list, info);
     break;
@@ -2383,17 +2391,17 @@
     // for final target we still produce an inline cache, in order
     // to be able to call mixed mode
     if (x->code() == Bytecodes::_invokespecial || optimized) {
-      __ call_opt_virtual(x->target(), receiver, result_register,
+      __ call_opt_virtual(target, receiver, result_register,
                           SharedRuntime::get_resolve_opt_virtual_call_stub(),
                           arg_list, info);
     } else if (x->vtable_index() < 0) {
-      __ call_icvirtual(x->target(), receiver, result_register,
+      __ call_icvirtual(target, receiver, result_register,
                         SharedRuntime::get_resolve_virtual_call_stub(),
                         arg_list, info);
     } else {
       int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
       int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
-      __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
+      __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
     }
     break;
   case Bytecodes::_invokedynamic: {
@@ -2432,7 +2440,7 @@
     // Load target MethodHandle from CallSite object.
     __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
 
-    __ call_dynamic(x->target(), receiver, result_register,
+    __ call_dynamic(target, receiver, result_register,
                     SharedRuntime::get_resolve_opt_virtual_call_stub(),
                     arg_list, info);
     break;
@@ -2442,6 +2450,12 @@
     break;
   }
 
+  // JSR 292
+  // Restore the SP after MethodHandle call sites.
+  if (target->is_method_handle_invoke()) {
+    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
+  }
+
   if (x->type()->is_float() || x->type()->is_double()) {
     // Force rounding of results from non-strictfp when in strictfp
     // scope (or when we don't know the strictness of the callee, to
--- old/src/share/vm/ci/ciMethod.cpp 2010-04-26 03:24:24.903241704 -0700
+++ new/src/share/vm/ci/ciMethod.cpp 2010-04-26 03:24:24.527118033 -0700
@@ -692,13 +692,18 @@
 // invokedynamic support
 //
 bool ciMethod::is_method_handle_invoke() const {
-  check_is_loaded();
-  bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+  bool flag = ((holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
+                name() == ciSymbol::invoke_name()) ||
+               holder()->name() == ciSymbol::java_dyn_InvokeDynamic());
 #ifdef ASSERT
-  {
-    VM_ENTRY_MARK;
-    bool flag2 = get_methodOop()->is_method_handle_invoke();
-    assert(flag == flag2, "consistent");
+  if (is_loaded()) {
+    bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+    {
+      VM_ENTRY_MARK;
+      bool flag3 = get_methodOop()->is_method_handle_invoke();
+      assert(flag2 == flag3, "consistent");
+      assert(flag == flag3, "consistent");
+    }
   }
 #endif //ASSERT
   return flag;
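
Finally, a self-contained sketch of the name-based test that the new ciMethod::is_method_handle_invoke() performs, which is what lets it answer for methods that are not loaded; plain strings stand in for ciSymbol, and the java/dyn class names mirror the JDK 7-era names referenced by the patch.

#include <cassert>
#include <string>

// Models the new check: an invoke on java.dyn.MethodHandle, or any method on
// the synthetic java.dyn.InvokeDynamic holder, counts as a MethodHandle invoke.
bool is_method_handle_invoke(const std::string& holder_name,
                             const std::string& method_name) {
  return (holder_name == "java/dyn/MethodHandle" && method_name == "invoke") ||
         holder_name == "java/dyn/InvokeDynamic";
}

int main() {
  assert( is_method_handle_invoke("java/dyn/MethodHandle", "invoke"));
  assert( is_method_handle_invoke("java/dyn/InvokeDynamic", "anything"));
  assert(!is_method_handle_invoke("java/lang/Object", "invoke"));
  return 0;
}
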