src/cpu/x86/vm/templateInterpreter_x86_32.cpp
*** old/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Oct 22 15:01:03 2013
--- new/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Oct 22 15:01:02 2013

*** 148,185 **** --- 148,184 ----
    __ dispatch_next(state);
    return entry;
  }
  
! address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
    TosState incoming_state = state;
    address entry = __ pc();
  
  #ifdef COMPILER2
    // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
!   if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
      for (int i = 1; i < 8; i++) {
        __ ffree(i);
      }
    } else if (UseSSE < 2) {
      __ empty_FPU_stack();
    }
  #endif
!   if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
      __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
    } else {
      __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
    }
  
    // In SSE mode, interpreter returns FP results in xmm0 but they need
    // to end up back on the FPU so it can operate on them.
- 
    if (incoming_state == ftos && UseSSE >= 1) {
      __ subptr(rsp, wordSize);
      __ movflt(Address(rsp, 0), xmm0);
      __ fld_s(Address(rsp, 0));
      __ addptr(rsp, wordSize);
- 
    } else if (incoming_state == dtos && UseSSE >= 2) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
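A note on the float/double paths this hunk keeps: x86 has no direct move between an SSE register and the x87 stack, so the stub spills xmm0 to a stack slot and reloads it with fld before interpreted code continues on the FPU. Below is a minimal standalone sketch of that same round-trip, assuming GCC/Clang inline assembly on an SSE2-capable x86 target; the helper name and test value are ours, not HotSpot's.

#include <cstdio>

// Hypothetical helper mirroring the stub's xmm0 -> x87 transfer:
// spill the SSE value to memory (movdbl in MacroAssembler terms),
// then reload it onto the x87 stack with fld.
static double sse_to_x87_roundtrip(double v) {
    double spill;  // stack slot standing in for the interpreter's rsp slot
    double out;
    __asm__ volatile(
        "movsd %2, %1\n\t"  // __ movdbl(Address(rsp, 0), xmm0);
        "fldl  %1\n\t"      // __ fld_d(Address(rsp, 0));
        "fstpl %0\n\t"      // pop the x87 stack so C++ can observe the value
        : "=m"(out), "+m"(spill)
        : "x"(v));
    return out;
}

int main() {
    printf("%f\n", sse_to_x87_roundtrip(2.5));  // prints 2.500000
    return 0;
}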
*** 192,228 **** --- 191,216 ----
    __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  
    __ restore_bcp();
    __ restore_locals();
- 
    if (incoming_state == atos) {
      Register mdp = rbx;
      Register tmp = rcx;
      __ profile_return_type(mdp, rax, tmp);
    }
  
!   Label L_got_cache, L_giant_index;
    if (EnableInvokeDynamic) {
!     __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
      __ jcc(Assembler::equal, L_giant_index);
    }
!   __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
!   __ bind(L_got_cache);
    __ movl(rbx, Address(rbx, rcx,
                         Address::times_ptr, ConstantPoolCache::base_offset() +
                         ConstantPoolCacheEntry::flags_offset()));
    __ andptr(rbx, 0xFF);
    __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
!   const Register cache = rbx;
+   const Register index = rcx;
!   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+ 
+   const Register flags = cache;
!   __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
!   __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+   __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
    __ dispatch_next(state, step);
  
    // out of the main line of code...
    if (EnableInvokeDynamic) {
      __ bind(L_giant_index);
      __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
      __ jmp(L_got_cache);
    }
  
    return entry;
  }
  
  address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
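What the reworked return entry computes: the number of argument slots to pop is read from the low bits of the resolved ConstantPoolCacheEntry flags word and scaled by the interpreter stack element size, with the cache index width passed in as index_size rather than derived by re-reading the bytecode (the old L_got_cache/L_giant_index dance). A small sketch of that arithmetic follows, with stand-in constants for ConstantPoolCacheEntry::parameter_size_mask and the x86_32 word size; these are assumed values, not the HotSpot definitions.

#include <cstdint>
#include <cstdio>

// Stand-ins for the HotSpot constants the stub uses (assumed values).
static const uint32_t parameter_size_mask = 0xFF; // low bits of the flags word
static const uint32_t stack_element_size  = 4;    // wordSize on x86_32

// Models: __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
//         __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
static uint32_t pop_arguments(uint32_t sp, uint32_t flags) {
    uint32_t parameter_size = flags & parameter_size_mask;
    return sp + parameter_size * stack_element_size;  // stack grows down; adding pops
}

int main() {
    // A call site with 3 argument slots: the return entry pops 12 bytes.
    printf("0x%x\n", pop_arguments(0x1000u, 0x00000003u));  // prints 0x100c
    return 0;
}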
