
src/cpu/x86/vm/sharedRuntime_x86_32.cpp

rev 10530 : fix x86 32 bit build (broken by change to calling convention)

*** 555,569 ****
    int next_off = st_off - Interpreter::stackElementSize;
    __ movdbl(Address(rsp, next_off), r);
  }

  static void gen_c2i_adapter(MacroAssembler *masm,
!                             int total_args_passed,
!                             int comp_args_on_stack,
!                             const BasicType *sig_bt,
                              const VMRegPair *regs,
!                             Label& skip_fixup) {
    // Before we get into the guts of the C2I adapter, see if we should be here
    // at all. We've come from compiled code and are attempting to jump to the
    // interpreter, which means the caller made a static call to get here
    // (vcalls always get a compiled target if there is one). Check for a
    // compiled target. If there is one, we need to patch the caller's call.
--- 555,571 ----
    int next_off = st_off - Interpreter::stackElementSize;
    __ movdbl(Address(rsp, next_off), r);
  }

  static void gen_c2i_adapter(MacroAssembler *masm,
!                             const GrowableArray<SigEntry>& sig_extended,
                              const VMRegPair *regs,
!                             Label& skip_fixup,
!                             address start,
!                             OopMapSet*& oop_maps,
!                             int& frame_complete,
!                             int& frame_size_in_words) {
    // Before we get into the guts of the C2I adapter, see if we should be here
    // at all. We've come from compiled code and are attempting to jump to the
    // interpreter, which means the caller made a static call to get here
    // (vcalls always get a compiled target if there is one). Check for a
    // compiled target. If there is one, we need to patch the caller's call.
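Side note (not part of the webrev itself): this hunk collapses the old (total_args_passed, comp_args_on_stack, sig_bt) triple into a single GrowableArray<SigEntry> and adds reference out-parameters so the caller can later build the adapter blob from the frame metadata. Below is a minimal standalone C++ sketch of that interface shape; std::vector and the placeholder types stand in for the HotSpot ones, and only the SigEntry::_bt field is taken from the diff.

// Standalone sketch, not HotSpot source.
#include <vector>

enum BasicType { T_INT, T_LONG, T_DOUBLE, T_VOID };

struct SigEntry {
  BasicType _bt;                 // basic type of one (extended) signature slot
};

struct OopMapSet { };            // opaque placeholder

// Mirrors the new gen_c2i_adapter shape: the extended signature carries its
// own length, and frame metadata travels back through reference parameters.
static void gen_adapter_sketch(const std::vector<SigEntry>& sig_extended,
                               OopMapSet*& oop_maps,
                               int& frame_complete,
                               int& frame_size_in_words) {
  oop_maps            = new OopMapSet();
  frame_complete      = 0;                            // illustrative value
  frame_size_in_words = (int)sig_extended.size();     // illustrative sizing
}

int main() {
  // A long occupies two interpreter slots; the second slot is tagged T_VOID.
  std::vector<SigEntry> sig = { {T_INT}, {T_LONG}, {T_VOID} };
  OopMapSet* maps = NULL;
  int complete = -1;
  int words = 0;
  gen_adapter_sketch(sig, maps, complete, words);
  delete maps;
  return 0;
}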
*** 581,609 ****
  #endif /* COMPILER2 */

    // Since all args are passed on the stack, total_args_passed * interpreter_
    // stack_element_size is the
    // space we need.
!   int extraspace = total_args_passed * Interpreter::stackElementSize;

    // Get return address
    __ pop(rax);

    // set senderSP value
    __ movptr(rsi, rsp);

    __ subptr(rsp, extraspace);

    // Now write the args into the outgoing interpreter space
!   for (int i = 0; i < total_args_passed; i++) {
!     if (sig_bt[i] == T_VOID) {
!       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
        continue;
      }

      // st_off points to lowest address on stack.
!     int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
      int next_off = st_off - Interpreter::stackElementSize;

      // Say 4 args:
      // i   st_off
      // 0   12 T_LONG
--- 583,611 ----
  #endif /* COMPILER2 */

    // Since all args are passed on the stack, total_args_passed * interpreter_
    // stack_element_size is the
    // space we need.
!   int extraspace = sig_extended.length() * Interpreter::stackElementSize;

    // Get return address
    __ pop(rax);

    // set senderSP value
    __ movptr(rsi, rsp);

    __ subptr(rsp, extraspace);

    // Now write the args into the outgoing interpreter space
!   for (int i = 0; i < sig_extended.length(); i++) {
!     if (sig_extended.at(i)._bt == T_VOID) {
!       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
        continue;
      }

      // st_off points to lowest address on stack.
!     int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
      int next_off = st_off - Interpreter::stackElementSize;

      // Say 4 args:
      // i   st_off
      // 0   12 T_LONG
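Reader's note: in this hunk the argument-copy loop is now driven by sig_extended.length() and sig_extended.at(i)._bt instead of total_args_passed and sig_bt[i]; T_VOID entries still mark the second slot of a long/double and are skipped, and st_off still counts down from the top of the outgoing interpreter area. A small standalone C++ sketch of that layout logic follows, assuming a stand-in stackElementSize of 4 bytes (the real value comes from Interpreter::stackElementSize).

// Standalone sketch, not HotSpot source.
#include <cassert>
#include <cstdio>
#include <vector>

enum BasicType { T_INT, T_LONG, T_DOUBLE, T_VOID, T_OBJECT };
struct SigEntry { BasicType _bt; };

static const int stackElementSize = 4;   // one interpreter slot on 32-bit x86

// Walks the extended signature the way the rewritten c2i loop does:
// T_VOID entries are the unused halves of longs/doubles and are skipped,
// each remaining argument gets an offset counted down from the top of the
// outgoing interpreter area.
static void layout_interpreter_args(const std::vector<SigEntry>& sig_extended) {
  int extraspace = (int)sig_extended.size() * stackElementSize;
  printf("outgoing area: %d bytes\n", extraspace);
  for (int i = 0; i < (int)sig_extended.size(); i++) {
    if (sig_extended[i]._bt == T_VOID) {
      assert(i > 0 && (sig_extended[i - 1]._bt == T_LONG ||
                       sig_extended[i - 1]._bt == T_DOUBLE));
      continue;                           // nothing is stored for the dummy half
    }
    int st_off = ((int)sig_extended.size() - 1 - i) * stackElementSize;
    printf("arg %d stored at sp + %d\n", i, st_off);
  }
}

int main() {
  // (int, long, object): the long takes two slots, the second is T_VOID.
  std::vector<SigEntry> sig = { {T_INT}, {T_LONG}, {T_VOID}, {T_OBJECT} };
  layout_interpreter_args(sig);
  return 0;
}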
*** 649,659 ****
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
!       if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
  #ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
--- 651,661 ----
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
!       if ( sig_extended.at(i)._bt == T_LONG || sig_extended.at(i)._bt == T_DOUBLE) {
          // long/double in gpr
  #ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
*** 666,676 ****
      } else {
        assert(r_1->is_XMMRegister(), "");
        if (!r_2->is_valid()) {
          __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
        } else {
!         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
          move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
        }
      }
    }

--- 668,678 ----
      } else {
        assert(r_1->is_XMMRegister(), "");
        if (!r_2->is_valid()) {
          __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
        } else {
!         assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
          move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
        }
      }
    }

*** 699,712 ****
      __ jcc(Assembler::below, L_ok);
      __ bind(L_fail);
    }

  void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
-                                     int total_args_passed,
                                      int comp_args_on_stack,
!                                     const BasicType *sig_bt,
                                      const VMRegPair *regs) {
    // Note: rsi contains the senderSP on entry. We must preserve it since
    // we may do a i2c -> c2i transition if we lose a race where compiled
    // code goes non-entrant while we get args ready.

    // Adapters can be frameless because they do not require the caller
--- 701,714 ----
      __ jcc(Assembler::below, L_ok);
      __ bind(L_fail);
    }

  void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                      int comp_args_on_stack,
!                                     const GrowableArray<SigEntry>& sig_extended,
                                      const VMRegPair *regs) {
+
    // Note: rsi contains the senderSP on entry. We must preserve it since
    // we may do a i2c -> c2i transition if we lose a race where compiled
    // code goes non-entrant while we get args ready.

    // Adapters can be frameless because they do not require the caller
*** 791,814 ****
    // Pre-load the register-jump target early, to schedule it better.
    __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));

    // Now generate the shuffle code. Pick up all register args and move the
    // rest through the floating point stack top.
!   for (int i = 0; i < total_args_passed; i++) {
!     if (sig_bt[i] == T_VOID) {
        // Longs and doubles are passed in native word order, but misaligned
        // in the 32-bit build.
!       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
        continue;
      }

      // Pick up 0, 1 or 2 words from SP+offset.

      assert(!regs[i].second()->is_valid() || regs[i].first()->next() ==
             regs[i].second(), "scrambled load targets?");
      // Load in argument order going down.
!     int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
      // Point to interpreter value (vs. tag)
      int next_off = ld_off - Interpreter::stackElementSize;
      //
      //
      //
--- 793,816 ----
    // Pre-load the register-jump target early, to schedule it better.
    __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));

    // Now generate the shuffle code. Pick up all register args and move the
    // rest through the floating point stack top.
!   for (int i = 0; i < sig_extended.length(); i++) {
!     if (sig_extended.at(i)._bt == T_VOID) {
        // Longs and doubles are passed in native word order, but misaligned
        // in the 32-bit build.
!       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
        continue;
      }

      // Pick up 0, 1 or 2 words from SP+offset.

      assert(!regs[i].second()->is_valid() || regs[i].first()->next() ==
             regs[i].second(), "scrambled load targets?");
      // Load in argument order going down.
!     int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
      // Point to interpreter value (vs. tag)
      int next_off = ld_off - Interpreter::stackElementSize;
      //
      //
      //
*** 845,855 ****
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
!       const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                             next_off : ld_off;
        __ movptr(rsi, Address(saved_sp, offset));
        __ movptr(Address(rsp, st_off), rsi);
  #ifndef _LP64
        __ movptr(rsi, Address(saved_sp, ld_off));
--- 847,857 ----
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
!       const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)?
                             next_off : ld_off;
        __ movptr(rsi, Address(saved_sp, offset));
        __ movptr(Address(rsp, st_off), rsi);
  #ifndef _LP64
        __ movptr(rsi, Address(saved_sp, ld_off));
*** 863,873 ****
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.

!       const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                             next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
  #ifndef _LP64
--- 865,875 ----
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.

!       const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)?
                             next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
  #ifndef _LP64
*** 911,928 ****
    __ jmp(rdi);
  }

  // ---------------------------------------------------------------
  AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
-                                                             int total_args_passed,
                                                              int comp_args_on_stack,
!                                                             const BasicType *sig_bt,
                                                              const VMRegPair *regs,
!                                                             AdapterFingerPrint* fingerprint) {
    address i2c_entry = __ pc();

!   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

    // -------------------------------------------------------------------------
    // Generate a C2I adapter. On entry we know rbx, holds the Method* during calls
    // to the interpreter. The args start out packed in the compiled layout. They
    // need to be unpacked into the interpreter layout. This will almost always
--- 913,930 ----
    __ jmp(rdi);
  }

  // ---------------------------------------------------------------
  AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                              int comp_args_on_stack,
!                                                             const GrowableArray<SigEntry>& sig_extended,
                                                              const VMRegPair *regs,
!                                                             AdapterFingerPrint* fingerprint,
!                                                             AdapterBlob*& new_adapter) {
    address i2c_entry = __ pc();

!   gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);

    // -------------------------------------------------------------------------
    // Generate a C2I adapter. On entry we know rbx, holds the Method* during calls
    // to the interpreter. The args start out packed in the compiled layout. They
    // need to be unpacked into the interpreter layout. This will almost always
*** 955,967 ****
      __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    }

    address c2i_entry = __ pc();

!   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

    __ flush();
    return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  }

  int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
--- 957,973 ----
      __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    }

    address c2i_entry = __ pc();

!   OopMapSet* oop_maps = NULL;
!   int frame_complete = CodeOffsets::frame_never_safe;
!   int frame_size_in_words = 0;
!   gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);

    __ flush();
+   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
    return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  }

  int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
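Reader's note: with gen_c2i_adapter now reporting oop_maps, frame_complete and frame_size_in_words back to its caller, generate_i2c2i_adapters builds the AdapterBlob itself and returns it through the new new_adapter reference parameter. A hedged standalone C++ sketch of that control flow follows; the types and the frame_never_safe constant are placeholders, and the code-buffer argument that the real AdapterBlob::create takes (masm->code()) is omitted.

// Standalone sketch, not HotSpot source.
#include <cstddef>

struct OopMapSet { };

struct AdapterBlob {
  // Placeholder for AdapterBlob::create(masm->code(), ...): the real factory
  // also receives the generated code buffer, which is left out here.
  static AdapterBlob* create(int frame_complete, int frame_size_in_words,
                             OopMapSet* oop_maps) {
    (void)frame_complete; (void)frame_size_in_words; (void)oop_maps;
    return new AdapterBlob();
  }
};

static const int frame_never_safe = -1;  // stands in for CodeOffsets::frame_never_safe

// The c2i generator (elided) fills in the three values; the caller then
// owns building the blob and hands it back through new_adapter.
static void build_adapter_sketch(AdapterBlob*& new_adapter) {
  OopMapSet* oop_maps     = NULL;
  int frame_complete      = frame_never_safe;
  int frame_size_in_words = 0;
  // ... generate c2i code here, updating oop_maps / frame_complete / words ...
  new_adapter = AdapterBlob::create(frame_complete, frame_size_in_words, oop_maps);
}

int main() {
  AdapterBlob* blob = NULL;
  build_adapter_sketch(blob);
  delete blob;
  return 0;
}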