src/cpu/x86/vm/sharedRuntime_x86_64.cpp

*** 41,50 ****
--- 41,53 ----
  #include "c1/c1_Runtime1.hpp"
  #endif
  #ifdef COMPILER2
  #include "opto/runtime.hpp"
  #endif
+ #if INCLUDE_JVMCI
+ #include "jvmci/jvmciJavaClasses.hpp"
+ #endif
  
  #define __ masm->
  
  const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
*** 156,182 ****
    static void restore_result_registers(MacroAssembler* masm);
  };
  
  OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
    int vect_words = 0;
    int off = 0;
    int num_xmm_regs = XMMRegisterImpl::number_of_registers;
    if (UseAVX < 3) {
      num_xmm_regs = num_xmm_regs/2;
    }
! #ifdef COMPILER2
    if (save_vectors) {
      assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
      assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
      // Save upper half of YMM registers
      vect_words = 16 * num_xmm_regs / wordSize;
      if (UseAVX < 3) {
        additional_frame_words += vect_words;
      }
    }
  #else
!   assert(!save_vectors, "vectors are generated only by C2");
  #endif
  
    // Always make the frame size 16-byte aligned
    int frame_size_in_bytes = round_to(additional_frame_words*wordSize + reg_save_size*BytesPerInt, num_xmm_regs);
--- 159,187 ----
    static void restore_result_registers(MacroAssembler* masm);
  };
  
  OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
    int vect_words = 0;
+   int ymmhi_offset = -1;
    int off = 0;
    int num_xmm_regs = XMMRegisterImpl::number_of_registers;
    if (UseAVX < 3) {
      num_xmm_regs = num_xmm_regs/2;
    }
! #if defined(COMPILER2) || INCLUDE_JVMCI
    if (save_vectors) {
      assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
      assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
      // Save upper half of YMM registers
      vect_words = 16 * num_xmm_regs / wordSize;
      if (UseAVX < 3) {
+       ymmhi_offset = additional_frame_words;
        additional_frame_words += vect_words;
      }
    }
  #else
!   assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
  
    // Always make the frame size 16-byte aligned
    int frame_size_in_bytes = round_to(additional_frame_words*wordSize + reg_save_size*BytesPerInt, num_xmm_regs);
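
A quick sanity check of the sizing arithmetic in this hunk, as a standalone
sketch (assuming the usual x86_64 HotSpot constants wordSize == 8 and 32 XMM
registers on EVEX targets; the local names are stand-ins, not the VM's):

    #include <cassert>

    int main() {
      const int wordSize = 8;           // bytes per machine word on x86_64
      int num_xmm_regs = 32;            // XMMRegisterImpl::number_of_registers with AVX-512
      const int UseAVX = 2;             // a pre-EVEX target
      if (UseAVX < 3) {
        num_xmm_regs /= 2;              // only xmm0..xmm15 are available
      }
      // Each YMM upper half is 16 bytes; the save area is sized in words.
      int vect_words = 16 * num_xmm_regs / wordSize;  // 16 * 16 / 8 == 32
      assert(vect_words == 32);
      return 0;
    }

On a pre-EVEX target the frame therefore grows by 32 words (256 bytes), and
ymmhi_offset records where that area starts so the oop map entries added
later can find it.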
*** 218,227 ****
--- 223,233 ----
    OopMapSet *oop_maps = new OopMapSet();
    OopMap* map = new OopMap(frame_size_in_slots, 0);
  
  #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
+ #define YMMHI_STACK_OFFSET(x) VMRegImpl::stack2reg((x / VMRegImpl::stack_slot_size) + ymmhi_offset)
  
    map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
    map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
    map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
    map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
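
The new YMMHI_STACK_OFFSET macro converts a byte offset within the YMM-high
save area into OopMap stack slots and rebases it at ymmhi_offset; the
->next(4) calls in the following hunk then name the VMReg four 32-bit slots
above each XMM base, i.e. the upper 128 bits. A minimal sketch of the slot
arithmetic, assuming VMRegImpl::stack_slot_size == 4 and a hypothetical base:

    #include <cassert>

    int main() {
      const int stack_slot_size = 4;  // VMRegImpl::stack_slot_size on x86
      const int ymmhi_offset = 10;    // hypothetical base recorded by save_live_registers
      // xmm3's upper half sits 48 bytes into the area (3 registers * 16 bytes),
      // which is 12 slots past the base:
      int slot = 48 / stack_slot_size + ymmhi_offset;
      assert(slot == 22);
      return 0;
    }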
*** 255,264 ****
--- 261,292 ----
        map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
        off += delta;
      }
    }
  
+ #if defined(COMPILER2) || INCLUDE_JVMCI
+   if (save_vectors) {
+     assert(ymmhi_offset != -1, "save area must exist");
+     map->set_callee_saved(YMMHI_STACK_OFFSET(  0), xmm0->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 16), xmm1->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 32), xmm2->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 48), xmm3->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 64), xmm4->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 80), xmm5->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 96), xmm6->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(112), xmm7->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(128), xmm8->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(144), xmm9->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(160), xmm10->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(176), xmm11->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(192), xmm12->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(208), xmm13->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(224), xmm14->as_VMReg()->next(4));
+     map->set_callee_saved(YMMHI_STACK_OFFSET(240), xmm15->as_VMReg()->next(4));
+   }
+ #endif // COMPILER2 || INCLUDE_JVMCI
+ 
    // %%% These should all be a waste but we'll keep things as they were for now
    if (true) {
      map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
      map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
      map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
*** 305,315 ****
    }
    if (frame::arg_reg_save_area_bytes != 0) {
      // Pop arg register save area
      __ addptr(rsp, frame::arg_reg_save_area_bytes);
    }
! #ifdef COMPILER2
    // On EVEX enabled targets everything is handled in pop fpu state
    if ((restore_vectors) && (UseAVX < 3)) {
      assert(UseAVX > 0, "256/512-bit vectors are supported only with AVX");
      assert(MaxVectorSize == 64, "up to 512bit vectors are supported now");
      int off = 0;
--- 333,343 ----
    }
    if (frame::arg_reg_save_area_bytes != 0) {
      // Pop arg register save area
      __ addptr(rsp, frame::arg_reg_save_area_bytes);
    }
! #if defined(COMPILER2) || INCLUDE_JVMCI
    // On EVEX enabled targets everything is handled in pop fpu state
    if ((restore_vectors) && (UseAVX < 3)) {
      assert(UseAVX > 0, "256/512-bit vectors are supported only with AVX");
      assert(MaxVectorSize == 64, "up to 512bit vectors are supported now");
      int off = 0;
*** 318,328 ****
        __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
      }
      __ addptr(rsp, num_xmm_regs*16);
    }
  #else
!   assert(!restore_vectors, "vectors are generated only by C2");
  #endif
    // Recover CPU state
    __ pop_CPU_state();
    // Get the rbp described implicitly by the calling convention (no oopMap)
    __ pop(rbp);
--- 346,356 ----
        __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
      }
      __ addptr(rsp, num_xmm_regs*16);
    }
  #else
!   assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
    // Recover CPU state
    __ pop_CPU_state();
    // Get the rbp described implicitly by the calling convention (no oopMap)
    __ pop(rbp);
*** 653,663 ****
    __ cmpptr(pc_reg, temp_reg);
    __ jcc(Assembler::below, L_ok);
    __ bind(L_fail);
  }
  
! static void gen_i2c_adapter(MacroAssembler *masm,
!                             int total_args_passed,
!                             int comp_args_on_stack,
!                             const BasicType *sig_bt,
!                             const VMRegPair *regs) {
--- 681,691 ----
    __ cmpptr(pc_reg, temp_reg);
    __ jcc(Assembler::below, L_ok);
    __ bind(L_fail);
  }
  
! void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
!                                     int total_args_passed,
!                                     int comp_args_on_stack,
!                                     const BasicType *sig_bt,
!                                     const VMRegPair *regs) {
*** 750,759 ****
--- 778,799 ----
    // Will jump to the compiled code just as if compiled code was doing it.
    // Pre-load the register-jump target early, to schedule it better.
    __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
  
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     // check if this call should be routed towards a specific entry point
+     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
+     Label no_alternative_target;
+     __ jcc(Assembler::equal, no_alternative_target);
+     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
+     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
+     __ bind(no_alternative_target);
+   }
+ #endif // INCLUDE_JVMCI
+ 
    // Now generate the shuffle code.  Pick up all register args and move the
    // rest through the floating point stack top.
    for (int i = 0; i < total_args_passed; i++) {
      if (sig_bt[i] == T_VOID) {
        // Longs and doubles are passed in native word order, but misaligned
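
The JVMCI block above gives the i2c adapter a detour: if the thread's
jvmci_alternate_call_target field is set, the jump target preloaded into r11
is replaced and the field is cleared so the redirection fires only once. The
same control flow in C-level form (Thread here is a hypothetical stand-in for
JavaThread, not the VM's type):

    #include <cstdint>

    struct Thread {
      uintptr_t jvmci_alternate_call_target;
    };

    // Returns the entry the stub should jump through (r11 in the adapter).
    uintptr_t pick_entry(Thread* thread, uintptr_t from_compiled_entry) {
      uintptr_t target = from_compiled_entry;
      if (thread->jvmci_alternate_call_target != 0) {
        target = thread->jvmci_alternate_call_target;  // route to the requested entry
        thread->jvmci_alternate_call_target = 0;       // one-shot: clear after use
      }
      return target;
    }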
*** 2683,2693 ****
  //------------------------------generate_deopt_blob----------------------------
  void SharedRuntime::generate_deopt_blob() {
    // Allocate space for the code
    ResourceMark rm;
    // Setup code generation tools
!   CodeBuffer buffer("deopt_blob", 2048, 1024);
    MacroAssembler* masm = new MacroAssembler(&buffer);
    int frame_size_in_words;
    OopMap* map = NULL;
    OopMapSet *oop_maps = new OopMapSet();
--- 2723,2739 ----
  //------------------------------generate_deopt_blob----------------------------
  void SharedRuntime::generate_deopt_blob() {
    // Allocate space for the code
    ResourceMark rm;
    // Setup code generation tools
!   int pad = 0;
! #if INCLUDE_JVMCI
!   if (EnableJVMCI) {
!     pad += 512; // Increase the buffer size when compiling for JVMCI
!   }
! #endif
!   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
    MacroAssembler* masm = new MacroAssembler(&buffer);
    int frame_size_in_words;
    OopMap* map = NULL;
    OopMapSet *oop_maps = new OopMapSet();
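
The padding idiom here is simple: the deopt blob's code buffer gets a fixed
512 extra bytes whenever the JVMCI entry points below will also be emitted.
As a sketch (the 512 is the patch's own constant; the helper is illustrative):

    int deopt_buffer_size(bool enable_jvmci) {
      int pad = 0;
      if (enable_jvmci) {
        pad += 512;   // room for the uncommon-trap and implicit-exception stubs
      }
      return 2048 + pad;
    }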
*** 2732,2751 ****
--- 2778,2835 ----
    // Normal deoptimization.  Save exec mode for unpack_frames.
    __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
    __ jmp(cont);
  
    int reexecute_offset = __ pc() - start;
+ #if INCLUDE_JVMCI && !defined(COMPILER1)
+   if (EnableJVMCI && UseJVMCICompiler) {
+     // JVMCI does not use this kind of deoptimization
+     __ should_not_reach_here();
+   }
+ #endif
  
    // Reexecute case
    // return address is the pc describes what bci to do re-execute at
  
    // No need to update map as each call to save_live_registers will produce identical oopmap
    (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
  
    __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
    __ jmp(cont);
  
+ #if INCLUDE_JVMCI
+   Label after_fetch_unroll_info_call;
+   int implicit_exception_uncommon_trap_offset = 0;
+   int uncommon_trap_offset = 0;
+ 
+   if (EnableJVMCI) {
+     implicit_exception_uncommon_trap_offset = __ pc() - start;
+ 
+     __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
+     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
+ 
+     uncommon_trap_offset = __ pc() - start;
+ 
+     // Save everything in sight.
+     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
+     // fetch_unroll_info needs to call last_java_frame()
+     __ set_last_Java_frame(noreg, noreg, NULL);
+ 
+     __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
+     __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
+ 
+     __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
+     __ mov(c_rarg0, r15_thread);
+     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
+     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
+ 
+     __ reset_last_Java_frame(false, false);
+ 
+     __ jmp(after_fetch_unroll_info_call);
+   } // EnableJVMCI
+ #endif // INCLUDE_JVMCI
+ 
    int exception_offset = __ pc() - start;
  
    // Prolog for exception case
  
    // all registers are dead at this entry point, except for rax, and
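
Each of the new entry points is recorded as a byte offset from the blob's
first instruction via __ pc() - start, and wired onto the DeoptimizationBlob
in the final hunk. A small sketch of that bookkeeping with made-up addresses:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t blob[4096];
      uint8_t* start = blob;
      uint8_t* pc = blob + 128;                      // hypothetical emission cursor
      int uncommon_trap_offset = (int)(pc - start);  // captured mid-generation
      assert(start + uncommon_trap_offset == pc);    // later resolves back to the entry
      return 0;
    }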
*** 2827,2836 ****
--- 2911,2926 ----
    // find any register it might need.
    oop_maps->add_gc_map(__ pc() - start, map);
  
    __ reset_last_Java_frame(false, false);
  
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     __ bind(after_fetch_unroll_info_call);
+   }
+ #endif
+ 
    // Load UnrollBlock* into rdi
    __ mov(rdi, rax);
  
    Label noException;
    __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
*** 3001,3010 ****
--- 3091,3106 ----
    // Make sure all code is generated
    masm->flush();
  
    _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
    _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
+     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
+   }
+ #endif
  }
  
  #ifdef COMPILER2
  //------------------------------generate_uncommon_trap_blob--------------------
  void SharedRuntime::generate_uncommon_trap_blob() {