src/cpu/x86/vm/sharedRuntime_x86_64.cpp (8031320_9)
1995   // because it can be patched on the fly by make_non_entrant. The stack bang
1996   // instruction fits that requirement.
1997 
1998   // Generate stack overflow check
1999 
2000   if (UseStackBanging) {
2001     __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2002   } else {
2003     // need a 5 byte instruction to allow MT safe patching to non-entrant
2004     __ fat_nop();
2005   }
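
The bang touches memory StackShadowPages pages below rsp so that a stack overflow faults here, at a known pc, rather than somewhere inside the native wrapper; with banging disabled, a 5-byte fat_nop keeps the site patchable. A minimal standalone sketch of the banging idea, with illustrative page and shadow-page values (not HotSpot's MacroAssembler code):

#include <cstddef>
#include <cstdio>

// Sketch only: a stack bang is a single store a fixed number of pages below
// the stack pointer. The constants are assumptions for illustration; the real
// values come from os::vm_page_size() and the StackShadowPages flag.
const std::size_t kPageSize         = 4096;
const std::size_t kStackShadowPages = 20;

void bang_stack_with_offset_sketch(volatile char* sp, std::size_t offset) {
  sp[-static_cast<std::ptrdiff_t>(offset)] = 0;  // in spirit: movl [rsp - offset], eax
}

int main() {
  static char fake_stack[kStackShadowPages * kPageSize + 64];         // stand-in for real stack space
  bang_stack_with_offset_sketch(fake_stack + sizeof(fake_stack) - 1,  // pretend this is rsp
                                kStackShadowPages * kPageSize);
  std::puts("banged");
}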
2006 
2007   // Generate a new frame for the wrapper.
2008   __ enter();
2009   // -2 because return address is already present and so is saved rbp
2010   __ subptr(rsp, stack_size - 2*wordSize);
2011 
2012   // Frame is now completed as far as size and linkage.
2013   int frame_complete = ((intptr_t)__ pc()) - start;
2014 
2015     if (UseRTMLocking) {
2016       __ xabort(0);
2017     }
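
The added xabort(0) ensures that a thread arriving here inside a hardware transaction aborts it before the wrapper touches any runtime state. Its effect can be sketched with the RTM intrinsic below (illustration only, not the generated code; needs an RTM-capable CPU and, with GCC/Clang, -mrtm):

#include <immintrin.h>
#include <cstdio>

// xabort is a no-op outside a transaction and aborts the transaction
// immediately inside one, so code after this point never runs speculatively.
int main() {
  _xabort(0);  // no transaction is active here, so this does nothing
  std::puts("not in a transaction, so xabort was a no-op");
}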
2018 
2019 #ifdef ASSERT
2020     {
2021       Label L;
2022       __ mov(rax, rsp);
2023       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2024       __ cmpptr(rax, rsp);
2025       __ jcc(Assembler::equal, L);
2026       __ stop("improperly aligned stack");
2027       __ bind(L);
2028     }
2029 #endif /* ASSERT */
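
The assert block enforces the amd64 ABI rule that rsp is 16-byte aligned once the frame is set up: andptr(rax, -16) clears the low four bits, and if that changed the value the stack was misaligned. The same test in plain C++, for illustration:

#include <cassert>
#include <cstdint>

// Clearing the low four bits of a 16-byte-aligned pointer leaves it unchanged.
bool is_16_byte_aligned(std::uintptr_t sp) {
  return (sp & ~static_cast<std::uintptr_t>(0xF)) == sp;
}

int main() {
  assert(is_16_byte_aligned(0xFFF0));   // aligned example value
  assert(!is_16_byte_aligned(0xFFF8));  // misaligned example value
}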
2030 
2031 
2032   // We use r14 as the oop handle for the receiver/klass
2033   // It is callee save so it survives the call to native
2034 
2035   const Register oop_handle_reg = r14;
2036 
2037   if (is_critical_native) {
2038     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,


3599   // Make sure all code is generated
3600   masm->flush();
3601 
3602   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3603   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3604 }
3605 
3606 #ifdef COMPILER2
3607 //------------------------------generate_uncommon_trap_blob--------------------
3608 void SharedRuntime::generate_uncommon_trap_blob() {
3609   // Allocate space for the code
3610   ResourceMark rm;
3611   // Setup code generation tools
3612   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3613   MacroAssembler* masm = new MacroAssembler(&buffer);
3614 
3615   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3616 
3617   address start = __ pc();
3618  
3619   if (UseRTMLocking) {
3620     __ xabort(0);
3621   }
3622 
3623   // Push self-frame.  We get here with a return address on the
3624   // stack, so rsp is 8-byte aligned until we allocate our frame.
3625   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
3626 
3627   // No callee saved registers. rbp is assumed implicitly saved
3628   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3629 
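SimpleRuntimeFrame offsets are counted in 4-byte stack slots, which is why the framesize assert above checks % 4 (a multiple of 16 bytes) and the offsets are scaled by LogBytesPerInt when used as byte addresses. Illustrative arithmetic with hypothetical slot values (the real layout comes from the SimpleRuntimeFrame enum and is platform-dependent):

#include <cassert>

// Hypothetical slot numbers for illustration only.
const int rbp_off        = 0;  // two 4-byte slots for the saved rbp
const int return_off     = 2;  // two 4-byte slots for the return address
const int framesize      = 4;  // total frame size in 4-byte slots
const int LogBytesPerInt = 2;  // one slot = 4 bytes

int main() {
  assert(framesize % 4 == 0);                   // the "sp not 16-byte aligned" assert
  assert((return_off << LogBytesPerInt) == 8);  // subptr(rsp, 8): space below the return address
  assert((rbp_off << LogBytesPerInt) == 0);     // rbp stored at [rsp + 0]
}
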
3630   // The compiler left unloaded_class_index in j_rarg0; move it to where the
3631   // runtime expects it.
3632   __ movl(c_rarg1, j_rarg0);
3633 
3634   __ set_last_Java_frame(noreg, noreg, NULL);
3635 
3636   // Call C code.  Need thread but NOT official VM entry
3637   // crud.  We cannot block on this call, no GC can happen.  Call should
3638   // capture callee-saved registers as well as return values.
3639   // Thread is in rdi already.
3640   //
3641   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3642 


3783 // and setup oopmap.
3784 //
3785 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3786   assert(StubRoutines::forward_exception_entry() != NULL,
3787          "must be generated before");
3788 
3789   ResourceMark rm;
3790   OopMapSet *oop_maps = new OopMapSet();
3791   OopMap* map;
3792 
3793   // Allocate space for the code.  Setup code generation tools.
3794   CodeBuffer buffer("handler_blob", 2048, 1024);
3795   MacroAssembler* masm = new MacroAssembler(&buffer);
3796 
3797   address start   = __ pc();
3798   address call_pc = NULL;
3799   int frame_size_in_words;
3800   bool cause_return = (poll_type == POLL_AT_RETURN);
3801   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3802 
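The two booleans capture everything the blob needs from the poll type: whether the thread was stopped at a return (so a valid return address is already on the stack) and whether vector registers must also be preserved. A trivial standalone restatement, with the enum assumed to mirror SharedRuntime's poll types:

#include <cassert>

// Names assumed to mirror SharedRuntime's poll types; illustration only.
enum PollType { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP };

int main() {
  for (int t = POLL_AT_RETURN; t <= POLL_AT_VECTOR_LOOP; ++t) {
    const bool cause_return = (t == POLL_AT_RETURN);       // return address already pushed
    const bool save_vectors = (t == POLL_AT_VECTOR_LOOP);  // xmm/ymm state must survive
    assert(!(cause_return && save_vectors));               // a return poll never saves vectors
  }
}
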
3803   if (UseRTMLocking) {
3804     __ xabort(0);
3805   }
3806   
3807   // Make room for return address (or push it again)
3808   if (!cause_return) {
3809     __ push(rbx);
3810   }
3811 
3812   // Save registers, fpu state, and flags
3813   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3814 
3815   // The following is basically a call_VM.  However, we need the precise
3816   // address of the call in order to generate an oopmap. Hence, we do all the
3817   // work ourselves.
3818 
3819   __ set_last_Java_frame(noreg, noreg, NULL);
3820 
3821   // The return address must always be correct so that the frame constructor never
3822   // sees an invalid pc.
3823 
3824   if (!cause_return) {
3825     // overwrite the dummy value we pushed on entry
3826     __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));

