
src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp

rev 56744 : 8233078: fix minimal VM build on Linux ppc64(le)


 554 }
 555 
 556 // Is the vector's size (in bytes) bigger than the size saved by default?
 557 bool SharedRuntime::is_wide_vector(int size) {
 558   // Note: MaxVectorSize == 8/16 on PPC64.
 559   assert(size <= (SuperwordUseVSX ? 16 : 8), "%d-byte vectors are not supported", size);
 560   return size > 8;
 561 }
 562 
 563 size_t SharedRuntime::trampoline_size() {
 564   return Assembler::load_const_size + 8;
 565 }
 566 
 567 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
 568   Register Rtemp = R12;
 569   __ load_const(Rtemp, destination);
 570   __ mtctr(Rtemp);
 571   __ bctr();
 572 }
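
A note on the size computation above: every PPC instruction is 4 bytes, so the
"+ 8" in trampoline_size() pays for the mtctr/bctr pair emitted by
generate_trampoline(), on top of the Assembler::load_const_size bytes used to
materialize the 64-bit destination. A minimal sanity check, assuming
BytesPerInstWord == 4 on this port (illustrative, not part of the patch):

    // Trampoline = load_const(destination) + mtctr + bctr.
    assert(SharedRuntime::trampoline_size() ==
           Assembler::load_const_size + 2 * BytesPerInstWord,
           "trampoline layout changed");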
 573 
 574 #ifdef COMPILER2
 575 static int reg2slot(VMReg r) {
 576   return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 577 }
 578 
 579 static int reg2offset(VMReg r) {
 580   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 581 }
 582 #endif
 583 
 584 // ---------------------------------------------------------------------------
 585 // Read the array of BasicTypes from a signature, and compute where the
 586 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
 587 // quantities. Values less than VMRegImpl::stack0 are registers, those above
 588 // refer to 4-byte stack slots. All stack slots are based off the stack pointer,
 589 // as frame sizes are fixed.
 590 // VMRegImpl::stack0 refers to the first slot 0(sp),
 591 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
 592 // up to RegisterImpl::number_of_registers are the 64-bit
 593 // integer registers.
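
As a sketch of the convention described above (illustrative, not part of the
patch), a VMRegPair entry is decoded roughly like this:

    VMReg r = regs[i].first();
    if (r->is_reg()) {
      // Value lives in a machine register (as_Register() for the GPR case).
      Register reg = r->as_Register();
    } else if (r->is_stack()) {
      // Value lives in a 4-byte stack slot; compare reg2offset() above.
      int offset_in_bytes = r->reg2stack() * VMRegImpl::stack_slot_size;
    }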
 594 
 595 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 596 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
 597 // units regardless of build. Of course for i486 there is no 64-bit build.
 598 
 599 // The Java calling convention is a "shifted" version of the C ABI.
 600 // By skipping the first C ABI register we can call non-static jni methods
 601 // with small numbers of arguments without having to shuffle the arguments
 602 // at all. Since we control the java ABI we ought to at least get some


1288       __ beq(CCR0, L_skip_barrier); // non-static
1289     }
1290 
1291     Register klass = R11_scratch1;
1292     __ load_method_holder(klass, R19_method);
1293     __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
1294 
1295     __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
1296     __ mtctr(klass);
1297     __ bctr();
1298 
1299     __ bind(L_skip_barrier);
1300     c2i_no_clinit_check_entry = __ pc();
1301   }
1302 
1303   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
1304 
1305   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1306 }
1307 
1308 #ifdef COMPILER2
1309 // An oop arg. Must pass a handle, not the oop itself.
1310 static void object_move(MacroAssembler* masm,
1311                         int frame_size_in_slots,
1312                         OopMap* oop_map, int oop_handle_offset,
1313                         bool is_receiver, int* receiver_offset,
1314                         VMRegPair src, VMRegPair dst,
1315                         Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1316   assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1317          "receiver has already been moved");
1318 
1319   // We must pass a handle. First figure out the location we use as a handle.
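       // (Assumed background, not spelled out in this hunk: the handle passed
       //  to native code is the address of the slot that holds the oop, and a
       //  null oop becomes a NULL handle, so the callee never sees a raw oop.)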
1320 
1321   if (src.first()->is_stack()) {
1322     // stack to stack or reg
1323 
1324     const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1325     Label skip;
1326     const int oop_slot_in_callers_frame = reg2slot(src.first());
1327 
1328     guarantee(!is_receiver, "expecting receiver in register");


1796     VMReg r = regs[0].first();
1797     assert(r->is_valid(), "bad receiver arg");
1798     if (r->is_stack()) {
1799       // Porting note:  This assumes that compiled calling conventions always
1800       // pass the receiver oop in a register.  If this is not true on some
1801       // platform, pick a temp and load the receiver from stack.
1802       fatal("receiver always in a register");
1803       receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
1804       __ ld(receiver_reg, reg2offset(r), R1_SP);
1805     } else {
1806       // no data motion is needed
1807       receiver_reg = r->as_Register();
1808     }
1809   }
1810 
1811   // Figure out which address we are really jumping to:
1812   MethodHandles::generate_method_handle_dispatch(masm, iid,
1813                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1814 }
1815 
1816 #endif // COMPILER2
1817 
1818 // ---------------------------------------------------------------------------
1819 // Generate a native wrapper for a given method. The method takes arguments
1820 // in the Java compiled code convention, marshals them to the native
1821 // convention (handlizes oops, etc), transitions to native, makes the call,
1822 // returns to java state (possibly blocking), unhandlizes any result and
1823 // returns.
1824 //
1825 // Critical native functions are a shorthand for the use of
1826 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1827 // functions.  The wrapper is expected to unpack the arguments before
1828 // passing them to the callee and perform checks before and after the
1829 // native call to ensure that the GCLocker
1830 // lock_critical/unlock_critical semantics are followed.  Some other
1831 // parts of JNI setup are skipped, like the tear down of the JNI handle
1832 // block and the check for pending exceptions, since it's impossible for them
1833 // to be thrown.
1834 //
1835 // They are roughly structured like this:
1836 //   if (GCLocker::needs_gc())
1837 //     SharedRuntime::block_for_jni_critical();
1838 //   transition to thread_in_native
1839 //   unpack array arguments and call native entry point
1840 //   check for safepoint in progress
1841 //   check if any thread suspend flags are set
1842 //     call into JVM and possibly unlock the JNI critical
1843 //     if a GC was suppressed while in the critical native.
1844 //   transition back to thread_in_Java
1845 //   return to caller
1846 //
1847 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1848                                                 const methodHandle& method,
1849                                                 int compile_id,
1850                                                 BasicType *in_sig_bt,
1851                                                 VMRegPair *in_regs,
1852                                                 BasicType ret_type,
1853                                                 address critical_entry) {
1854 #ifdef COMPILER2
1855   if (method->is_method_handle_intrinsic()) {
1856     vmIntrinsics::ID iid = method->intrinsic_id();
1857     intptr_t start = (intptr_t)__ pc();
1858     int vep_offset = ((intptr_t)__ pc()) - start;
1859     gen_special_dispatch(masm,
1860                          method,
1861                          in_sig_bt,
1862                          in_regs);
1863     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1864     __ flush();
1865     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1866     return nmethod::new_native_nmethod(method,
1867                                        compile_id,
1868                                        masm->code(),
1869                                        vep_offset,
1870                                        frame_complete,
1871                                        stack_slots / VMRegImpl::slots_per_word,
1872                                        in_ByteSize(-1),
1873                                        in_ByteSize(-1),
1874                                        (OopMapSet*)NULL);


2091   Register r_temp_3     = R24;
2092   Register r_temp_4     = R25;
2093   Register r_temp_5     = R26;
2094   Register r_temp_6     = R27;
2095   Register r_return_pc  = R28;
2096 
2097   Register r_carg1_jnienv        = noreg;
2098   Register r_carg2_classorobject = noreg;
2099   if (!is_critical_native) {
2100     r_carg1_jnienv        = out_regs[0].first()->as_Register();
2101     r_carg2_classorobject = out_regs[1].first()->as_Register();
2102   }
2103 
2104 
2105   // Generate the Unverified Entry Point (UEP).
2106   // --------------------------------------------------------------------------
2107   assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2108 
2109   // Check ic: object class == cached class?
2110   if (!method_is_static) {
2111     Register ic = as_Register(Matcher::inline_cache_reg_encode());
2112     Register receiver_klass = r_temp_1;
2113 
2114     __ cmpdi(CCR0, R3_ARG1, 0);
2115     __ beq(CCR0, ic_miss);
2116     __ verify_oop(R3_ARG1);
2117     __ load_klass(receiver_klass, R3_ARG1);
2118 
2119     __ cmpd(CCR0, receiver_klass, ic);
2120     __ bne(CCR0, ic_miss);
2121   }
2122 
2123 
2124   // Generate the Verified Entry Point (VEP).
2125   // --------------------------------------------------------------------------
2126   vep_start_pc = (intptr_t)__ pc();
2127 
2128   if (UseRTMLocking) {
2129     // Abort RTM transaction before calling JNI
2130     // because critical section can be large and
2131     // abort anyway. Also nmethod can be deoptimized.


2621   __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
2622 
2623 
2624   // Check for pending exceptions.
2625   // --------------------------------------------------------------------------
2626   __ ld(r_temp_2, thread_(pending_exception));
2627   __ cmpdi(CCR0, r_temp_2, 0);
2628   __ bne(CCR0, handle_pending_exception);
2629   }
2630 
2631   // Return
2632   // --------------------------------------------------------------------------
2633 
2634   __ pop_frame();
2635   __ restore_LR_CR(R11);
2636   __ blr();
2637 
2638 
2639   // Handler for pending exceptions (out-of-line).
2640   // --------------------------------------------------------------------------
2641 
2642   // Since this is a native call, we know the proper exception handler
2643   // is the empty function. We just pop this frame and then jump to
2644   // forward_exception_entry.
2645   if (!is_critical_native) {
2646     __ align(InteriorEntryAlignment);
2647     __ bind(handle_pending_exception);
2648 
2649     __ pop_frame();
2650     __ restore_LR_CR(R11);
2651     __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2652                      relocInfo::runtime_call_type);
2653   }
2654 
2655   // Handler for a cache miss (out-of-line).
2656   // --------------------------------------------------------------------------
2657 
2658   if (!method_is_static) {
2659     __ align(InteriorEntryAlignment);
2660     __ bind(ic_miss);
2661 
2662     __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
2663                      relocInfo::runtime_call_type);
2664   }
2665 
2666   // Done.
2667   // --------------------------------------------------------------------------
2668 
2669   __ flush();
2670 
2671   nmethod *nm = nmethod::new_native_nmethod(method,
2672                                             compile_id,
2673                                             masm->code(),
2674                                             vep_start_pc-start_pc,
2675                                             frame_done_pc-start_pc,
2676                                             stack_slots / VMRegImpl::slots_per_word,
2677                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2678                                             in_ByteSize(lock_offset),
2679                                             oop_maps);
2680 
2681   if (is_critical_native) {
2682     nm->set_lazy_critical_native(true);
2683   }
2684 
2685   return nm;
2686 #else
2687   ShouldNotReachHere();
2688   return NULL;
2689 #endif // COMPILER2
2690 }
2691 
2692 // This function returns the adjusted size (in number of words) of a c2i adapter
2693 // activation, for use during deoptimization.
2694 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2695   return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
2696 }
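
A worked example for last_frame_adjust() (assuming
Interpreter::stackElementWords == 1 and frame::alignment_in_bytes == 16 on this
port): with callee_locals - callee_parameters == 3, the raw adjust is 3 words,
and align_up(3, 16) rounds it up to 16.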
2697 
2698 uint SharedRuntime::out_preserve_stack_slots() {
2699 #if defined(COMPILER1) || defined(COMPILER2)
2700   return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2701 #else
2702   return 0;
2703 #endif
2704 }
2705 
2706 #if defined(COMPILER1) || defined(COMPILER2)
2707 // Frame generation for deopt and uncommon trap blobs.
2708 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2709                                 /* Read */


2846   // --------------------------------------------------------------------------
2847   // Prolog for the non-exception case!
2848 
2849   // We have been called from the deopt handler of the deoptee.
2850   //
2851   // deoptee:
2852   //                      ...
2853   //                      call X
2854   //                      ...
2855   //  deopt_handler:      call_deopt_stub
2856   //  cur. return pc  --> ...
2857   //
2858   // So currently SR_LR points behind the call in the deopt handler.
2859   // We adjust it such that it points to the start of the deopt handler.
2860   // The return_pc has been stored in the frame of the deoptee and
2861   // will replace the address of the deopt_handler in the call
2862   // to Deoptimization::fetch_unroll_info below.
2863   // We can't grab a free register here, because all registers may
2864   // contain live values, so let the RegisterSaver do the adjustment
2865   // of the return pc.
2866   const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();

2867 
2868   // Push the "unpack frame"
2869   // Save everything in sight.
2870   map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2871                                                                    &first_frame_size_in_bytes,
2872                                                                    /*generate_oop_map=*/ true,
2873                                                                    return_pc_adjustment_no_exception,
2874                                                                    RegisterSaver::return_pc_is_lr);
2875   assert(map != NULL, "OopMap must have been created");
2876 
2877   __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
2878   // Save exec mode for unpack_frames.
2879   __ b(exec_mode_initialized);
2880 
2881   // --------------------------------------------------------------------------
2882   // Prolog for exception case
2883 
2884   // An exception is pending.
2885   // We have been called with a return (interpreter) or a jump (exception blob).
2886   //



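(Below: the same ranges as they read with the change applied.)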

 554 }
 555 
 556 // Is the vector's size (in bytes) bigger than the size saved by default?
 557 bool SharedRuntime::is_wide_vector(int size) {
 558   // Note: MaxVectorSize == 8/16 on PPC64.
 559   assert(size <= (SuperwordUseVSX ? 16 : 8), "%d-byte vectors are not supported", size);
 560   return size > 8;
 561 }
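
In other words, only 16-byte VSX vectors (available when SuperwordUseVSX is
set) count as wide here; 8-byte vectors are already covered by the default
save path.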
 562 
 563 size_t SharedRuntime::trampoline_size() {
 564   return Assembler::load_const_size + 8;
 565 }
 566 
 567 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
 568   Register Rtemp = R12;
 569   __ load_const(Rtemp, destination);
 570   __ mtctr(Rtemp);
 571   __ bctr();
 572 }
 573 

 574 static int reg2slot(VMReg r) {
 575   return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 576 }
 577 
 578 static int reg2offset(VMReg r) {
 579   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 580 }
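
Note that the COMPILER2 guard around these two helpers is gone in this
revision: object_move() and the native-wrapper code below, which use them, are
now built unconditionally, so the helpers must be available even in a minimal
VM built without any JIT compiler.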

 581 
 582 // ---------------------------------------------------------------------------
 583 // Read the array of BasicTypes from a signature, and compute where the
 584 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
 585 // quantities. Values less than VMRegImpl::stack0 are registers, those above
 586 // refer to 4-byte stack slots. All stack slots are based off the stack pointer,
 587 // as frame sizes are fixed.
 588 // VMRegImpl::stack0 refers to the first slot 0(sp),
 589 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
 590 // up to RegisterImpl::number_of_registers are the 64-bit
 591 // integer registers.
 592 
 593 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 594 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
 595 // units regardless of build. Of course for i486 there is no 64-bit build.
 596 
 597 // The Java calling convention is a "shifted" version of the C ABI.
 598 // By skipping the first C ABI register we can call non-static jni methods
 599 // with small numbers of arguments without having to shuffle the arguments
 600 // at all. Since we control the java ABI we ought to at least get some


1286       __ beq(CCR0, L_skip_barrier); // non-static
1287     }
1288 
1289     Register klass = R11_scratch1;
1290     __ load_method_holder(klass, R19_method);
1291     __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
1292 
1293     __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
1294     __ mtctr(klass);
1295     __ bctr();
1296 
1297     __ bind(L_skip_barrier);
1298     c2i_no_clinit_check_entry = __ pc();
1299   }
1300 
1301   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
1302 
1303   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1304 }
1305 

1306 // An oop arg. Must pass a handle, not the oop itself.
1307 static void object_move(MacroAssembler* masm,
1308                         int frame_size_in_slots,
1309                         OopMap* oop_map, int oop_handle_offset,
1310                         bool is_receiver, int* receiver_offset,
1311                         VMRegPair src, VMRegPair dst,
1312                         Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1313   assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1314          "receiver has already been moved");
1315 
1316   // We must pass a handle. First figure out the location we use as a handle.
1317 
1318   if (src.first()->is_stack()) {
1319     // stack to stack or reg
1320 
1321     const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
1322     Label skip;
1323     const int oop_slot_in_callers_frame = reg2slot(src.first());
1324 
1325     guarantee(!is_receiver, "expecting receiver in register");


1793     VMReg r = regs[0].first();
1794     assert(r->is_valid(), "bad receiver arg");
1795     if (r->is_stack()) {
1796       // Porting note:  This assumes that compiled calling conventions always
1797       // pass the receiver oop in a register.  If this is not true on some
1798       // platform, pick a temp and load the receiver from stack.
1799       fatal("receiver always in a register");
1800       receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
1801       __ ld(receiver_reg, reg2offset(r), R1_SP);
1802     } else {
1803       // no data motion is needed
1804       receiver_reg = r->as_Register();
1805     }
1806   }
1807 
1808   // Figure out which address we are really jumping to:
1809   MethodHandles::generate_method_handle_dispatch(masm, iid,
1810                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1811 }
1812 


1813 // ---------------------------------------------------------------------------
1814 // Generate a native wrapper for a given method. The method takes arguments
1815 // in the Java compiled code convention, marshals them to the native
1816 // convention (handlizes oops, etc), transitions to native, makes the call,
1817 // returns to java state (possibly blocking), unhandlizes any result and
1818 // returns.
1819 //
1820 // Critical native functions are a shorthand for the use of
1821 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1822 // functions.  The wrapper is expected to unpack the arguments before
1823 // passing them to the callee and perform checks before and after the
1824 // native call to ensure that the GCLocker
1825 // lock_critical/unlock_critical semantics are followed.  Some other
1826 // parts of JNI setup are skipped, like the tear down of the JNI handle
1827 // block and the check for pending exceptions, since it's impossible for them
1828 // to be thrown.
1829 //
1830 // They are roughly structured like this:
1831 //   if (GCLocker::needs_gc())
1832 //     SharedRuntime::block_for_jni_critical();
1833 //   transition to thread_in_native
1834 //   unpack array arguments and call native entry point
1835 //   check for safepoint in progress
1836 //   check if any thread suspend flags are set
1837 //     call into JVM and possibly unlock the JNI critical
1838 //     if a GC was suppressed while in the critical native.
1839 //   transition back to thread_in_Java
1840 //   return to caller
1841 //
1842 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1843                                                 const methodHandle& method,
1844                                                 int compile_id,
1845                                                 BasicType *in_sig_bt,
1846                                                 VMRegPair *in_regs,
1847                                                 BasicType ret_type,
1848                                                 address critical_entry) {

1849   if (method->is_method_handle_intrinsic()) {
1850     vmIntrinsics::ID iid = method->intrinsic_id();
1851     intptr_t start = (intptr_t)__ pc();
1852     int vep_offset = ((intptr_t)__ pc()) - start;
1853     gen_special_dispatch(masm,
1854                          method,
1855                          in_sig_bt,
1856                          in_regs);
1857     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1858     __ flush();
1859     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1860     return nmethod::new_native_nmethod(method,
1861                                        compile_id,
1862                                        masm->code(),
1863                                        vep_offset,
1864                                        frame_complete,
1865                                        stack_slots / VMRegImpl::slots_per_word,
1866                                        in_ByteSize(-1),
1867                                        in_ByteSize(-1),
1868                                        (OopMapSet*)NULL);
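
(The two in_ByteSize(-1) arguments presumably mark "offset not present": a
method handle intrinsic wrapper has neither a handlized receiver nor a monitor
slot to report, and the NULL OopMapSet says the same for oop maps.)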


2085   Register r_temp_3     = R24;
2086   Register r_temp_4     = R25;
2087   Register r_temp_5     = R26;
2088   Register r_temp_6     = R27;
2089   Register r_return_pc  = R28;
2090 
2091   Register r_carg1_jnienv        = noreg;
2092   Register r_carg2_classorobject = noreg;
2093   if (!is_critical_native) {
2094     r_carg1_jnienv        = out_regs[0].first()->as_Register();
2095     r_carg2_classorobject = out_regs[1].first()->as_Register();
2096   }
2097 
2098 
2099   // Generate the Unverified Entry Point (UEP).
2100   // --------------------------------------------------------------------------
2101   assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
2102 
2103   // Check ic: object class == cached class?
2104   if (!method_is_static) {
2105     Register ic = R19_inline_cache_reg;
2106     Register receiver_klass = r_temp_1;
2107 
2108     __ cmpdi(CCR0, R3_ARG1, 0);
2109     __ beq(CCR0, ic_miss);
2110     __ verify_oop(R3_ARG1);
2111     __ load_klass(receiver_klass, R3_ARG1);
2112 
2113     __ cmpd(CCR0, receiver_klass, ic);
2114     __ bne(CCR0, ic_miss);
2115   }
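
This hunk carries one of the actual build fixes: the inline cache register is
now named directly as R19_inline_cache_reg instead of going through
Matcher::inline_cache_reg_encode(), which is only available in COMPILER2
builds.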
2116 
2117 
2118   // Generate the Verified Entry Point (VEP).
2119   // --------------------------------------------------------------------------
2120   vep_start_pc = (intptr_t)__ pc();
2121 
2122   if (UseRTMLocking) {
2123     // Abort RTM transaction before calling JNI
2124     // because critical section can be large and
2125     // abort anyway. Also nmethod can be deoptimized.


2615   __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
2616 
2617 
2618   // Check for pending exceptions.
2619   // --------------------------------------------------------------------------
2620   __ ld(r_temp_2, thread_(pending_exception));
2621   __ cmpdi(CCR0, r_temp_2, 0);
2622   __ bne(CCR0, handle_pending_exception);
2623   }
2624 
2625   // Return
2626   // --------------------------------------------------------------------------
2627 
2628   __ pop_frame();
2629   __ restore_LR_CR(R11);
2630   __ blr();
2631 
2632 
2633   // Handler for pending exceptions (out-of-line).
2634   // --------------------------------------------------------------------------

2635   // Since this is a native call, we know the proper exception handler
2636   // is the empty function. We just pop this frame and then jump to
2637   // forward_exception_entry.
2638   if (!is_critical_native) {

2639     __ bind(handle_pending_exception);
2640 
2641     __ pop_frame();
2642     __ restore_LR_CR(R11);
2643     __ b64_patchable((address)StubRoutines::forward_exception_entry(),
2644                      relocInfo::runtime_call_type);
2645   }
2646 
2647   // Handler for a cache miss (out-of-line).
2648   // --------------------------------------------------------------------------
2649 
2650   if (!method_is_static) {

2651     __ bind(ic_miss);
2652 
2653     __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
2654                      relocInfo::runtime_call_type);
2655   }
2656 
2657   // Done.
2658   // --------------------------------------------------------------------------
2659 
2660   __ flush();
2661 
2662   nmethod *nm = nmethod::new_native_nmethod(method,
2663                                             compile_id,
2664                                             masm->code(),
2665                                             vep_start_pc-start_pc,
2666                                             frame_done_pc-start_pc,
2667                                             stack_slots / VMRegImpl::slots_per_word,
2668                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2669                                             in_ByteSize(lock_offset),
2670                                             oop_maps);
2671 
2672   if (is_critical_native) {
2673     nm->set_lazy_critical_native(true);
2674   }
2675 
2676   return nm;




2677 }
2678 
2679 // This function returns the adjusted size (in number of words) of a c2i adapter
2680 // activation, for use during deoptimization.
2681 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2682   return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
2683 }
2684 
2685 uint SharedRuntime::out_preserve_stack_slots() {
2686 #if defined(COMPILER1) || defined(COMPILER2)
2687   return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2688 #else
2689   return 0;
2690 #endif
2691 }
2692 
2693 #if defined(COMPILER1) || defined(COMPILER2)
2694 // Frame generation for deopt and uncommon trap blobs.
2695 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2696                                 /* Read */


2833   // --------------------------------------------------------------------------
2834   // Prolog for the non-exception case!
2835 
2836   // We have been called from the deopt handler of the deoptee.
2837   //
2838   // deoptee:
2839   //                      ...
2840   //                      call X
2841   //                      ...
2842   //  deopt_handler:      call_deopt_stub
2843   //  cur. return pc  --> ...
2844   //
2845   // So currently SR_LR points behind the call in the deopt handler.
2846   // We adjust it such that it points to the start of the deopt handler.
2847   // The return_pc has been stored in the frame of the deoptee and
2848   // will replace the address of the deopt_handler in the call
2849   // to Deoptimization::fetch_unroll_info below.
2850   // We can't grab a free register here, because all registers may
2851   // contain live values, so let the RegisterSaver do the adjustment
2852   // of the return pc.
2853   //const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
2854   const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
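
This is the other substantive change: HandlerImpl::size_deopt_handler() comes
from the C2 AD file, so a minimal VM build cannot reference it. Judging from
the commented-out line kept above, the deopt handler on this port is a single
bl64_patchable call, so MacroAssembler::bl64_patchable_size yields the same
byte count without the COMPILER2 dependency.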
2855 
2856   // Push the "unpack frame"
2857   // Save everything in sight.
2858   map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
2859                                                                    &first_frame_size_in_bytes,
2860                                                                    /*generate_oop_map=*/ true,
2861                                                                    return_pc_adjustment_no_exception,
2862                                                                    RegisterSaver::return_pc_is_lr);
2863   assert(map != NULL, "OopMap must have been created");
2864 
2865   __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
2866   // Save exec mode for unpack_frames.
2867   __ b(exec_mode_initialized);
2868 
2869   // --------------------------------------------------------------------------
2870   // Prolog for exception case
2871 
2872   // An exception is pending.
2873   // We have been called with a return (interpreter) or a jump (exception blob).
2874   //

