 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

#define __ _masm->

#ifndef CC_INTERP

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                       frame::interpreter_frame_monitor_block_top_offset *
                       wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal sp for current rfp (stack
                                  // grows downward)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop ("interpreter frame not set up");
                                        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Test to see if we should create a method data oop
    __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
    __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
    __ cmpw(r0, rscratch2);
    __ br(Assembler::LT, *profile_method_continue);

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(r0, *profile_method);
    }

    {
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}
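
// The 'sticky' test described above, restated as a plain C++ sketch
// (illustrative only; the helper name and flat 'int' counter are
// assumptions, the real layout lives in InvocationCounter): instead of
// detecting the single increment that crosses the limit, let the count
// run into the sign bit and test the sign, which stays set on every
// subsequent increment as well.
static inline bool sticky_overflow_sketch(int& counter, int increment) {
  counter += increment;  // eventually sets the sign bit
  return counter < 0;    // remains true once set, hence "sticky"
}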

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(*do_continue);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was
// not obvious in generate_method_entry), the guard should work for
// them too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rfp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  __ stp(zr, r13, Address(sp, 6 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}
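
// The SP adjustment above as a plain C++ sketch (illustrative; the
// function name and flat integer types are assumptions). It reserves
// max_stack expression-stack words plus monitor space plus two slack
// words, scales words to bytes (ext::uxtw with shift 3 is a *8), and
// rounds down to the 16-byte alignment AArch64 requires of sp.
static inline uintptr_t moved_sp_sketch(uintptr_t sp,
                                        unsigned max_stack_words,
                                        unsigned monitor_words) {
  uintptr_t bytes = (uintptr_t)(max_stack_words + monitor_words + 2) << 3;
  return (sp - bytes) & ~(uintptr_t)15;  // matches andr(sp, rscratch1, -16)
}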

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  // areturn
  __ andr(sp, r13, -16);  // done with stack
  __ ret(lr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
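
// The ~crc above follows the zlib CRC-32 convention: the value handed
// around in Java is bit-inverted on entry and re-inverted on exit. A
// one-byte software sketch of that convention (illustrative only; the
// stub uses table lookups or the hardware CRC32 instructions, not this
// bitwise loop):
static inline unsigned crc32_update_sketch(unsigned crc, unsigned char b) {
  crc = ~crc;                    // undo the stored inversion
  crc ^= b;
  for (int i = 0; i < 8; i++) {  // reflected polynomial 0xEDB88320
    crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
  }
  return ~crc;                   // re-invert for storage
}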

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
      __ ldrw(off, Address(esp, wordSize)); // offset
      __ add(buf, buf, off); // + offset
      __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}
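
// What the generated instructions do, as a run-time C++ sketch
// (illustrative; the real code above emits one str per page at code
// generation time rather than looping at run time):
static void bang_shadow_pages_sketch(char* sp, int start_page,
                                     int shadow_pages, int page_size) {
  for (int page = start_page; page <= shadow_pages; page++) {
    // touch one word in each page below sp so a stack overflow faults
    // here, in the banging code, rather than at some arbitrary point
    *(volatile intptr_t*)(sp - (intptr_t)page * page_size) = 0;
  }
}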


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack
  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
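
// Each TosState gets its own entry point so the preceding template can
// leave its result cached in a register: aep pushes a reference,
// fep/dep/lep push float/double/long, and bep/cep/sep share iep because
// byte, char and short are int-sized on the expression stack. Every
// path spills the cached value and falls through to vep, the vtos
// entry, so generate_and_dispatch always starts with an empty tos cache.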

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);  // return from result handler

  return entry;
}
        strcpy(method, "B unknown blob : ");
        strcat(method, cb->name());
      }
      if (framesize != NULL) {
        *framesize = cb->frame_size();
      }
    }
  }
}
}


JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
{
  bccheck1(pc, fp, method, bcidx, framesize, decode);
}
}

#endif // BUILTIN_SIM
#endif // !PRODUCT
#endif // ! CC_INTERP
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

#define __ _masm->

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                       frame::interpreter_frame_monitor_block_top_offset *
                       wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal sp for current rfp (stack
                                  // grows downward)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop ("interpreter frame not set up");
                                        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Test to see if we should create a method data oop
    __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
    __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
    __ cmpw(r0, rscratch2);
    __ br(Assembler::LT, *profile_method_continue);

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(r0, *profile_method);
    }

    {
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(do_continue);
}
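
// For reference, the runtime entry called above is declared in
// interpreterRuntime.hpp roughly as (paraphrased here, not a verbatim
// quote):
//   static nmethod* frequency_counter_overflow(JavaThread* thread,
//                                              address branch_bcp);
// A NULL branch_bcp distinguishes method-entry overflow from backedge
// overflow, which is why zero is passed in c_rarg1.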

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was
// not obvious in generate_method_entry), the guard should work for
// them too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rfp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
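
  // Worked example with hypothetical values, for illustration only: if
  // wordSize == 8, interpreter_frame_initial_sp_offset == -9 words and
  // a monitor entry is 2 words, then overhead_size == 9*8 + 2*8 == 88
  // bytes. The check below must account for this plus the additional
  // locals before the guard page can be trusted to catch an overflow.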

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  __ stp(zr, r13, Address(sp, 6 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  // areturn
  __ andr(sp, r13, -16);  // done with stack
  __ ret(lr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
      __ ldrw(off, Address(esp, wordSize)); // offset
      __ add(buf, buf, off); // + offset
      __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack
  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);  // return from result handler

  return entry;
}
        strcpy(method, "B unknown blob : ");
        strcat(method, cb->name());
      }
      if (framesize != NULL) {
        *framesize = cb->frame_size();
      }
    }
  }
}
}


JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
{
  bccheck1(pc, fp, method, bcidx, framesize, decode);
}
}

#endif // BUILTIN_SIM
#endif // !PRODUCT