src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
*** old/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Thu Dec 17 17:51:53 2015
--- new/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Thu Dec 17 17:51:52 2015

*** 25,37 ****
--- 25,37 ----
  #include "precompiled.hpp"
  #include "asm/macroAssembler.hpp"
  #include "interpreter/bytecodeHistogram.hpp"
  #include "interpreter/interpreter.hpp"
- #include "interpreter/interpreterGenerator.hpp"
  #include "interpreter/interpreterRuntime.hpp"
  #include "interpreter/interp_masm.hpp"
+ #include "interpreter/templateInterpreterGenerator.hpp"
  #include "interpreter/templateTable.hpp"
  #include "interpreter/bytecodeTracer.hpp"
  #include "oops/arrayOop.hpp"
  #include "oops/methodData.hpp"
  #include "oops/method.hpp"
*** 57,68 ****
--- 57,66 ----
  #include "../../../../../../simulator/simulator.hpp"
  #endif
  
  #define __ _masm->
  
- #ifndef CC_INTERP
- 
  //-----------------------------------------------------------------------------
  
  extern "C" void entry(CodeBuffer*);
  
  //-----------------------------------------------------------------------------
*** 302,312 ****
--- 300,310 ----
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test
  //
  // rmethod: method
  //
! void InterpreterGenerator::generate_counter_incr(
! void TemplateInterpreterGenerator::generate_counter_incr(
          Label* overflow,
          Label* profile_method,
          Label* profile_method_continue) {
    Label done;
    // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
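This hunk only moves generate_counter_incr from InterpreterGenerator to TemplateInterpreterGenerator; the body is unchanged. The "sticky" overflow note above it is the interesting part: a minimal C++ sketch of the idea, with illustrative globals standing in for the counters HotSpot really keeps in Method* (or in the MDO when profiling):

#include <cstdint>

// Illustrative stand-ins; HotSpot keeps the real counters in Method*
// or in the MDO, not in globals.
static int32_t invocation_count = 0;
static const int32_t compile_threshold = 10000;

// Returns true once the method is hot. Because the count only grows,
// (threshold - count) stays negative on every later call, so the
// overflow condition is "sticky" and needs no separate flag to reset.
static bool increment_and_check() {
  invocation_count++;
  return (compile_threshold - invocation_count) < 0;
}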
*** 380,390 ****
--- 378,388 ----
      }
      __ bind(done);
    }
  }
  
! void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
! void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  
    // Asm interpreter on entry
    // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
    // Everything as it was on entry
*** 399,409 ****
--- 397,407 ----
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::frequency_counter_overflow),
               c_rarg1);
  
-   __ b(*do_continue);
+   __ b(do_continue);
  }
  
  // See if we've got enough room on the stack for locals plus overhead.
  // The expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
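Besides the rename, these two hunks change generate_counter_overflow to take a Label& instead of a Label*, so the branch drops its dereference here and the call sites further down (in generate_native_entry and generate_normal_entry) drop the &. A schematic of the same pointer-to-reference refactor, with Label and MacroAssembler as stand-ins for the real HotSpot types:

struct Label {};

struct MacroAssembler {
  void b(Label& target) { (void)target; /* emit unconditional branch */ }
};

// Old shape: generate_counter_overflow(Label* do_continue) forced call
// sites to write generate_counter_overflow(&continue_after_compile) and
// the body to branch via __ b(*do_continue).
// New shape: the reference cannot be null and needs no dereference.
void generate_counter_overflow(MacroAssembler* _masm, Label& do_continue) {
  _masm->b(do_continue);
}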
*** 416,426 ****
--- 414,424 ----
  // r3: number of additional locals this frame needs (what we must check)
  // rmethod: Method*
  //
  // Kills:
  //      r0
! void InterpreterGenerator::generate_stack_overflow_check(void) {
! void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  
    // monitor entry size: see picture of stack set
    // (generate_method_entry) and frame_amd64.hpp
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
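The comment block explains why this routine only tests a lump sum: the expression stack grows one slot at a time and can therefore fault safely into the guard page, while the locals for a frame arrive all at once. A back-of-envelope sketch of that test with illustrative sizes (the real values come from the AArch64 frame layout, e.g. frame::interpreter_frame_monitor_size() and wordSize):

#include <cstddef>

// Illustrative sizes only, not the AArch64 frame constants.
static const size_t word_size          = 8;
static const size_t monitor_entry_size = 2 * word_size;  // assumption
static const size_t fixed_frame_words  = 12;             // assumption

// The lump-sum check: additional locals plus fixed frame overhead must
// fit now; the expression stack is exempt because it grows a slot at a
// time into the guard page.
static bool frame_fits(size_t free_stack_bytes, size_t extra_local_words) {
  size_t needed = extra_local_words * word_size
                + fixed_frame_words * word_size
                + monitor_entry_size;
  return free_stack_bytes >= needed;
}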
*** 632,642 ****
--- 630,640 ----
  //------------------------------------------------------------------------------------------------------------------------
  //
  //
  // Method entry for java.lang.ref.Reference.get.
! address InterpreterGenerator::generate_Reference_get_entry(void) {
! address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  #if INCLUDE_ALL_GCS
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
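The renamed routine emits a special entry for java.lang.ref.Reference.get whose two parts the in-source comment describes: the plain referent load, plus a G1 pre-barrier on the loaded value. A rough C++ rendering (hypothetical types; the real code emits assembly, and the barrier runs only while G1's SATB concurrent marking is active):

struct oopDesc;
struct Reference { oopDesc* referent; };

static void g1_pre_barrier(oopDesc* value) {
  (void)value;  // part 2: log the referent to the SATB queue (sketch only)
}

static oopDesc* reference_get(Reference* self) {
  oopDesc* value = self->referent;  // part 1: _aload_0, _getfield
  if (value != nullptr) {
    g1_pre_barrier(value);          // keep it visible to concurrent marking
  }
  return value;                     // _areturn
}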
*** 710,720 ****
--- 708,718 ----
  /**
   * Method entry for static native methods:
   *   int java.util.zip.CRC32.update(int crc, int b)
   */
! address InterpreterGenerator::generate_CRC32_update_entry() {
! address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
    if (UseCRC32Intrinsics) {
      address entry = __ pc();
  
      // rmethod: Method*
      // r13: senderSP must preserved for slow path
*** 764,774 ****
--- 762,772 ----
  /**
   * Method entry for static native methods:
   *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
   *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
   */
! address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
! address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
    if (UseCRC32Intrinsics) {
      address entry = __ pc();
  
      // rmethod: Method*
      // r13: senderSP must preserved for slow path
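Both CRC32 entries follow the same guard pattern: emit a fast-path entry only when UseCRC32Intrinsics is set, and return NULL otherwise so the dispatcher falls back to the regular native entry. Sketched with stand-in types (the real code takes __ pc() from the live CodeBuffer):

typedef unsigned char* address;

static unsigned char code_buffer[64];  // stand-in for the CodeBuffer

// Fast path only when the intrinsic is enabled; returning NULL tells
// the caller to generate the ordinary native entry instead.
static address generate_crc32_entry(bool use_crc32_intrinsics) {
  if (use_crc32_intrinsics) {
    address entry = code_buffer;  // the real code uses __ pc() here
    // ... emit: load the crc/buf/off/len arguments, call the CRC32 stub,
    // return to the sender via the senderSP preserved in r13 ...
    return entry;
  }
  return nullptr;  // slow path: the regular native entry is used
}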
*** 819,829 ****
--- 817,832 ----
      return entry;
    }
    return NULL;
  }
  
- void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
+ // Not supported
+ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+   return NULL;
+ }
+ 
+ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
    // Bang each page in the shadow zone. We can't assume it's been done for
    // an interpreter frame with greater than a page of locals, so each page
    // needs to be checked.  Only true for non-native.
    if (UseStackBanging) {
      const int start_page = native_call ? StackShadowPages : 1;
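The newly added generate_CRC32C_updateBytes_entry simply returns NULL, since the CRC32C intrinsic is not supported on AArch64 at this point. The bang_stack_shadow_pages method that moves here touches every page of the shadow zone; approximated in plain C++ (illustrative constants, where HotSpot would use the OS page size and the StackShadowPages flag):

#include <cstddef>

// Illustrative constants; HotSpot derives them from os::vm_page_size()
// and the StackShadowPages flag.
static const size_t page_size    = 4096;
static const int    shadow_pages = 20;

// Touch one byte per shadow page below sp so that a missing mapping
// faults here, at a well-defined point. Native calls start at the far
// end (a single bang); interpreted frames bang every page, because a
// frame with more than a page of locals may leap past the guard page.
static void bang_shadow_pages(volatile char* sp, bool native_call) {
  const int start_page = native_call ? shadow_pages : 1;
  for (int page = start_page; page <= shadow_pages; page++) {
    sp[-static_cast<ptrdiff_t>(page * page_size)] = 0;
  }
}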
*** 837,847 ****
--- 840,850 ----
  
  // Interpreter stub for calling a native method. (asm interpreter)
  // This sets up a somewhat different looking stack for calling the
  // native method than the typical interpreter frame setup.
! address InterpreterGenerator::generate_native_entry(bool synchronized) {
! address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
    // determine code generation flags
    bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
  
    // r1: Method*
    // rscratch1: sender sp
*** 1266,1285 ****
--- 1269,1288 ----
    __ ret(lr);
  
    if (inc_counter) {
      // Handle overflow of counter and compile method
      __ bind(invocation_counter_overflow);
-     generate_counter_overflow(&continue_after_compile);
+     generate_counter_overflow(continue_after_compile);
    }
  
    return entry_point;
  }
  
  //
  // Generic interpreted method entry to (asm) interpreter
  //
! address InterpreterGenerator::generate_normal_entry(bool synchronized) {
! address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
    // determine code generation flags
    bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
  
    // rscratch1: sender sp
    address entry_point = __ pc();
*** 1437,1447 ****
--- 1440,1450 ----
        __ get_method(r1);
        __ b(profile_method_continue);
      }
      // Handle overflow of counter and compile method
      __ bind(invocation_counter_overflow);
-     generate_counter_overflow(&continue_after_compile);
+     generate_counter_overflow(continue_after_compile);
    }
  
    return entry_point;
  }
*** 1723,1743 ****
--- 1726,1735 ----
    __ bind(L);
    generate_and_dispatch(t);
  }
  
  //-----------------------------------------------------------------------------
- // Generation of individual instructions
- 
- // helpers for generate_and_dispatch
- 
- 
- InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-   : TemplateInterpreterGenerator(code) {
-    generate_all(); // down here so it can be "virtual"
- }
- 
- //-----------------------------------------------------------------------------
  
  // Non-product code
  #ifndef PRODUCT
  address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
    address entry = __ pc();
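With the platform InterpreterGenerator class gone, its constructor, and with it the generate_all() call, leaves this file; under this refactor it would live with TemplateInterpreterGenerator in shared code rather than being repeated per CPU. An assumed sketch of the post-refactor shape:

class StubQueue;

// Assumed post-refactor shape: the constructor moves to shared code, so
// this per-CPU file no longer defines one.
class TemplateInterpreterGenerator {
 public:
  explicit TemplateInterpreterGenerator(StubQueue* code) {
    (void)code;      // the real constructor hands this to its base class
    generate_all();  // still "down here so it can be 'virtual'"
  }
 private:
  void generate_all() { /* generate all interpreter entry points (elided) */ }
};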
*** 1920,1925 ****
--- 1912,1916 ----
  }
  }
  #endif // BUILTIN_SIM
  
  #endif // !PRODUCT
- #endif // ! CC_INTERP
