src/share/vm/c1/c1_Runtime1.cpp

*** 799,808 ****
--- 799,813 ----
  //
  JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
    NOT_PRODUCT(_patch_code_slowcase_cnt++;)
+ #ifdef AARCH64
+   // AArch64 does not patch C1-generated code.
+   ShouldNotReachHere();
+ #endif
+ 
    ResourceMark rm(thread);
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
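
The guard added above makes patch_code fail fast on AArch64 rather than attempt a patch the port never emits: AArch64 C1 does not generate patchable code, so reaching this runtime entry indicates a code-generation bug. A minimal standalone sketch of the pattern follows (illustrative only, not HotSpot code; AARCH64 and should_not_reach_here are stand-ins for the real build macro and ShouldNotReachHere()):

    // guard_sketch.cpp -- illustrative only.
    #include <cstdio>
    #include <cstdlib>

    #define AARCH64 1  // pretend this is an AArch64 build

    static void should_not_reach_here(const char* where) {
      std::fprintf(stderr, "ShouldNotReachHere: %s\n", where);
      std::abort();  // fail fast, loosely mirroring the real macro
    }

    static void patch_code_entry() {
    #ifdef AARCH64
      // The AArch64 port never patches C1-generated code, so reaching
      // this entry is a bug, not a slow path.
      should_not_reach_here("patch_code_entry");
    #endif
      // ... patching slow path for architectures that do patch code ...
    }

    int main() {
      patch_code_entry();  // aborts on the simulated AArch64 build
    }
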
*** 945,955 ****
      // Return to the now deoptimized frame.
    }
  
    // Now copy code back
  
-   {
      MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
      //
      // Deoptimization may have happened while we waited for the lock.
      // In that case we don't bother to do any patching we just return
--- 950,959 ----
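
For context, MutexLockerEx is an RAII holder: it acquires Patching_lock on construction and releases it when the object goes out of scope, on every exit path. Dropping the opening brace above therefore widens the region the lock is held for. A minimal sketch of that idiom using the standard library (illustrative only; std::mutex stands in for HotSpot's Mutex, and the no-safepoint-check semantics are omitted):

    // raii_lock_sketch.cpp -- illustrative only.
    #include <mutex>

    static std::mutex patching_lock;  // stand-in for Patching_lock

    static void copy_code_back() {
      std::lock_guard<std::mutex> ml_patch(patching_lock);
      // Deoptimization may have happened while we waited for the lock;
      // the real code re-checks under the lock and returns without
      // patching in that case.
      // ... copy the patched instructions back ...
    }  // lock released here on every exit path

    int main() { copy_code_back(); }
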
*** 1188,1197 ****
--- 1192,1202 ----
  // if the calling nmethod was deoptimized. We do this by calling a
  // helper method which does the normal VM transition and when it
  // completes we can check for deoptimization. This simplifies the
  // assembly code in the cpu directories.
  //
+ #ifndef TARGET_ARCH_aarch64
  int Runtime1::move_klass_patching(JavaThread* thread) {
  //
  // NOTE: we are still in Java
  //
    Thread* THREAD = thread;
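
The comment block describes the shape of these helpers: the assembly stub calls into the VM, the helper does the actual work under a normal VM transition, and the return value tells the stub whether its caller was deoptimized in the meantime. A simplified sketch of that control flow (hypothetical names; not the real implementation):

    // vm_transition_sketch.cpp -- illustrative only.
    static bool caller_was_deoptimized = false;  // stand-in for caller_is_deopted()

    static void patch_klass_in_vm() {
      // The real helper resolves the Klass under a VM transition;
      // resolution can trigger deoptimization of the calling nmethod.
    }

    // Returns nonzero if the caller was deoptimized, so the assembly stub
    // can unwind to the deoptimization handler instead of returning.
    static int move_klass_patching_sketch() {
      patch_klass_in_vm();
      return caller_was_deoptimized ? 1 : 0;
    }

    int main() { return move_klass_patching_sketch(); }
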
*** 1272,1282 ****
    // Return true if calling code is deoptimized
    return caller_is_deopted();
  JRT_END
  
! 
  JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
    // for now we just print out the block id
    tty->print("%d ", block_id);
  JRT_END
--- 1277,1287 ----
    // Return true if calling code is deoptimized
    return caller_is_deopted();
  JRT_END
  
! #endif
  JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
    // for now we just print out the block id
    tty->print("%d ", block_id);
  JRT_END
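
The #endif in this hunk closes the #ifndef TARGET_ARCH_aarch64 opened in the previous hunk, so the patching helpers up to the JRT_END above are compiled out of AArch64 builds, while trace_block_entry stays in for every architecture. A schematic of the resulting layout (illustrative only, not the real file):

    // layout_sketch.cpp -- illustrative only.
    #include <cstdio>

    #ifndef TARGET_ARCH_aarch64
    // Patching helpers: excluded on AArch64, which never patches
    // C1-generated code.
    static int move_klass_patching_sketch() { return 0; }
    #endif

    // Tracing leaf: kept on every architecture; it just prints the block id.
    static void trace_block_entry_sketch(int block_id) {
      std::printf("%d ", block_id);
    }

    int main() { trace_block_entry_sketch(42); }
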