< prev index next >

src/share/vm/c1/c1_Runtime1.cpp

Print this page




 474 // been deoptimized. If that is the case we return the deopt blob
 475 // unpack_with_exception entry instead. This makes life for the exception blob easier
 476 // because making that same check and diverting is painful from assembly language.
 477 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
 478   // Reset method handle flag.
 479   thread->set_is_method_handle_return(false);
 480 
 481   Handle exception(thread, ex);
 482   nm = CodeCache::find_nmethod(pc);
 483   assert(nm != NULL, "this is not an nmethod");
 484   // Adjust the pc as needed.
 485   if (nm->is_deopt_pc(pc)) {
 486     RegisterMap map(thread, false);
 487     frame exception_frame = thread->last_frame().sender(&map);
 488     // if the frame isn't deopted then pc must not correspond to the caller of last_frame
 489     assert(exception_frame.is_deoptimized_frame(), "must be deopted");
 490     pc = exception_frame.pc();
 491   }
 492 #ifdef ASSERT
 493   assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
 494   assert(exception->is_oop(), "just checking");
 495   // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
 496   if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
 497     if (ExitVMOnVerifyError) vm_exit(-1);
 498     ShouldNotReachHere();
 499   }
 500 #endif
 501 
 502   // Check the stack guard pages and reenable them if necessary and there is
 503   // enough space on the stack to do so.  Use fast exceptions only if the guard
 504   // pages are enabled.
 505   bool guard_pages_enabled = thread->stack_guards_enabled();
 506   if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 507 
 508   if (JvmtiExport::can_post_on_exceptions()) {
 509     // To ensure correct notification of exception catches and throws
 510     // we have to deoptimize here.  If we attempted to notify the
 511     // catches and throws during this exception lookup it's possible
 512     // we could deoptimize on the way out of the VM and end back in
 513     // the interpreter at the throw site.  This would result in double
 514     // notifications since the interpreter would also notify about


 659   char* message = SharedRuntime::generate_class_cast_message(
 660     thread, object->klass());
 661   SharedRuntime::throw_and_post_jvmti_exception(
 662     thread, vmSymbols::java_lang_ClassCastException(), message);
 663 JRT_END
 664 
 665 
 666 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
       // Runtime slow-path stub: raises java.lang.IncompatibleClassChangeError on
       // behalf of compiled code and posts the matching JVMTI exception event.
 667   NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)  // debug-build-only statistics counter
 668   ResourceMark rm(thread);  // frees resource-area allocations made while constructing the exception
 669   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
 670 JRT_END
 671 
 672 
 673 JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
       // Slow path for monitor entry from C1-compiled code: the inlined fast path
       // has failed (or fast locking is disabled), so fall back to the shared
       // ObjectSynchronizer. NO_ASYNC variant: presumably suppresses async-exception
       // installation while acquiring the monitor — confirm against the macro definition.
 674   NOT_PRODUCT(_monitorenter_slowcase_cnt++;)  // debug-build-only statistics counter
 675   if (PrintBiasedLockingStatistics) {
 676     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());  // count slow-path entries for the statistics flag
 677   }
 678   Handle h_obj(thread, obj);  // handleize: the enter calls below may safepoint and move obj
 679   assert(h_obj()->is_oop(), "must be NULL or an object");  // NOTE(review): message mentions NULL, but is_oop() presumably requires non-NULL — confirm intent
 680   if (UseBiasedLocking) {
 681     // Retry fast entry if bias is revoked to avoid unnecessary inflation
 682     ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
 683   } else {
 684     if (UseFastLocking) {
 685       // When using fast locking, the compiled code has already tried the fast case
 686       assert(obj == lock->obj(), "must match");
 687       ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
 688     } else {
 689       lock->set_obj(obj);  // compiled code did not store the object into the lock; do it here
 690       ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
 691     }
 692   }
 693 JRT_END
 694 
 695 
 696 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
       // Slow path for monitor exit from C1-compiled code. Declared JRT_LEAF: it
       // must not block, safepoint, or throw, so raw oops (no Handles) are safe here.
 697   NOT_PRODUCT(_monitorexit_slowcase_cnt++;)  // debug-build-only statistics counter
 698   assert(thread == JavaThread::current(), "threads must correspond");
 699   assert(thread->last_Java_sp(), "last_Java_sp must be set")
 700   // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
 701   EXCEPTION_MARK;  // supplies THREAD for the calls below; a pending exception here would be a VM error
 702 
 703   oop obj = lock->obj();
 704   assert(obj->is_oop(), "must be NULL or an object");  // NOTE(review): is_oop() presumably fails on NULL despite the message — confirm
 705   if (UseFastLocking) {
 706     // When using fast locking, the compiled code has already tried the fast case
 707     ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
 708   } else {
 709     ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
 710   }
 711 JRT_END
 712 
 713 // Cf. OptoRuntime::deoptimize_caller_frame
 714 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
 715   // Called from within the owner thread, so no need for safepoint
 716   RegisterMap reg_map(thread, false);
 717   frame stub_frame = thread->last_frame();
 718   assert(stub_frame.is_runtime_frame(), "Sanity check");
 719   frame caller_frame = stub_frame.sender(&reg_map);
 720   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 721   assert(nm != NULL, "Sanity check");
 722   methodHandle method(thread, nm->method());
 723   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 724   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);




 474 // been deoptimized. If that is the case we return the deopt blob
 475 // unpack_with_exception entry instead. This makes life for the exception blob easier
 476 // because making that same check and diverting is painful from assembly language.
 477 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
 478   // Reset method handle flag.
 479   thread->set_is_method_handle_return(false);
 480 
 481   Handle exception(thread, ex);
 482   nm = CodeCache::find_nmethod(pc);
 483   assert(nm != NULL, "this is not an nmethod");
 484   // Adjust the pc as needed.
 485   if (nm->is_deopt_pc(pc)) {
 486     RegisterMap map(thread, false);
 487     frame exception_frame = thread->last_frame().sender(&map);
 488     // if the frame isn't deopted then pc must not correspond to the caller of last_frame
 489     assert(exception_frame.is_deoptimized_frame(), "must be deopted");
 490     pc = exception_frame.pc();
 491   }
 492 #ifdef ASSERT
 493   assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");

 494   // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
 495   if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
 496     if (ExitVMOnVerifyError) vm_exit(-1);
 497     ShouldNotReachHere();
 498   }
 499 #endif
 500 
 501   // Check the stack guard pages and reenable them if necessary and there is
 502   // enough space on the stack to do so.  Use fast exceptions only if the guard
 503   // pages are enabled.
 504   bool guard_pages_enabled = thread->stack_guards_enabled();
 505   if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 506 
 507   if (JvmtiExport::can_post_on_exceptions()) {
 508     // To ensure correct notification of exception catches and throws
 509     // we have to deoptimize here.  If we attempted to notify the
 510     // catches and throws during this exception lookup it's possible
 511     // we could deoptimize on the way out of the VM and end back in
 512     // the interpreter at the throw site.  This would result in double
 513     // notifications since the interpreter would also notify about


 658   char* message = SharedRuntime::generate_class_cast_message(
 659     thread, object->klass());
 660   SharedRuntime::throw_and_post_jvmti_exception(
 661     thread, vmSymbols::java_lang_ClassCastException(), message);
 662 JRT_END
 663 
 664 
 665 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
       // Runtime slow-path stub: raises java.lang.IncompatibleClassChangeError on
       // behalf of compiled code and posts the matching JVMTI exception event.
 666   NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)  // debug-build-only statistics counter
 667   ResourceMark rm(thread);  // frees resource-area allocations made while constructing the exception
 668   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
 669 JRT_END
 670 
 671 
 672 JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
       // Slow path for monitor entry from C1-compiled code: the inlined fast path
       // has failed (or fast locking is disabled), so fall back to the shared
       // ObjectSynchronizer. NO_ASYNC variant: presumably suppresses async-exception
       // installation while acquiring the monitor — confirm against the macro definition.
 673   NOT_PRODUCT(_monitorenter_slowcase_cnt++;)  // debug-build-only statistics counter
 674   if (PrintBiasedLockingStatistics) {
 675     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());  // count slow-path entries for the statistics flag
 676   }
 677   Handle h_obj(thread, obj);  // handleize: the enter calls below may safepoint and move obj

 678   if (UseBiasedLocking) {
 679     // Retry fast entry if bias is revoked to avoid unnecessary inflation
 680     ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
 681   } else {
 682     if (UseFastLocking) {
 683       // When using fast locking, the compiled code has already tried the fast case
 684       assert(obj == lock->obj(), "must match");
 685       ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
 686     } else {
 687       lock->set_obj(obj);  // compiled code did not store the object into the lock; do it here
 688       ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
 689     }
 690   }
 691 JRT_END
 692 
 693 
 694 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
       // Slow path for monitor exit from C1-compiled code. Declared JRT_LEAF: it
       // must not block, safepoint, or throw, so raw oops (no Handles) are safe here.
 695   NOT_PRODUCT(_monitorexit_slowcase_cnt++;)  // debug-build-only statistics counter
 696   assert(thread == JavaThread::current(), "threads must correspond");
 697   assert(thread->last_Java_sp(), "last_Java_sp must be set");
 698   // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
 699   EXCEPTION_MARK;  // supplies THREAD for the calls below; a pending exception here would be a VM error
 700 
 701   oop obj = lock->obj();
 702   assert(oopDesc::is_oop(obj), "must be NULL or an object");
 703   if (UseFastLocking) {
 704     // When using fast locking, the compiled code has already tried the fast case
 705     ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
 706   } else {
 707     ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
 708   }
 709 JRT_END
 710 
 711 // Cf. OptoRuntime::deoptimize_caller_frame
 712 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
 713   // Called from within the owner thread, so no need for safepoint
 714   RegisterMap reg_map(thread, false);
 715   frame stub_frame = thread->last_frame();
 716   assert(stub_frame.is_runtime_frame(), "Sanity check");
 717   frame caller_frame = stub_frame.sender(&reg_map);
 718   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 719   assert(nm != NULL, "Sanity check");
 720   methodHandle method(thread, nm->method());
 721   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 722   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);


< prev index next >