src/share/vm/c1/c1_Runtime1.cpp

rev 1178: merge with cd37471eaecc from http://hg.openjdk.java.net/jdk7/hotspot-comp/hotspot


  // ...
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)

  assert(oop(klass)->is_klass(), "not a class");
  assert(rank >= 1, "rank must be at least 1");
  oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END
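
Here rank is the number of array dimensions and dims points at the per-dimension lengths, so multi_allocate must recurse once per dimension. A minimal standalone sketch of that shape, purely as an illustration (multi_allocate_model is a made-up name, not the HotSpot routine):

    // Illustrative sketch only; not HotSpot code.  Models the recursion
    // behind `new T[d0][d1]...[dn]`: allocate the outermost array, then
    // allocate each inner array with one fewer dimension.
    #include <cstdlib>

    void* multi_allocate_model(int rank, const int* dims) {
      void** a = static_cast<void**>(std::calloc(dims[0], sizeof(void*)));
      if (rank > 1) {
        for (int i = 0; i < dims[0]; i++)
          a[i] = multi_allocate_model(rank - 1, dims + 1);
      }
      return a;   // rank == 1: a zero-filled leaf array
    }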


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread))
  THROW(vmSymbolHandles::java_lang_ArrayStoreException());
JRT_END
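
This entry is reached when a compiled aastore fails its element-type check, for example storing an Integer through an Object[] variable that actually refers to a String[]. A standalone sketch of the check itself (illustrative types, not the VM's klass machinery, which also handles interfaces and array classes):

    // Illustrative sketch only; not HotSpot code.  A store succeeds when the
    // value is null or its class is a subtype of the array's element class.
    struct KlassModel { const KlassModel* super; };

    static bool is_subtype_of(const KlassModel* k, const KlassModel* t) {
      for (; k != nullptr; k = k->super)   // walk the superclass chain
        if (k == t) return true;
      return false;
    }

    // Returns false exactly where the VM raises ArrayStoreException.
    bool store_check(const KlassModel* value_klass, const KlassModel* element_klass) {
      return value_klass == nullptr || is_subtype_of(value_klass, element_klass);
    }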


JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
  if (JvmtiExport::can_post_on_exceptions()) {
    vframeStream vfst(thread, true);
    address bcp = vfst.method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
  }
JRT_END

#ifdef TIERED
JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci))
  RegisterMap map(thread, false);
  frame fr = thread->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "caller frame must contain an nmethod");
  methodHandle method(thread, nm->method());
  if (bci == 0) {
    // invocation counter overflow
    if (!Tier1CountOnly) {
      CompilationPolicy::policy()->method_invocation_event(method, CHECK);
    } else {
      method()->invocation_counter()->reset();
    }
    // ...
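
The convention above: bci == 0 signals a method-entry (invocation) counter overflow, while a nonzero bci signals a loop back-edge counter overflow at that bytecode index. A standalone sketch of how compiled code drives this entry (kThreshold is a stand-in for the VM's compile-threshold flags):

    // Illustrative sketch only; not HotSpot code.
    #include <cstdio>

    const int kThreshold = 10000;            // stand-in for CompileThreshold

    struct MethodModel { int invocations = 0; };

    void counter_overflow_model(MethodModel* m, int bci) {
      if (bci == 0) {
        std::printf("invocation counter overflow: notify the compile policy\n");
        m->invocations = 0;                  // cf. invocation_counter()->reset()
      } else {
        std::printf("back-edge counter overflow at bci %d\n", bci);
      }
    }

    void method_entry(MethodModel* m) {      // emitted at each compiled entry
      if (++m->invocations >= kThreshold) counter_overflow_model(m, 0);
    }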


  // ...
    // if the frame isn't deoptimized then the pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(exception->is_oop(), "just checking");
  // Check that the exception is a subclass of Throwable; otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and re-enable them if necessary, provided
  // there is enough space on the stack to do so.  Use fast exceptions only
  // if the guard pages are enabled.
  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
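  // reguard_stack() can re-arm the yellow zone only once the stack pointer
  // has moved back above the guard area; if the zone stays disabled, the
  // fast stub-based exception path must not be used.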

  if (thread->should_post_on_exceptions_flag()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end up back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself, since we
    // could continue in the exception handler ourselves, but there is
    // no easy way to achieve the desired effect otherwise.
    VM_DeoptimizeFrame deopt(thread, caller_frame.id());
    VMThread::execute(&deopt);

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }
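
So the lookup yields one of two continuations: the compiled handler itself on the fast path, or, when exception events must be posted, the deopt blob's unpack-with-exception entry so the interpreter performs the unwind and posts each catch/throw exactly once. A standalone sketch of that decision (illustrative only; strings stand in for real code addresses):

    // Illustrative sketch only; not HotSpot code.
    const char* continuation_for_exception(bool post_on_exceptions) {
      if (post_on_exceptions) {
        // The caller frame was deoptimized above; resume in the deopt
        // blob so the interpreter does the unwinding and notification.
        return "deopt_blob->unpack_with_exception_in_tls";
      }
      return "compiled exception handler";   // normal fast path
    }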