src/share/vm/runtime/sharedRuntime.cpp




 239 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
 240   return (jfloat)x;
 241 JRT_END
 242 
 243 
 244 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
 245   return (jfloat)x;
 246 JRT_END
 247 
 248 
 249 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
 250   return (jdouble)x;
 251 JRT_END
 252 
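
The three leaf routines above implement Java's primitive conversions as plain
C++ casts. A standalone sketch of the rounding behavior involved (ordinary
C++ outside the VM, assuming jfloat/jdouble/jlong correspond to
float/double/int64_t):

    #include <cstdio>
    #include <cstdint>

    // Illustrates the value changes d2f, l2f and l2d can introduce: float has
    // a 24-bit significand and double a 53-bit one, so each conversion rounds
    // when the source value needs more bits than the target provides.
    int main() {
      double d = 0.1;                          // already inexact in binary
      printf("d2f: %.17g -> %.9g\n", d, (double)(float)d);

      int64_t l = (INT64_C(1) << 53) + 1;      // needs 54 significant bits
      printf("l2d: %lld -> %.17g\n", (long long)l, (double)l);
      printf("l2f: %lld -> %.9g\n",  (long long)l, (double)(float)l);
      return 0;
    }
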
 253 // Exception handling across interpreter/compiler boundaries
 254 //
 255 // exception_handler_for_return_address(...) returns the continuation address.
 256 // The continuation address is the entry point of the exception handler of the
 257 // previous frame, as determined by the return address.
 258 
 259 address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
 260   assert(frame::verify_return_pc(return_address), "must be a return pc");
 261 
 262   // the fastest case first
 263   CodeBlob* blob = CodeCache::find_blob(return_address);
 264   if (blob != NULL && blob->is_nmethod()) {
 265     nmethod* code = (nmethod*)blob;
 266     assert(code != NULL, "nmethod must be present");


 267     // native nmethods don't have exception handlers
 268     assert(!code->is_native_method(), "no exception handler");
 269     assert(code->header_begin() != code->exception_begin(), "no exception handler");
 270     if (code->is_deopt_pc(return_address)) {
 271       return SharedRuntime::deopt_blob()->unpack_with_exception();
 272     } else {
 273       return code->exception_begin();
 274     }
 275   }
 276 
 277   // Entry code
 278   if (StubRoutines::returns_to_call_stub(return_address)) {
 279     return StubRoutines::catch_exception_entry();
 280   }
 281   // Interpreted code
 282   if (Interpreter::contains(return_address)) {
 283     return Interpreter::rethrow_exception_entry();
 284   }
 285 
 286   // Compiled code
 287   if (CodeCache::contains(return_address)) {
 288     CodeBlob* blob = CodeCache::find_blob(return_address);
 289     if (blob->is_nmethod()) {
 290       nmethod* code = (nmethod*)blob;
 291       assert(code != NULL, "nmethod must be present");


 292       assert(code->header_begin() != code->exception_begin(), "no exception handler");
 293       return code->exception_begin();
 294     }
 295     if (blob->is_runtime_stub()) {
 296       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
 297     }
 298   }
 299   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 300 #ifndef PRODUCT
 301   { ResourceMark rm;
 302     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
 303     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
 304     tty->print_cr("b) other problem");
 305   }
 306 #endif // PRODUCT
 307   ShouldNotReachHere();
 308   return NULL;
 309 }
 310 
 311 
 312 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
 313   return raw_exception_handler_for_return_address(return_address);
 314 JRT_END
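
Condensing the control flow above: the continuation address depends only on
what kind of code the return address falls into. A compilable summary, with
illustrative enums standing in for the real CodeCache/StubRoutines/Interpreter
queries (these names are not VM API):

    // Summary of the dispatch in raw_exception_handler_for_return_address.
    enum class CodeKind     { DeoptimizedNmethodPc, Nmethod, CallStub,
                              Interpreter, RuntimeStub };
    enum class Continuation { UnpackWithException, ExceptionBegin,
                              CatchExceptionEntry, RethrowExceptionEntry,
                              CallerMustSkipStubFrame };

    Continuation continuation_for(CodeKind kind) {
      switch (kind) {
        case CodeKind::DeoptimizedNmethodPc: return Continuation::UnpackWithException;
        case CodeKind::Nmethod:              return Continuation::ExceptionBegin;
        case CodeKind::CallStub:             return Continuation::CatchExceptionEntry;
        case CodeKind::Interpreter:          return Continuation::RethrowExceptionEntry;
        case CodeKind::RuntimeStub:          return Continuation::CallerMustSkipStubFrame;
      }
      return Continuation::CallerMustSkipStubFrame;  // not reached
    }

    int main() {
      return continuation_for(CodeKind::Nmethod) == Continuation::ExceptionBegin ? 0 : 1;
    }
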
 315 

 316 address SharedRuntime::get_poll_stub(address pc) {
 317   address stub;
 318   // Look up the code blob
 319   CodeBlob *cb = CodeCache::find_blob(pc);
 320 
 321   // Should be an nmethod
 322   assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
 323 
 324   // Look up the relocation information
 325   assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
 326     "safepoint polling: type must be poll" );
 327 
 328   assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
 329     "Only polling locations are used for safepoints");
 330 
 331   bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
 332   if (at_poll_return) {
 333     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 334            "polling page return stub not created yet");
 335     stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();


 448         ++scope_depth;
 449       }
 450     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
 451   }
 452 
 453   // found handling method => lookup exception handler
 454   int catch_pco = ret_pc - nm->instructions_begin();
 455 
 456   ExceptionHandlerTable table(nm);
 457   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 458   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 459     // Allow abbreviated catch tables.  The idea is to allow a method
 460     // to materialize its exceptions without committing to the exact
 461     // routing of exceptions.  In particular this is needed for adding
 462     // a synthetic handler to unlock monitors when inlining
 463     // synchronized methods since the unlock path isn't represented in
 464     // the bytecodes.
 465     t = table.entry_for(catch_pco, -1, 0);
 466   }
 467 
 468 #ifdef COMPILER1
 469   if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
 470     // Exception is not handled by this frame so unwind.  Note that
 471     // this is not the same as how C2 does this.  C2 emits a table
 472     // entry that dispatches to the unwind code in the nmethod.
 473     return NULL;
 474   }
 475 #endif /* COMPILER1 */
 476 
 477 
 478   if (t == NULL) {
 479     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
 480     tty->print_cr("   Exception:");
 481     exception->print();
 482     tty->cr();
 483     tty->print_cr(" Compiled exception table:");
 484     table.print();
 485     nm->print_code();
 486     guarantee(false, "missing exception handler");
 487     return NULL;
 488   }
 489 
 490   return nm->instructions_begin() + t->pco();
 491 }
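
The lookup above keys the handler table on the return pc's offset into the
nmethod plus the (handler bci, inlining depth) pair, with bci -1 acting as the
catch-all consulted by the abbreviated-table fallback. A toy model of that
matching logic (the real ExceptionHandlerTable is packed differently; all
names here are illustrative):

    #include <vector>

    // Toy model of the entry_for lookups above: exact match first, then the
    // bci -1 catch-all used for abbreviated catch tables.
    struct ToyHandlerEntry { int pco; int bci; int scope_depth; int handler_pco; };

    const ToyHandlerEntry* toy_entry_for(const std::vector<ToyHandlerEntry>& table,
                                         int catch_pco, int bci, int scope_depth) {
      for (const ToyHandlerEntry& e : table) {
        if (e.pco == catch_pco && e.bci == bci && e.scope_depth == scope_depth) {
          return &e;
        }
      }
      return nullptr;
    }

    int main() {
      std::vector<ToyHandlerEntry> table = {
        { 0x40, 12, 0, 0x80 },   // ordinary handler at bci 12
        { 0x40, -1, 0, 0xc0 },   // catch-all, e.g. a synthetic unlock handler
      };
      const ToyHandlerEntry* t = toy_entry_for(table, 0x40, 7, 0);
      if (t == nullptr) t = toy_entry_for(table, 0x40, -1, 0);  // fallback, as above
      return (t != nullptr && t->handler_pco == 0xc0) ? 0 : 1;
    }
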
 492 
 493 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
 494   // These errors occur only at call sites
 495   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
 496 JRT_END
 497 


 875       // means then there could be a bug here.
 876       guarantee((retry_count++ < 100),
 877                 "Could not resolve to latest version of redefined method");
 878       // method is redefined in the middle of resolve so re-try.
 879       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
 880     }
 881   }
 882   return callee_method;
 883 }
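
The lines above are the tail of a bounded retry loop: if RedefineClasses swaps
the method out while resolution is in flight, resolution simply runs again,
and the guarantee fires only if retrying never converges. A self-contained
sketch of that pattern (all names here are illustrative):

    #include <cassert>

    // Re-resolve while the result is stale (the method was redefined
    // mid-resolve), but only a bounded number of times -- endless retries
    // would indicate a bug rather than a redefinition race.
    template <typename ResolveFn, typename StaleFn>
    auto resolve_with_retry(ResolveFn resolve, StaleFn is_stale) {
      auto callee = resolve();
      for (int retry_count = 0; is_stale(callee); callee = resolve()) {
        assert(retry_count++ < 100 && "could not resolve to latest version");
      }
      return callee;
    }

    int main() {
      int version = 0;
      auto resolve  = [&] { return ++version; };     // each attempt sees a newer version
      auto is_stale = [&](int m) { return m < 3; };  // versions 1 and 2 are stale
      return resolve_with_retry(resolve, is_stale) == 3 ? 0 : 1;
    }
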
 884 
 885 // Resolves a call.  The compilers generate code for calls that come here
 886 // to be resolved; the call site is then patched with the real destination.
 887 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
 888                                            bool is_virtual,
 889                                            bool is_optimized, TRAPS) {
 890 
 891   ResourceMark rm(thread);
 892   RegisterMap cbl_map(thread, false);
 893   frame caller_frame = thread->last_frame().sender(&cbl_map);
 894 
 895   CodeBlob* cb = caller_frame.cb();
 896   guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");

 897   // make sure caller is not getting deoptimized
 898   // and removed before we are done with it.
 899   // CLEANUP - with lazy deopt shouldn't need this lock
 900   nmethodLocker caller_lock((nmethod*)cb);
 901 
 902 
 903   // determine call info & receiver
 904   // note: a) receiver is NULL for static calls
 905   //       b) an exception is thrown if receiver is NULL for non-static calls
 906   CallInfo call_info;
 907   Bytecodes::Code invoke_code = Bytecodes::_illegal;
 908   Handle receiver = find_callee_info(thread, invoke_code,
 909                                      call_info, CHECK_(methodHandle()));
 910   methodHandle callee_method = call_info.selected_method();
 911 
 912   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
 913          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
 914 
 915 #ifndef PRODUCT
 916   // tracing/debugging/statistics
 917   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
 918                 (is_virtual) ? (&_resolve_virtual_ctr) :
 919                                (&_resolve_static_ctr);
 920   Atomic::inc(addr);
 921 
 922   if (TraceCallFixup) {
 923     ResourceMark rm(thread);
 924     tty->print("resolving %s%s (%s) call to",
 925       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
 926       Bytecodes::name(invoke_code));
 927     callee_method->print_short_name(tty);
 928     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
 929   }
 930 #endif
 931 







 932   // Compute entry points. This might require generation of C2I converter
 933   // frames, so we cannot be holding any locks here. Furthermore, the
 934   // computation of the entry points is independent of patching the call.  We
 935   // always return the entry-point, but we only patch the stub if the call has
 936 // not been deoptimized.  Return values: For a virtual call this is a
 937   // (cached_oop, destination address) pair. For a static call/optimized
 938   // virtual this is just a destination address.
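
As the comment above notes, the two call kinds produce differently shaped
results. A toy rendering of that distinction (illustrative stand-ins for
CompiledICInfo and StaticCallInfo, not their real layouts):

    // A virtual call resolves to a (cached oop, destination) pair -- the
    // inline cache needs both -- while a static or optimized-virtual call
    // resolves to a destination address alone.
    struct ToyVirtualCallInfo { const void* cached_oop; const void* destination; };
    struct ToyStaticCallInfo  { const void* destination; };

    int main() {
      ToyVirtualCallInfo vinfo = { nullptr, nullptr };
      ToyStaticCallInfo  sinfo = { nullptr };
      (void)vinfo; (void)sinfo;
      return 0;
    }
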
 939 
 940   StaticCallInfo static_call_info;
 941   CompiledICInfo virtual_call_info;
 942 
 943 
 944   // Make sure the callee nmethod does not get deoptimized and removed before
 945   // we are done patching the code.
 946   nmethod* nm = callee_method->code();
 947   nmethodLocker nl_callee(nm);
 948 #ifdef ASSERT
 949   address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
 950 #endif
 951 
 952   if (is_virtual) {
 953     assert(receiver.not_null(), "sanity check");
 954     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
 955     KlassHandle h_klass(THREAD, receiver->klass());
 956     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
 957                      is_optimized, static_bound, virtual_call_info,
 958                      CHECK_(methodHandle()));
 959   } else {
 960     // static call
 961     CompiledStaticCall::compute_entry(callee_method, static_call_info);
 962   }
 963 
 964   // grab lock, check for deoptimization and potentially patch caller
 965   {
 966     MutexLocker ml_patch(CompiledIC_lock);
 967 
 968     // Now that we are ready to patch: if the methodOop was redefined in the
 969     // meantime, don't update the call site and let the caller retry.




 239 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
 240   return (jfloat)x;
 241 JRT_END
 242 
 243 
 244 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
 245   return (jfloat)x;
 246 JRT_END
 247 
 248 
 249 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
 250   return (jdouble)x;
 251 JRT_END
 252 
 253 // Exception handling across interpreter/compiler boundaries
 254 //
 255 // exception_handler_for_return_address(...) returns the continuation address.
 256 // The continuation address is the entry point of the exception handler of the
 257 // previous frame, as determined by the return address.
 258 
 259 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
 260   assert(frame::verify_return_pc(return_address), "must be a return pc");
 261 
 262   // the fastest case first
 263   CodeBlob* blob = CodeCache::find_blob(return_address);
 264   if (blob != NULL && blob->is_nmethod()) {
 265     nmethod* code = (nmethod*)blob;
 266     assert(code != NULL, "nmethod must be present");
 267     // Check if the return address is a MethodHandle call site.
 268     thread->set_is_method_handle_exception(code->is_method_handle_return(return_address));
 269     // native nmethods don't have exception handlers
 270     assert(!code->is_native_method(), "no exception handler");
 271     assert(code->header_begin() != code->exception_begin(), "no exception handler");
 272     if (code->is_deopt_pc(return_address)) {
 273       return SharedRuntime::deopt_blob()->unpack_with_exception();
 274     } else {
 275       return code->exception_begin();
 276     }
 277   }
 278 
 279   // Entry code
 280   if (StubRoutines::returns_to_call_stub(return_address)) {
 281     return StubRoutines::catch_exception_entry();
 282   }
 283   // Interpreted code
 284   if (Interpreter::contains(return_address)) {
 285     return Interpreter::rethrow_exception_entry();
 286   }
 287 
 288   // Compiled code
 289   if (CodeCache::contains(return_address)) {
 290     CodeBlob* blob = CodeCache::find_blob(return_address);
 291     if (blob->is_nmethod()) {
 292       nmethod* code = (nmethod*)blob;
 293       assert(code != NULL, "nmethod must be present");
 294       // Check if the return address is a MethodHandle call site.
 295       thread->set_is_method_handle_exception(code->is_method_handle_return(return_address));
 296       assert(code->header_begin() != code->exception_begin(), "no exception handler");
 297       return code->exception_begin();
 298     }
 299     if (blob->is_runtime_stub()) {
 300       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
 301     }
 302   }
 303   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 304 #ifndef PRODUCT
 305   { ResourceMark rm;
 306     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
 307     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
 308     tty->print_cr("b) other problem");
 309   }
 310 #endif // PRODUCT
 311   ShouldNotReachHere();
 312   return NULL;
 313 }
 314 
 315 
 316 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
 317   return raw_exception_handler_for_return_address(thread, return_address);
 318 JRT_END
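
The new JavaThread parameter exists so the lookup can record, as a side
effect, whether the faulting return address is a MethodHandle call site. A
minimal sketch of that flag's round trip, assuming later exception-dispatch
code reads it back (ToyThread and the toy_* helpers are hypothetical
stand-ins, not VM API):

    // Mirrors the set_is_method_handle_exception calls added on this side of
    // the diff: the handler lookup sets a per-thread flag for later use.
    struct ToyThread {
      bool method_handle_exception = false;
      void set_is_method_handle_exception(bool v) { method_handle_exception = v; }
    };

    bool toy_is_method_handle_return(const void* pc) {
      return pc != nullptr;  // stand-in for nmethod::is_method_handle_return(pc)
    }

    const void* toy_handler_lookup(ToyThread* thread, const void* return_address) {
      thread->set_is_method_handle_exception(toy_is_method_handle_return(return_address));
      return nullptr;  // the real code returns the handler entry point
    }

    int main() {
      ToyThread t;
      toy_handler_lookup(&t, nullptr);
      return t.method_handle_exception ? 1 : 0;
    }
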
 319 
 320 
 321 address SharedRuntime::get_poll_stub(address pc) {
 322   address stub;
 323   // Look up the code blob
 324   CodeBlob *cb = CodeCache::find_blob(pc);
 325 
 326   // Should be an nmethod
 327   assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
 328 
 329   // Look up the relocation information
 330   assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
 331     "safepoint polling: type must be poll" );
 332 
 333   assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
 334     "Only polling locations are used for safepoints");
 335 
 336   bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
 337   if (at_poll_return) {
 338     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 339            "polling page return stub not created yet");
 340     stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();


 453         ++scope_depth;
 454       }
 455     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
 456   }
 457 
 458   // found handling method => lookup exception handler
 459   int catch_pco = ret_pc - nm->instructions_begin();
 460 
 461   ExceptionHandlerTable table(nm);
 462   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 463   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 464     // Allow abbreviated catch tables.  The idea is to allow a method
 465     // to materialize its exceptions without committing to the exact
 466     // routing of exceptions.  In particular this is needed for adding
 467     // a synthetic handler to unlock monitors when inlining
 468     // synchronized methods since the unlock path isn't represented in
 469     // the bytecodes.
 470     t = table.entry_for(catch_pco, -1, 0);
 471   }
 472 










 473   if (t == NULL) {
 474     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
 475     tty->print_cr("   Exception:");
 476     exception->print();
 477     tty->cr();
 478     tty->print_cr(" Compiled exception table:");
 479     table.print();
 480     nm->print_code();
 481     guarantee(false, "missing exception handler");
 482     return NULL;
 483   }
 484 
 485   return nm->instructions_begin() + t->pco();
 486 }
 487 
 488 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
 489   // These errors occur only at call sites
 490   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
 491 JRT_END
 492 


 870       // means then there could be a bug here.
 871       guarantee((retry_count++ < 100),
 872                 "Could not resolve to latest version of redefined method");
 873       // method is redefined in the middle of resolve so re-try.
 874       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
 875     }
 876   }
 877   return callee_method;
 878 }
 879 
 880 // Resolves a call.  The compilers generate code for calls that come here
 881 // to be resolved; the call site is then patched with the real destination.
 882 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
 883                                            bool is_virtual,
 884                                            bool is_optimized, TRAPS) {
 885 
 886   ResourceMark rm(thread);
 887   RegisterMap cbl_map(thread, false);
 888   frame caller_frame = thread->last_frame().sender(&cbl_map);
 889 
 890   CodeBlob* caller_cb = caller_frame.cb();
 891   guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
 892   nmethod* caller_nm = caller_cb->as_nmethod_or_null();
 893   // make sure caller is not getting deoptimized
 894   // and removed before we are done with it.
 895   // CLEANUP - with lazy deopt shouldn't need this lock
 896   nmethodLocker caller_lock(caller_nm);
 897 
 898 
 899   // determine call info & receiver
 900   // note: a) receiver is NULL for static calls
 901   //       b) an exception is thrown if receiver is NULL for non-static calls
 902   CallInfo call_info;
 903   Bytecodes::Code invoke_code = Bytecodes::_illegal;
 904   Handle receiver = find_callee_info(thread, invoke_code,
 905                                      call_info, CHECK_(methodHandle()));
 906   methodHandle callee_method = call_info.selected_method();
 907 
 908   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
 909          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
 910 
 911 #ifndef PRODUCT
 912   // tracing/debugging/statistics
 913   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
 914                 (is_virtual) ? (&_resolve_virtual_ctr) :
 915                                (&_resolve_static_ctr);
 916   Atomic::inc(addr);
 917 
 918   if (TraceCallFixup) {
 919     ResourceMark rm(thread);
 920     tty->print("resolving %s%s (%s) call to",
 921       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
 922       Bytecodes::name(invoke_code));
 923     callee_method->print_short_name(tty);
 924     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
 925   }
 926 #endif
 927 
 928   // JSR 292
 929   // If the resolved method is a MethodHandle invoke target the call
 930   // site must be a MethodHandle call site.
 931   if (callee_method->is_method_handle_invoke()) {
 932     assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
 933   }
 934 
 935   // Compute entry points. This might require generation of C2I converter
 936   // frames, so we cannot be holding any locks here. Furthermore, the
 937   // computation of the entry points is independent of patching the call.  We
 938   // always return the entry-point, but we only patch the stub if the call has
 939 // not been deoptimized.  Return values: For a virtual call this is a
 940   // (cached_oop, destination address) pair. For a static call/optimized
 941   // virtual this is just a destination address.
 942 
 943   StaticCallInfo static_call_info;
 944   CompiledICInfo virtual_call_info;
 945 

 946   // Make sure the callee nmethod does not get deoptimized and removed before
 947   // we are done patching the code.
 948   nmethod* callee_nm = callee_method->code();
 949   nmethodLocker nl_callee(callee_nm);
 950 #ifdef ASSERT
 951   address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
 952 #endif
 953 
 954   if (is_virtual) {
 955     assert(receiver.not_null(), "sanity check");
 956     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
 957     KlassHandle h_klass(THREAD, receiver->klass());
 958     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
 959                      is_optimized, static_bound, virtual_call_info,
 960                      CHECK_(methodHandle()));
 961   } else {
 962     // static call
 963     CompiledStaticCall::compute_entry(callee_method, static_call_info);
 964   }
 965 
 966   // grab lock, check for deoptimization and potentially patch caller
 967   {
 968     MutexLocker ml_patch(CompiledIC_lock);
 969 
 970     // Now that we are ready to patch: if the methodOop was redefined in the
 971     // meantime, don't update the call site and let the caller retry.

