src/hotspot/share/code/nmethod.cpp

 392   _stack_traversal_mark       = 0;
 393   _unload_reported            = false; // jvmti state
 394   _is_far_code                = false; // nmethods are located in CodeCache
 395 
 396 #ifdef ASSERT
 397   _oops_are_stale             = false;
 398 #endif
 399 
 400   _oops_do_mark_link       = NULL;
 401   _jmethod_id              = NULL;
 402   _osr_link                = NULL;
 403   _unloading_next          = NULL;
 404   _scavenge_root_link      = NULL;
 405   _scavenge_root_state     = 0;
 406 #if INCLUDE_RTM_OPT
 407   _rtm_state               = NoRTM;
 408 #endif
 409 #if INCLUDE_JVMCI
 410   _jvmci_installed_code   = NULL;
 411   _speculation_log        = NULL;

 412 #endif
 413 }
 414 
 415 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
 416   int compile_id,
 417   CodeBuffer *code_buffer,
 418   int vep_offset,
 419   int frame_complete,
 420   int frame_size,
 421   ByteSize basic_lock_owner_sp_offset,
 422   ByteSize basic_lock_sp_offset,
 423   OopMapSet* oop_maps) {
 424   code_buffer->finalize_oop_references(method);
 425   // create nmethod
 426   nmethod* nm = NULL;
 427   {
 428     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 429     int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 430     CodeOffsets offsets;
 431     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);


 444     nm->log_new_nmethod();
 445   }
 446 
 447   return nm;
 448 }
 449 
 450 nmethod* nmethod::new_nmethod(const methodHandle& method,
 451   int compile_id,
 452   int entry_bci,
 453   CodeOffsets* offsets,
 454   int orig_pc_offset,
 455   DebugInformationRecorder* debug_info,
 456   Dependencies* dependencies,
 457   CodeBuffer* code_buffer, int frame_size,
 458   OopMapSet* oop_maps,
 459   ExceptionHandlerTable* handler_table,
 460   ImplicitExceptionTable* nul_chk_table,
 461   AbstractCompiler* compiler,
 462   int comp_level
 463 #if INCLUDE_JVMCI
 464   , Handle installed_code,
 465   Handle speculationLog
 466 #endif
 467 )
 468 {
 469   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 470   code_buffer->finalize_oop_references(method);
 471   // create nmethod
 472   nmethod* nm = NULL;
 473   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 474     int nmethod_size =
 475       CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
 476       + adjust_pcs_size(debug_info->pcs_size())
 477       + align_up((int)dependencies->size_in_bytes(), oopSize)
 478       + align_up(handler_table->size_in_bytes()    , oopSize)
 479       + align_up(nul_chk_table->size_in_bytes()    , oopSize)
 480       + align_up(debug_info->data_size()           , oopSize);
 481 
 482     nm = new (nmethod_size, comp_level)
 483     nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
 484             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 485             oop_maps,


 625 }
 626 
 627 nmethod::nmethod(
 628   Method* method,
 629   CompilerType type,
 630   int nmethod_size,
 631   int compile_id,
 632   int entry_bci,
 633   CodeOffsets* offsets,
 634   int orig_pc_offset,
 635   DebugInformationRecorder* debug_info,
 636   Dependencies* dependencies,
 637   CodeBuffer *code_buffer,
 638   int frame_size,
 639   OopMapSet* oop_maps,
 640   ExceptionHandlerTable* handler_table,
 641   ImplicitExceptionTable* nul_chk_table,
 642   AbstractCompiler* compiler,
 643   int comp_level
 644 #if INCLUDE_JVMCI
 645   , Handle installed_code,
 646   Handle speculation_log
 647 #endif
 648   )
 649   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
 650   _native_receiver_sp_offset(in_ByteSize(-1)),
 651   _native_basic_lock_sp_offset(in_ByteSize(-1))
 652 {
 653   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 654   {
 655     debug_only(NoSafepointVerifier nsv;)
 656     assert_locked_or_safepoint(CodeCache_lock);
 657 
 658     _deopt_handler_begin = (address) this;
 659     _deopt_mh_handler_begin = (address) this;
 660 
 661     init_defaults();
 662     _entry_bci               = entry_bci;
 663     _compile_id              = compile_id;
 664     _comp_level              = comp_level;
 665     _orig_pc_offset          = orig_pc_offset;
 666     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 667 
 668     // Section offsets
 669     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
 670     _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
 671     set_ctable_begin(header_begin() + _consts_offset);
 672 
 673 #if INCLUDE_JVMCI
 674     _jvmci_installed_code = installed_code();
 675     _speculation_log = (instanceOop)speculation_log();






 676 
 677     if (compiler->is_jvmci()) {
 678       // JVMCI might not produce any stub sections
 679       if (offsets->value(CodeOffsets::Exceptions) != -1) {
 680         _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
 681       } else {
 682         _exception_offset = -1;
 683       }
 684       if (offsets->value(CodeOffsets::Deopt) != -1) {
 685         _deopt_handler_begin       = (address) this + code_offset()          + offsets->value(CodeOffsets::Deopt);
 686       } else {
 687         _deopt_handler_begin = NULL;
 688       }
 689       if (offsets->value(CodeOffsets::DeoptMH) != -1) {
 690         _deopt_mh_handler_begin  = (address) this + code_offset()          + offsets->value(CodeOffsets::DeoptMH);
 691       } else {
 692         _deopt_mh_handler_begin = NULL;
 693       }
 694     } else {
 695 #endif


1009 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1010 
1011   post_compiled_method_unload();
1012 
1013   // Since this nmethod is being unloaded, make sure that dependencies
1014   // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
1015   // indicate that this work is being done during a GC.
1016   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
1017   assert(is_alive != NULL, "Should be non-NULL");
1018   // A non-NULL is_alive closure indicates that this is being called during GC.
1019   flush_dependencies(is_alive);
1020 
1021   // Break cycle between nmethod & method
1022   LogTarget(Trace, class, unload) lt;
1023   if (lt.is_enabled()) {
1024     LogStream ls(lt);
1025     ls.print_cr("making nmethod " INTPTR_FORMAT
1026                   " unloadable, Method*(" INTPTR_FORMAT
1027                   "), cause(" INTPTR_FORMAT ")",
1028                   p2i(this), p2i(_method), p2i(cause));
1029     if (!Universe::heap()->is_gc_active())
1030       cause->klass()->print_on(&ls);
1031   }
1032   // Unlink the osr method, so we do not look it up again
1033   if (is_osr_method()) {
1034     // Invalidate the osr nmethod only once
1035     if (is_in_use()) {
1036       invalidate_osr_method();
1037     }
1038 #ifdef ASSERT
1039     if (method() != NULL) {
1040       // Make sure osr nmethod is invalidated, i.e. not on the list
1041       bool found = method()->method_holder()->remove_osr_nmethod(this);
1042       assert(!found, "osr nmethod should have been invalidated");
1043     }
1044 #endif
1045   }
1046 
1047   // If _method is already NULL the Method* is about to be unloaded,
1048   // so we don't have to break the cycle. Note that it is possible to
1049   // have the Method* live here, in case we unload the nmethod because
1050   // it is pointing to some oop (other than the Method*) being unloaded.


1060   // Make the nmethod unloaded - i.e., change state and notify sweeper
1061   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1062   if (is_in_use()) {
1063     // Transitioning directly from live to unloaded -- so
1064     // we need to force a cache clean-up; remember this
1065     // for later on.
1066     CodeCache::set_needs_cache_clean(true);
1067   }
1068 
1069   // Unregister must be done before the state change
1070   Universe::heap()->unregister_nmethod(this);
1071 
1072   _state = unloaded;
1073 
1074   // Log the unloading.
1075   log_state_change();
1076 
1077 #if INCLUDE_JVMCI
1078   // The method can only be unloaded after the installed code
1079   // Java wrapper is no longer alive. Here we need to clear out this weak
1080   // reference to the dead object. Nulling out the reference has to happen
1081   // after the method is unregistered since the original value may still be
1082   // tracked by the rset.
1083   maybe_invalidate_installed_code();
1084   // Clear these out after the nmethod has been unregistered and any
1085   // updates to the InstalledCode instance have been performed.
1086   _jvmci_installed_code = NULL;
1087   _speculation_log = NULL;
1088 #endif
1089 
1090   // The Method* is gone at this point
1091   assert(_method == NULL, "Tautology");
1092 
1093   set_osr_link(NULL);
1094   NMethodSweeper::report_state_change(this);
1095 }
1096 
1097 void nmethod::invalidate_osr_method() {
1098   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1099   // Remove from list of active nmethods
1100   if (method() != NULL) {
1101     method()->method_holder()->remove_osr_nmethod(this);
1102   }
1103 }
1104 
1105 void nmethod::log_state_change() const {
1106   if (LogCompilation) {
1107     if (xtty != NULL) {


1229 #ifdef ASSERT
1230   if (is_osr_method() && method() != NULL) {
1231     // Make sure osr nmethod is invalidated, i.e. not on the list
1232     bool found = method()->method_holder()->remove_osr_nmethod(this);
1233     assert(!found, "osr nmethod should have been invalidated");
1234   }
1235 #endif
1236 
1237   // When the nmethod becomes zombie it is no longer alive so the
1238   // dependencies must be flushed.  nmethods in the not_entrant
1239   // state will be flushed later when the transition to zombie
1240   // happens or they get unloaded.
1241   if (state == zombie) {
1242     {
1243       // Flushing dependencies must be done before any possible
1244       // safepoint can sneak in, otherwise the oops used by the
1245       // dependency logic could have become stale.
1246       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1247       if (nmethod_needs_unregister) {
1248         Universe::heap()->unregister_nmethod(this);
1249 #if INCLUDE_JVMCI
1250         _jvmci_installed_code = NULL;
1251         _speculation_log = NULL;
1252 #endif
1253       }
1254       flush_dependencies(NULL);
1255     }
1256 
1257     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1258     // event and it hasn't already been reported for this nmethod then
1259     // report it now. The event may have been reported earlier if the GC
1260     // marked it for unloading. JvmtiDeferredEventQueue support means
1261     // we no longer go to a safepoint here.
1262     post_compiled_method_unload();
1263 
1264 #ifdef ASSERT
1265     // It's no longer safe to access the oops section since zombie
1266     // nmethods aren't scanned for GC.
1267     _oops_are_stale = true;
1268 #endif
1269      // the Method may be reclaimed by class unloading now that the
1270      // nmethod is in zombie state
1271     set_method(NULL);
1272   } else {


1297                   "/Free CodeCache:" SIZE_FORMAT "Kb",
1298                   is_osr_method() ? "osr" : "", _compile_id, p2i(this), CodeCache::blob_count(),
1299                   CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
1300   }
1301 
1302   // We need to deallocate any ExceptionCache data.
1303   // Note that we do not need to grab the nmethod lock for this, it
1304   // better be thread safe if we're disposing of it!
1305   ExceptionCache* ec = exception_cache();
1306   set_exception_cache(NULL);
1307   while (ec != NULL) {
1308     ExceptionCache* next = ec->next();
1309     delete ec;
1310     ec = next;
1311   }
1312 
1313   if (on_scavenge_root_list()) {
1314     CodeCache::drop_scavenge_root_nmethod(this);
1315   }
1316 





1317   CodeBlob::flush();
1318   CodeCache::free(this);
1319 }
1320 
1321 //
1322 // Notify all classes this nmethod is dependent on that it is no
1323 // longer dependent. This should only be called in two situations.
1324 // First, when a nmethod transitions to a zombie all dependents need
1325 // to be cleared.  Since zombification happens at a safepoint there are no
1326 // synchronization issues.  The second place is a little more tricky.
1327 // During phase 1 of mark sweep class unloading may happen and as a
1328 // result some nmethods may get unloaded.  In this case the flushing
1329 // of dependencies must happen during phase 1 since after GC any
1330 // dependencies in the unloaded nmethod won't be updated, so
1331 // traversing the dependency information is unsafe.  In that case this
1332 // function is called with a non-NULL argument and this function only
1333 // notifies instanceKlasses that are reachable.
1334 
1335 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1336   assert_locked_or_safepoint(CodeCache_lock);


1483 }
1484 
1485 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
1486   // Compiled code
1487   {
1488     RelocIterator iter(this, low_boundary);
1489     while (iter.next()) {
1490       if (iter.type() == relocInfo::oop_type) {
1491         if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
1492           return true;
1493         }
1494       }
1495     }
1496   }
1497 
1498   return do_unloading_scopes(is_alive, unloading_occurred);
1499 }
1500 
1501 #if INCLUDE_JVMCI
1502 bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
1503   bool is_unloaded = false;
1504   // Follow JVMCI method
1505   BarrierSet* bs = Universe::heap()->barrier_set();
1506   if (_jvmci_installed_code != NULL) {
1507     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
1508       if (!is_alive->do_object_b(_jvmci_installed_code)) {
1509         clear_jvmci_installed_code();
1510       }
1511     } else {
1512       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
1513         return true;


1514       }
1515     }
1516   }
1517 
1518   if (_speculation_log != NULL) {
1519     if (!is_alive->do_object_b(_speculation_log)) {
1520       bs->write_ref_nmethod_pre(&_speculation_log, this);
1521       _speculation_log = NULL;
1522       bs->write_ref_nmethod_post(&_speculation_log, this);
1523     }
1524   }
1525   return is_unloaded;
1526 }
1527 #endif
1528 
1529 // Iterate over metadata calling this function.   Used by RedefineClasses
1530 void nmethod::metadata_do(void f(Metadata*)) {
1531   address low_boundary = verified_entry_point();
1532   if (is_not_entrant()) {
1533     low_boundary += NativeJump::instruction_size;
1534     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1535     // (See comment above.)
1536   }
1537   {
1538     // Visit all immediate references that are embedded in the instruction stream.
1539     RelocIterator iter(this, low_boundary);
1540     while (iter.next()) {
1541       if (iter.type() == relocInfo::metadata_type ) {
1542         metadata_Relocation* r = iter.metadata_reloc();
1543         // In this loop, we must only follow metadata directly embedded in
1544         // the code.  Other metadata (oop_index>0) is seen as part of
1545         // the metadata section below.


1577   // Visit metadata not embedded in the other places.
1578   if (_method != NULL) f(_method);
1579 }
1580 
1581 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1582   // make sure the oops are ready to receive visitors
1583   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1584   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1585 
1586   // If the method is not entrant or zombie then a JMP is plastered over the
1587   // first few bytes.  If an oop in the old code was there, that oop
1588   // should not get GC'd.  Skip the first few bytes of oops on
1589   // not-entrant methods.
1590   address low_boundary = verified_entry_point();
1591   if (is_not_entrant()) {
1592     low_boundary += NativeJump::instruction_size;
1593     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1594     // (See comment above.)
1595   }
1596 
1597 #if INCLUDE_JVMCI
1598   if (_jvmci_installed_code != NULL) {
1599     f->do_oop((oop*) &_jvmci_installed_code);
1600   }
1601   if (_speculation_log != NULL) {
1602     f->do_oop((oop*) &_speculation_log);
1603   }
1604 #endif
1605 
1606   RelocIterator iter(this, low_boundary);
1607 
1608   while (iter.next()) {
1609     if (iter.type() == relocInfo::oop_type ) {
1610       oop_Relocation* r = iter.oop_reloc();
1611       // In this loop, we must only follow those oops directly embedded in
1612       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1613       assert(1 == (r->oop_is_immediate()) +
1614                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1615              "oop must be found in exactly one place");
1616       if (r->oop_is_immediate() && r->oop_value() != NULL) {
1617         f->do_oop(r->oop_addr());
1618       }
1619     }
1620   }
1621 
1622   // Scopes
1623   // This includes oop constants not inlined in the code stream.
1624   for (oop* p = oops_begin(); p < oops_end(); p++) {
1625     if (*p == Universe::non_oop_word())  continue;  // skip non-oops


2843 #endif
2844 #ifdef COMPILER2
2845   c2_java_nmethod_stats.print_nmethod_stats("C2");
2846 #endif
2847 #if INCLUDE_JVMCI
2848   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
2849 #endif
2850   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
2851   DebugInformationRecorder::print_statistics();
2852 #ifndef PRODUCT
2853   pc_nmethod_stats.print_pc_stats();
2854 #endif
2855   Dependencies::print_statistics();
2856   if (xtty != NULL)  xtty->tail("statistics");
2857 }
2858 
2859 #endif // !PRODUCT
2860 
2861 #if INCLUDE_JVMCI
2862 void nmethod::clear_jvmci_installed_code() {
2863   // write_ref_nmethod_pre/post can only be safely called at a
2864   // safepoint or while holding the CodeCache_lock
2865   assert(CodeCache_lock->is_locked() ||
2866          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2867   if (_jvmci_installed_code != NULL) {
2868     // This must be done carefully to maintain nmethod remembered sets properly
2869     BarrierSet* bs = Universe::heap()->barrier_set();
2870     bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
2871     _jvmci_installed_code = NULL;
2872     bs->write_ref_nmethod_post(&_jvmci_installed_code, this);







2873   }
2874 }
2875 
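The pre/post write-barrier bracketing in clear_jvmci_installed_code() above is the general pattern for mutating a reference field that the GC tracks: announce the location before the store and again after it, so remembered sets stay consistent. A minimal standalone sketch of that protocol, with toy types in place of HotSpot's BarrierSet:

#include <cstdio>

struct Object { const char* name; };   // toy stand-in for an oop

struct ToyBarrierSet {
  // Called before the store, while the old value is still visible.
  void write_ref_pre(Object** slot)  { std::printf("pre:  old=%p\n", (void*)*slot); }
  // Called after the store, once the new value is visible.
  void write_ref_post(Object** slot) { std::printf("post: new=%p\n", (void*)*slot); }
};

// Every mutation of a tracked reference field goes through this helper,
// mirroring the bs->write_ref_nmethod_pre/post bracketing above.
void store_ref(ToyBarrierSet* bs, Object** slot, Object* value) {
  bs->write_ref_pre(slot);
  *slot = value;
  bs->write_ref_post(slot);
}

int main() {
  ToyBarrierSet bs;
  Object wrapper{"installed_code"};
  Object* field = &wrapper;
  store_ref(&bs, &field, nullptr);   // clear the field, as the code above does
  return 0;
}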
2876 void nmethod::maybe_invalidate_installed_code() {
2877   assert(Patching_lock->is_locked() ||
2878          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2879   oop installed_code = jvmci_installed_code();
2880   if (installed_code != NULL) {

2881     nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
2882     if (nm == NULL || nm != this) {
2883       // The link has been broken or the InstalledCode instance is
2884       // associated with another nmethod so do nothing.
2885       return;
2886     }
2887     if (!is_alive()) {
2888       // Break the link between nmethod and InstalledCode such that the nmethod
2889       // can subsequently be flushed safely.  The link must be maintained while
2890       // the method could have live activations since invalidateInstalledCode
2891       // might want to invalidate all existing activations.
2892       InstalledCode::set_address(installed_code, 0);
2893       InstalledCode::set_entryPoint(installed_code, 0);
2894     } else if (is_not_entrant()) {
2895       // Remove the entry point so any invocation will fail but keep
2896       // the address link around so that existing activations can
2897       // be invalidated.
2898       InstalledCode::set_entryPoint(installed_code, 0);
2899     }
2900   }







2901 }
2902 
2903 void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
2904   if (installedCode() == NULL) {
2905     THROW(vmSymbols::java_lang_NullPointerException());
2906   }
2907   jlong nativeMethod = InstalledCode::address(installedCode);
2908   nmethod* nm = (nmethod*)nativeMethod;
2909   if (nm == NULL) {
2910     // Nothing to do
2911     return;
2912   }
2913 
2914   nmethodLocker nml(nm);
2915 #ifdef ASSERT
2916   {
2917     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
2918     // This relationship can only be checked safely under a lock
2919     assert(nm == NULL || !nm->is_alive() || nm->jvmci_installed_code() == installedCode(), "sanity check");
2920   }
2921 #endif
2922 
2923   if (nm->is_alive()) {
2924     // The nmethod state machinery maintains the link between the
2925     // HotSpotInstalledCode and nmethod* so, as long as the nmethod appears to be
2926     // alive, assume there is work to do and deoptimize the nmethod.
2927     nm->mark_for_deoptimization();
2928     VM_Deoptimize op;
2929     VMThread::execute(&op);
2930   }
2931 



2932   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
2933   // Check that it's still associated with the same nmethod and break
2934   // the link if it is.
2935   if (InstalledCode::address(installedCode) == nativeMethod) {
2936     InstalledCode::set_address(installedCode, 0);
2937   }
2938 }
2939 








2940 char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
2941   if (!this->is_compiled_by_jvmci()) {
2942     return NULL;
2943   }
2944   oop installedCode = this->jvmci_installed_code();
2945   if (installedCode != NULL) {
2946     oop installedCodeName = NULL;
2947     if (installedCode->is_a(InstalledCode::klass())) {
2948       installedCodeName = InstalledCode::name(installedCode);
2949     }
2950     if (installedCodeName != NULL) {
2951       return java_lang_String::as_utf8_string(installedCodeName, buf, (int)buflen);
2952     } else {
2953       jio_snprintf(buf, buflen, "null");
2954       return buf;
2955     }
2956   }
2957   jio_snprintf(buf, buflen, "noInstalledCode");
2958   return buf;
2959 }
2960 #endif


 392   _stack_traversal_mark       = 0;
 393   _unload_reported            = false; // jvmti state
 394   _is_far_code                = false; // nmethods are located in CodeCache
 395 
 396 #ifdef ASSERT
 397   _oops_are_stale             = false;
 398 #endif
 399 
 400   _oops_do_mark_link       = NULL;
 401   _jmethod_id              = NULL;
 402   _osr_link                = NULL;
 403   _unloading_next          = NULL;
 404   _scavenge_root_link      = NULL;
 405   _scavenge_root_state     = 0;
 406 #if INCLUDE_RTM_OPT
 407   _rtm_state               = NoRTM;
 408 #endif
 409 #if INCLUDE_JVMCI
 410   _jvmci_installed_code   = NULL;
 411   _speculation_log        = NULL;
 412   _jvmci_installed_code_triggers_unloading = false;
 413 #endif
 414 }
 415 
 416 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
 417   int compile_id,
 418   CodeBuffer *code_buffer,
 419   int vep_offset,
 420   int frame_complete,
 421   int frame_size,
 422   ByteSize basic_lock_owner_sp_offset,
 423   ByteSize basic_lock_sp_offset,
 424   OopMapSet* oop_maps) {
 425   code_buffer->finalize_oop_references(method);
 426   // create nmethod
 427   nmethod* nm = NULL;
 428   {
 429     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 430     int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 431     CodeOffsets offsets;
 432     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);


 445     nm->log_new_nmethod();
 446   }
 447 
 448   return nm;
 449 }
 450 
 451 nmethod* nmethod::new_nmethod(const methodHandle& method,
 452   int compile_id,
 453   int entry_bci,
 454   CodeOffsets* offsets,
 455   int orig_pc_offset,
 456   DebugInformationRecorder* debug_info,
 457   Dependencies* dependencies,
 458   CodeBuffer* code_buffer, int frame_size,
 459   OopMapSet* oop_maps,
 460   ExceptionHandlerTable* handler_table,
 461   ImplicitExceptionTable* nul_chk_table,
 462   AbstractCompiler* compiler,
 463   int comp_level
 464 #if INCLUDE_JVMCI
 465   , jweak installed_code,
 466   jweak speculationLog
 467 #endif
 468 )
 469 {
 470   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 471   code_buffer->finalize_oop_references(method);
 472   // create nmethod
 473   nmethod* nm = NULL;
 474   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 475     int nmethod_size =
 476       CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
 477       + adjust_pcs_size(debug_info->pcs_size())
 478       + align_up((int)dependencies->size_in_bytes(), oopSize)
 479       + align_up(handler_table->size_in_bytes()    , oopSize)
 480       + align_up(nul_chk_table->size_in_bytes()    , oopSize)
 481       + align_up(debug_info->data_size()           , oopSize);
 482 
 483     nm = new (nmethod_size, comp_level)
 484     nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
 485             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 486             oop_maps,


 626 }
 627 
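In new_nmethod() above, each variable-sized table is rounded up to oopSize before being added to nmethod_size, so every section begins at a pointer-aligned offset within the single allocation. A self-contained sketch of that arithmetic, using hypothetical table sizes and the usual power-of-two rounding that HotSpot's align_up performs:

#include <cassert>
#include <cstdio>

// Round x up to the next multiple of alignment (alignment must be a power of two).
static int align_up(int x, int alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power of two expected");
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int oopSize = 8;        // word size on a 64-bit VM
  // Hypothetical raw table sizes in bytes:
  int header_size  = 640;       // header plus code buffer, already aligned
  int pcs_size     = 122;
  int deps_size    = 35;
  int handler_size = 48;        // already a multiple of 8
  int nul_chk_size = 6;
  int scopes_size  = 517;

  int total = header_size
            + align_up(pcs_size,     oopSize)   // 122 -> 128
            + align_up(deps_size,    oopSize)   //  35 ->  40
            + align_up(handler_size, oopSize)   //  48 ->  48
            + align_up(nul_chk_size, oopSize)   //   6 ->   8
            + align_up(scopes_size,  oopSize);  // 517 -> 520
  std::printf("total nmethod allocation: %d bytes\n", total);  // 1384
  return 0;
}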
 628 nmethod::nmethod(
 629   Method* method,
 630   CompilerType type,
 631   int nmethod_size,
 632   int compile_id,
 633   int entry_bci,
 634   CodeOffsets* offsets,
 635   int orig_pc_offset,
 636   DebugInformationRecorder* debug_info,
 637   Dependencies* dependencies,
 638   CodeBuffer *code_buffer,
 639   int frame_size,
 640   OopMapSet* oop_maps,
 641   ExceptionHandlerTable* handler_table,
 642   ImplicitExceptionTable* nul_chk_table,
 643   AbstractCompiler* compiler,
 644   int comp_level
 645 #if INCLUDE_JVMCI
 646   , jweak installed_code,
 647   jweak speculation_log
 648 #endif
 649   )
 650   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
 651   _native_receiver_sp_offset(in_ByteSize(-1)),
 652   _native_basic_lock_sp_offset(in_ByteSize(-1))
 653 {
 654   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 655   {
 656     debug_only(NoSafepointVerifier nsv;)
 657     assert_locked_or_safepoint(CodeCache_lock);
 658 
 659     _deopt_handler_begin = (address) this;
 660     _deopt_mh_handler_begin = (address) this;
 661 
 662     init_defaults();
 663     _entry_bci               = entry_bci;
 664     _compile_id              = compile_id;
 665     _comp_level              = comp_level;
 666     _orig_pc_offset          = orig_pc_offset;
 667     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 668 
 669     // Section offsets
 670     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
 671     _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
 672     set_ctable_begin(header_begin() + _consts_offset);
 673 
 674 #if INCLUDE_JVMCI
 675     _jvmci_installed_code = installed_code;
 676     _speculation_log = speculation_log;
 677     oop obj = JNIHandles::resolve(installed_code);
 678     if (obj == NULL || (obj->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(obj))) {
 679       _jvmci_installed_code_triggers_unloading = false;
 680     } else {
 681       _jvmci_installed_code_triggers_unloading = true;
 682     }
 683 
 684     if (compiler->is_jvmci()) {
 685       // JVMCI might not produce any stub sections
 686       if (offsets->value(CodeOffsets::Exceptions) != -1) {
 687         _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
 688       } else {
 689         _exception_offset = -1;
 690       }
 691       if (offsets->value(CodeOffsets::Deopt) != -1) {
 692         _deopt_handler_begin       = (address) this + code_offset()          + offsets->value(CodeOffsets::Deopt);
 693       } else {
 694         _deopt_handler_begin = NULL;
 695       }
 696       if (offsets->value(CodeOffsets::DeoptMH) != -1) {
 697         _deopt_mh_handler_begin  = (address) this + code_offset()          + offsets->value(CodeOffsets::DeoptMH);
 698       } else {
 699         _deopt_mh_handler_begin = NULL;
 700       }
 701     } else {
 702 #endif


1016 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1017 
1018   post_compiled_method_unload();
1019 
1020   // Since this nmethod is being unloaded, make sure that dependencies
1021   // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
1022   // indicate that this work is being done during a GC.
1023   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
1024   assert(is_alive != NULL, "Should be non-NULL");
1025   // A non-NULL is_alive closure indicates that this is being called during GC.
1026   flush_dependencies(is_alive);
1027 
1028   // Break cycle between nmethod & method
1029   LogTarget(Trace, class, unload) lt;
1030   if (lt.is_enabled()) {
1031     LogStream ls(lt);
1032     ls.print_cr("making nmethod " INTPTR_FORMAT
1033                   " unloadable, Method*(" INTPTR_FORMAT
1034                   "), cause(" INTPTR_FORMAT ")",
1035                   p2i(this), p2i(_method), p2i(cause));


1036   }
1037   // Unlink the osr method, so we do not look it up again
1038   if (is_osr_method()) {
1039     // Invalidate the osr nmethod only once
1040     if (is_in_use()) {
1041       invalidate_osr_method();
1042     }
1043 #ifdef ASSERT
1044     if (method() != NULL) {
1045       // Make sure osr nmethod is invalidated, i.e. not on the list
1046       bool found = method()->method_holder()->remove_osr_nmethod(this);
1047       assert(!found, "osr nmethod should have been invalidated");
1048     }
1049 #endif
1050   }
1051 
1052   // If _method is already NULL the Method* is about to be unloaded,
1053   // so we don't have to break the cycle. Note that it is possible to
1054   // have the Method* live here, in case we unload the nmethod because
1055   // it is pointing to some oop (other than the Method*) being unloaded.


1065   // Make the nmethod unloaded - i.e., change state and notify sweeper
1066   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1067   if (is_in_use()) {
1068     // Transitioning directly from live to unloaded -- so
1069     // we need to force a cache clean-up; remember this
1070     // for later on.
1071     CodeCache::set_needs_cache_clean(true);
1072   }
1073 
1074   // Unregister must be done before the state change
1075   Universe::heap()->unregister_nmethod(this);
1076 
1077   _state = unloaded;
1078 
1079   // Log the unloading.
1080   log_state_change();
1081 
1082 #if INCLUDE_JVMCI
1083   // The method can only be unloaded after the installed code
1084   // Java wrapper is no longer alive. Here we need to clear out this weak
1085   // reference to the dead object.


1086   maybe_invalidate_installed_code();




1087 #endif
1088 
1089   // The Method* is gone at this point
1090   assert(_method == NULL, "Tautology");
1091 
1092   set_osr_link(NULL);
1093   NMethodSweeper::report_state_change(this);
1094 }
1095 
1096 void nmethod::invalidate_osr_method() {
1097   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1098   // Remove from list of active nmethods
1099   if (method() != NULL) {
1100     method()->method_holder()->remove_osr_nmethod(this);
1101   }
1102 }
1103 
1104 void nmethod::log_state_change() const {
1105   if (LogCompilation) {
1106     if (xtty != NULL) {


1228 #ifdef ASSERT
1229   if (is_osr_method() && method() != NULL) {
1230     // Make sure osr nmethod is invalidated, i.e. not on the list
1231     bool found = method()->method_holder()->remove_osr_nmethod(this);
1232     assert(!found, "osr nmethod should have been invalidated");
1233   }
1234 #endif
1235 
1236   // When the nmethod becomes zombie it is no longer alive so the
1237   // dependencies must be flushed.  nmethods in the not_entrant
1238   // state will be flushed later when the transition to zombie
1239   // happens or they get unloaded.
1240   if (state == zombie) {
1241     {
1242       // Flushing dependencies must be done before any possible
1243       // safepoint can sneak in, otherwise the oops used by the
1244       // dependency logic could have become stale.
1245       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1246       if (nmethod_needs_unregister) {
1247         Universe::heap()->unregister_nmethod(this);




1248       }
1249       flush_dependencies(NULL);
1250     }
1251 
1252     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1253     // event and it hasn't already been reported for this nmethod then
1254     // report it now. The event may have been reported earlier if the GC
1255     // marked it for unloading. JvmtiDeferredEventQueue support means
1256     // we no longer go to a safepoint here.
1257     post_compiled_method_unload();
1258 
1259 #ifdef ASSERT
1260     // It's no longer safe to access the oops section since zombie
1261     // nmethods aren't scanned for GC.
1262     _oops_are_stale = true;
1263 #endif
1264      // the Method may be reclaimed by class unloading now that the
1265      // nmethod is in zombie state
1266     set_method(NULL);
1267   } else {


1292                   "/Free CodeCache:" SIZE_FORMAT "Kb",
1293                   is_osr_method() ? "osr" : "", _compile_id, p2i(this), CodeCache::blob_count(),
1294                   CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
1295   }
1296 
1297   // We need to deallocate any ExceptionCache data.
1298   // Note that we do not need to grab the nmethod lock for this, it
1299   // better be thread safe if we're disposing of it!
1300   ExceptionCache* ec = exception_cache();
1301   set_exception_cache(NULL);
1302   while (ec != NULL) {
1303     ExceptionCache* next = ec->next();
1304     delete ec;
1305     ec = next;
1306   }
1307 
1308   if (on_scavenge_root_list()) {
1309     CodeCache::drop_scavenge_root_nmethod(this);
1310   }
1311 
1312 #if INCLUDE_JVMCI
1313   assert(_jvmci_installed_code == NULL, "should have been nulled out when transitioned to zombie");
1314   assert(_speculation_log == NULL, "should have been nulled out when transitioned to zombie");
1315 #endif
1316 
1317   CodeBlob::flush();
1318   CodeCache::free(this);
1319 }
1320 
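The exception-cache teardown in flush() above uses the detach-then-walk idiom: unlink the whole list from the nmethod first, then free nodes from a local pointer, so the owning field never points at half-deleted memory. A minimal sketch of the same idiom with a toy node type, not HotSpot's ExceptionCache:

#include <cstdio>

struct Node {
  int payload;
  Node* next;
};

// Detach the list head first, then delete from the local copy --
// the owning field never references freed memory.
void flush_list(Node** head_field) {
  Node* n = *head_field;   // grab the chain
  *head_field = nullptr;   // detach before any delete
  while (n != nullptr) {
    Node* next = n->next;
    delete n;
    n = next;
  }
}

int main() {
  Node* head = new Node{1, new Node{2, new Node{3, nullptr}}};
  flush_list(&head);
  std::printf("head after flush: %p\n", (void*)head);  // nullptr
  return 0;
}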
1321 //
1322 // Notify all classes this nmethod is dependent on that it is no
1323 // longer dependent. This should only be called in two situations.
1324 // First, when a nmethod transitions to a zombie all dependents need
1325 // to be cleared.  Since zombification happens at a safepoint there are no
1326 // synchronization issues.  The second place is a little more tricky.
1327 // During phase 1 of mark sweep class unloading may happen and as a
1328 // result some nmethods may get unloaded.  In this case the flushing
1329 // of dependencies must happen during phase 1 since after GC any
1330 // dependencies in the unloaded nmethod won't be updated, so
1331 // traversing the dependency information is unsafe.  In that case this
1332 // function is called with a non-NULL argument and this function only
1333 // notifies instanceKlasses that are reachable.
1334 
1335 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1336   assert_locked_or_safepoint(CodeCache_lock);


1483 }
1484 
1485 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
1486   // Compiled code
1487   {
1488     RelocIterator iter(this, low_boundary);
1489     while (iter.next()) {
1490       if (iter.type() == relocInfo::oop_type) {
1491         if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
1492           return true;
1493         }
1494       }
1495     }
1496   }
1497 
1498   return do_unloading_scopes(is_alive, unloading_occurred);
1499 }
1500 
1501 #if INCLUDE_JVMCI
1502 bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {



1503   if (_jvmci_installed_code != NULL) {
1504     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
1505       if (_jvmci_installed_code_triggers_unloading) {
1506         // jweak reference processing has already cleared the referent
1507         make_unloaded(is_alive, NULL);


1508         return true;
1509       } else {
1510         clear_jvmci_installed_code();
1511       }
1512     }
1513   }
1514   return false;








1515 }
1516 #endif
1517 
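The rewritten do_unloading_jvmci() above keys entirely off the cleared jweak: if reference processing wiped the referent, the nmethod either unloads itself (when the wrapper was what kept it interesting) or merely drops the handle. A rough standalone analogy using std::weak_ptr for the weak reference; the types are toys, and the decision flag corresponds to _jvmci_installed_code_triggers_unloading set in the constructor:

#include <cstdio>
#include <memory>

struct Wrapper { bool is_default = false; };  // stand-in for the HotSpotNmethod mirror

struct ToyNmethod {
  bool has_wrapper = false;                // whether an InstalledCode was attached
  std::weak_ptr<Wrapper> installed_code;   // plays the role of the jweak handle
  bool triggers_unloading = false;         // decided once, at construction time
  bool unloaded = false;

  // Mirrors do_unloading_jvmci(): act only once the weak referent is gone.
  bool do_unloading() {
    if (has_wrapper && installed_code.expired()) {
      if (triggers_unloading) {
        unloaded = true;                   // stands in for make_unloaded(...)
        return true;
      }
      has_wrapper = false;                 // stands in for clear_jvmci_installed_code()
    }
    return false;
  }
};

int main() {
  ToyNmethod nm;
  auto wrapper = std::make_shared<Wrapper>();
  nm.has_wrapper = true;
  nm.installed_code = wrapper;
  nm.triggers_unloading = !wrapper->is_default;  // same test the constructor makes
  wrapper.reset();                               // "GC" clears the weak referent
  std::printf("unloaded: %d\n", (int)nm.do_unloading());  // prints 1
  return 0;
}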
1518 // Iterate over metadata calling this function.   Used by RedefineClasses
1519 void nmethod::metadata_do(void f(Metadata*)) {
1520   address low_boundary = verified_entry_point();
1521   if (is_not_entrant()) {
1522     low_boundary += NativeJump::instruction_size;
1523     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1524     // (See comment above.)
1525   }
1526   {
1527     // Visit all immediate references that are embedded in the instruction stream.
1528     RelocIterator iter(this, low_boundary);
1529     while (iter.next()) {
1530       if (iter.type() == relocInfo::metadata_type ) {
1531         metadata_Relocation* r = iter.metadata_reloc();
1532         // In this loop, we must only follow metadata directly embedded in
1533         // the code.  Other metadata (oop_index>0) is seen as part of
1534         // the metadata section below.


1566   // Visit metadata not embedded in the other places.
1567   if (_method != NULL) f(_method);
1568 }
1569 
1570 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1571   // make sure the oops are ready to receive visitors
1572   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1573   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1574 
1575   // If the method is not entrant or zombie then a JMP is plastered over the
1576   // first few bytes.  If an oop in the old code was there, that oop
1577   // should not get GC'd.  Skip the first few bytes of oops on
1578   // not-entrant methods.
1579   address low_boundary = verified_entry_point();
1580   if (is_not_entrant()) {
1581     low_boundary += NativeJump::instruction_size;
1582     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1583     // (See comment above.)
1584   }
1585 









1586   RelocIterator iter(this, low_boundary);
1587 
1588   while (iter.next()) {
1589     if (iter.type() == relocInfo::oop_type ) {
1590       oop_Relocation* r = iter.oop_reloc();
1591       // In this loop, we must only follow those oops directly embedded in
1592       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1593       assert(1 == (r->oop_is_immediate()) +
1594                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1595              "oop must be found in exactly one place");
1596       if (r->oop_is_immediate() && r->oop_value() != NULL) {
1597         f->do_oop(r->oop_addr());
1598       }
1599     }
1600   }
1601 
1602   // Scopes
1603   // This includes oop constants not inlined in the code stream.
1604   for (oop* p = oops_begin(); p < oops_end(); p++) {
1605     if (*p == Universe::non_oop_word())  continue;  // skip non-oops


2823 #endif
2824 #ifdef COMPILER2
2825   c2_java_nmethod_stats.print_nmethod_stats("C2");
2826 #endif
2827 #if INCLUDE_JVMCI
2828   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
2829 #endif
2830   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
2831   DebugInformationRecorder::print_statistics();
2832 #ifndef PRODUCT
2833   pc_nmethod_stats.print_pc_stats();
2834 #endif
2835   Dependencies::print_statistics();
2836   if (xtty != NULL)  xtty->tail("statistics");
2837 }
2838 
2839 #endif // !PRODUCT
2840 
2841 #if INCLUDE_JVMCI
2842 void nmethod::clear_jvmci_installed_code() {
2843   assert_locked_or_safepoint(Patching_lock);



2844   if (_jvmci_installed_code != NULL) {
2845     JNIHandles::destroy_weak_global(_jvmci_installed_code);


2846     _jvmci_installed_code = NULL;
2847   }
2848 }
2849 
2850 void nmethod::clear_speculation_log() {
2851   assert_locked_or_safepoint(Patching_lock);
2852   if (_speculation_log != NULL) {
2853     JNIHandles::destroy_weak_global(_speculation_log);
2854     _speculation_log = NULL;
2855   }
2856 }
2857 
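clear_jvmci_installed_code() and clear_speculation_log() follow the standard lifecycle of a weak global handle: create it while the object is strongly reachable, resolve it before every use, destroy it exactly once and null the field. JNIHandles is HotSpot-internal, but the same lifecycle is visible through the public JNI API; a sketch from the native-code side (g_ref is a hypothetical cached handle):

#include <jni.h>

static jweak g_ref = nullptr;   // hypothetical cached weak global reference

// Create the handle while 'obj' is still strongly reachable.
void install(JNIEnv* env, jobject obj) {
  g_ref = env->NewWeakGlobalRef(obj);   // may return nullptr on failure
}

// Resolve before each use: yields a strong local ref, or nullptr if collected.
jobject resolve(JNIEnv* env) {
  return env->NewLocalRef(g_ref);
}

// Destroy exactly once and null the field, as the helpers above do.
void clear(JNIEnv* env) {
  if (g_ref != nullptr) {
    env->DeleteWeakGlobalRef(g_ref);    // analogous to destroy_weak_global()
    g_ref = nullptr;
  }
}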
2858 void nmethod::maybe_invalidate_installed_code() {
2859   assert(Patching_lock->is_locked() ||
2860          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
2861   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
2862   if (installed_code != NULL) {
2863     // Update the values in the InstalledCode instance if it still refers to this nmethod
2864     nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
2865     if (nm == this) {




2866       if (!is_alive()) {
2867         // Break the link between nmethod and InstalledCode such that the nmethod
2868         // can subsequently be flushed safely.  The link must be maintained while
2869         // the method could have live activations since invalidateInstalledCode
2870         // might want to invalidate all existing activations.
2871         InstalledCode::set_address(installed_code, 0);
2872         InstalledCode::set_entryPoint(installed_code, 0);
2873       } else if (is_not_entrant()) {
2874         // Remove the entry point so any invocation will fail but keep
2875       // the address link around so that existing activations can
2876         // be invalidated.
2877         InstalledCode::set_entryPoint(installed_code, 0);
2878       }
2879     }
2880   }
2881   if (!is_alive()) {
2882     // Clear these out after the nmethod has been unregistered and any
2883     // updates to the InstalledCode instance have been performed.
2884     clear_jvmci_installed_code();
2885     clear_speculation_log();
2886   }
2887 }
2888 
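maybe_invalidate_installed_code() above is a small state machine over the nmethod's lifecycle: a dead nmethod severs both wrapper fields, a not-entrant one only removes the entry point so in-flight activations can still be invalidated through the address link. A compact sketch of that dispatch with toy types and states, not the real InstalledCode Java-field accessors:

#include <cstdint>
#include <cstdio>

enum State { in_use, not_entrant, zombie, unloaded };

struct ToyInstalledCode {        // mirrors the two Java fields updated above
  std::intptr_t address    = 0;  // link back to the nmethod
  std::intptr_t entryPoint = 0;  // raw entry point used for direct calls
};

static bool is_alive(State s) { return s == in_use || s == not_entrant; }

void maybe_invalidate(ToyInstalledCode& code, std::intptr_t nm, State s) {
  if (code.address != nm) return;   // wrapper refers to some other nmethod
  if (!is_alive(s)) {
    code.address    = 0;            // break the link entirely: safe to flush
    code.entryPoint = 0;
  } else if (s == not_entrant) {
    code.entryPoint = 0;            // new calls fail, activations still reachable
  }
}

int main() {
  ToyInstalledCode code{0x1000, 0x1040};
  maybe_invalidate(code, 0x1000, not_entrant);
  std::printf("address=%lld entryPoint=%lld\n",
              (long long)code.address, (long long)code.entryPoint);  // 4096 0
  return 0;
}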
2889 void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
2890   if (installedCode() == NULL) {
2891     THROW(vmSymbols::java_lang_NullPointerException());
2892   }
2893   jlong nativeMethod = InstalledCode::address(installedCode);
2894   nmethod* nm = (nmethod*)nativeMethod;
2895   if (nm == NULL) {
2896     // Nothing to do
2897     return;
2898   }
2899 
2900   nmethodLocker nml(nm);
2901 #ifdef ASSERT
2902   {
2903     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
2904     // This relationship can only be checked safely under a lock
2905     assert(!nm->is_alive() || nm->jvmci_installed_code() == installedCode(), "sanity check");
2906   }
2907 #endif
2908 
2909   if (nm->is_alive()) {
2910     // Invalidating the InstalledCode means we want the nmethod
2911     // to be deoptimized.

2912     nm->mark_for_deoptimization();
2913     VM_Deoptimize op;
2914     VMThread::execute(&op);
2915   }
2916 
2917   // Multiple threads could reach this point so we now need to
2918   // lock and re-check the link to the nmethod so that only one
2919   // thread clears it.
2920   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);


2921   if (InstalledCode::address(installedCode) == nativeMethod) {
2922     InstalledCode::set_address(installedCode, 0);
2923   }
2924 }
2925 
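The tail of invalidate_installed_code() above is a lock-then-recheck: several threads may race to invalidate the same wrapper, so each re-reads the link under Patching_lock and only the thread that still sees the original nmethod clears it. A generic sketch of that pattern with a std::mutex and a toy field in place of the InstalledCode.address slot:

#include <cstdint>
#include <mutex>

static std::mutex patching_lock;               // stands in for Patching_lock
static std::intptr_t installed_address = 0;    // stands in for InstalledCode.address

// Clear the link only if it still holds the value this thread saw earlier.
// Re-checking under the lock guarantees exactly one racing thread clears it.
bool clear_link_if_unchanged(std::intptr_t expected) {
  std::lock_guard<std::mutex> guard(patching_lock);
  if (installed_address == expected) {
    installed_address = 0;
    return true;     // this thread broke the link
  }
  return false;      // another thread already cleared or re-linked it
}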
2926 oop nmethod::jvmci_installed_code() {
2927   return JNIHandles::resolve(_jvmci_installed_code);
2928 }
2929 
2930 oop nmethod::speculation_log() {
2931   return JNIHandles::resolve(_speculation_log);
2932 }
2933 
2934 char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
2935   if (!this->is_compiled_by_jvmci()) {
2936     return NULL;
2937   }
2938   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
2939   if (installed_code != NULL) {
2940     oop installed_code_name = NULL;
2941     if (installed_code->is_a(InstalledCode::klass())) {
2942       installed_code_name = InstalledCode::name(installed_code);
2943     }
2944     if (installed_code_name != NULL) {
2945       return java_lang_String::as_utf8_string(installed_code_name, buf, (int)buflen);



2946     }
2947   }
2948   return NULL;

2949 }
2950 #endif