src/share/vm/code/nmethod.cpp

Print this page




 445     consts_size()        +
 446     insts_size()         +
 447     stub_size()          +
 448     scopes_data_size()   +
 449     scopes_pcs_size()    +
 450     handler_table_size() +
 451     nul_chk_table_size();
 452 }
 453 
 454 const char* nmethod::compile_kind() const {  // short tag describing how this nmethod was produced (used in logging)
 455   if (is_osr_method())     return "osr";      // compiled for on-stack replacement
 456   if (method() != NULL && is_native_method())  return "c2n";  // wrapper for a native method
 457   return NULL;                                // ordinary compilation: no special tag
 458 }
 459 
 460 // Fill in default values for various flag fields
 461 void nmethod::init_defaults() {  // reset flags/links to neutral values; constructors then set what differs
 462   _state                      = alive;       // freshly created nmethods start out alive
 463   _marked_for_reclamation     = 0;
 464   _has_flushed_dependencies   = 0;
 465   _speculatively_disconnected = 0;
 466   _has_unsafe_access          = 0;
 467   _has_method_handle_invokes  = 0;
 468   _lazy_critical_native       = 0;
 469   _has_wide_vectors           = 0;
 470   _marked_for_deoptimization  = 0;
 471   _lock_count                 = 0;
 472   _stack_traversal_mark       = 0;
 473   _unload_reported            = false;           // jvmti state
 474 
 475 #ifdef ASSERT
 476   _oops_are_stale             = false;  // debug-only guard; set when oops become invalid (see zombie transition)
 477 #endif
 478 
 479   _oops_do_mark_link       = NULL;
 480   _jmethod_id              = NULL;
 481   _osr_link                = NULL;
 482   _scavenge_root_link      = NULL;  // not on the code cache's scavenge-root list yet
 483   _scavenge_root_state     = 0;
 484   _saved_nmethod_link      = NULL;
 485   _compiler                = NULL;
 486 
 487 #ifdef HAVE_DTRACE_H
 488   _trap_offset             = 0;     // dtrace-probe trap offset; only exists in dtrace-enabled builds
 489 #endif // def HAVE_DTRACE_H
 490 }
 491 
 492 nmethod* nmethod::new_native_nmethod(methodHandle method,
 493   int compile_id,
 494   CodeBuffer *code_buffer,
 495   int vep_offset,
 496   int frame_complete,
 497   int frame_size,
 498   ByteSize basic_lock_owner_sp_offset,
 499   ByteSize basic_lock_sp_offset,
 500   OopMapSet* oop_maps) {
 501   code_buffer->finalize_oop_references(method);
 502   // create nmethod
 503   nmethod* nm = NULL;
 504   {


 669     _deoptimize_mh_offset    = 0;
 670     _orig_pc_offset          = 0;
 671 
 672     _consts_offset           = data_offset();
 673     _stub_offset             = data_offset();
 674     _oops_offset             = data_offset();
 675     _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
 676     _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
 677     _scopes_pcs_offset       = _scopes_data_offset;
 678     _dependencies_offset     = _scopes_pcs_offset;
 679     _handler_table_offset    = _dependencies_offset;
 680     _nul_chk_table_offset    = _handler_table_offset;
 681     _nmethod_end_offset      = _nul_chk_table_offset;
 682     _compile_id              = compile_id;
 683     _comp_level              = CompLevel_none;
 684     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 685     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 686     _osr_entry_point         = NULL;
 687     _exception_cache         = NULL;
 688     _pc_desc_cache.reset_to(NULL);

 689 
 690     code_buffer->copy_values_to(this);
 691     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
 692       CodeCache::add_scavenge_root_nmethod(this);
 693       Universe::heap()->register_nmethod(this);
 694     }
 695     debug_only(verify_scavenge_root_oops());
 696     CodeCache::commit(this);
 697   }
 698 
 699   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 700     ttyLocker ttyl;  // keep the following output all in one block
 701     // This output goes directly to the tty, not the compiler log.
 702     // To enable tools to match it up with the compilation activity,
 703     // be sure to tag this tty output with the compile ID.
 704     if (xtty != NULL) {
 705       xtty->begin_head("print_native_nmethod");
 706       xtty->method(_method);
 707       xtty->stamp();
 708       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);


 753     _unwind_handler_offset   = -1;
 754     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
 755     _orig_pc_offset          = 0;
 756     _consts_offset           = data_offset();
 757     _stub_offset             = data_offset();
 758     _oops_offset             = data_offset();
 759     _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
 760     _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
 761     _scopes_pcs_offset       = _scopes_data_offset;
 762     _dependencies_offset     = _scopes_pcs_offset;
 763     _handler_table_offset    = _dependencies_offset;
 764     _nul_chk_table_offset    = _handler_table_offset;
 765     _nmethod_end_offset      = _nul_chk_table_offset;
 766     _compile_id              = 0;  // default
 767     _comp_level              = CompLevel_none;
 768     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 769     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 770     _osr_entry_point         = NULL;
 771     _exception_cache         = NULL;
 772     _pc_desc_cache.reset_to(NULL);

 773 
 774     code_buffer->copy_values_to(this);
 775     debug_only(verify_scavenge_root_oops());
 776     CodeCache::commit(this);
 777   }
 778 
 779   if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 780     ttyLocker ttyl;  // keep the following output all in one block
 781     // This output goes directly to the tty, not the compiler log.
 782     // To enable tools to match it up with the compilation activity,
 783     // be sure to tag this tty output with the compile ID.
 784     if (xtty != NULL) {
 785       xtty->begin_head("print_dtrace_nmethod");
 786       xtty->method(_method);
 787       xtty->stamp();
 788       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 789     }
 790     // print the header part first
 791     print();
 792     // then print the requested information


 825   AbstractCompiler* compiler,
 826   int comp_level
 827   )
 828   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
 829              nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
 830   _native_receiver_sp_offset(in_ByteSize(-1)),
 831   _native_basic_lock_sp_offset(in_ByteSize(-1))
 832 {
 833   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 834   {
 835     debug_only(No_Safepoint_Verifier nsv;)
 836     assert_locked_or_safepoint(CodeCache_lock);
 837 
 838     init_defaults();
 839     _method                  = method;
 840     _entry_bci               = entry_bci;
 841     _compile_id              = compile_id;
 842     _comp_level              = comp_level;
 843     _compiler                = compiler;
 844     _orig_pc_offset          = orig_pc_offset;

 845 
 846     // Section offsets
 847     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
 848     _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
 849 
 850     // Exception handler and deopt handler are in the stub section
 851     assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
 852     assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
 853     _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
 854     _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
 855     if (offsets->value(CodeOffsets::DeoptMH) != -1) {
 856       _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
 857     } else {
 858       _deoptimize_mh_offset  = -1;
 859     }
 860     if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
 861       _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
 862     } else {
 863       _unwind_handler_offset = -1;
 864     }


1244     _method = NULL;            // Clear the method of this dead nmethod
1245   }
1246   // Make the class unloaded - i.e., change state and notify sweeper
1247   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1248   if (is_in_use()) {
1249     // Transitioning directly from live to unloaded -- so
1250     // we need to force a cache clean-up; remember this
1251     // for later on.
1252     CodeCache::set_needs_cache_clean(true);
1253   }
1254   _state = unloaded;
1255 
1256   // Log the unloading.
1257   log_state_change();
1258 
1259   // The Method* is gone at this point
1260   assert(_method == NULL, "Tautology");
1261 
1262   set_osr_link(NULL);
1263   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1264   NMethodSweeper::notify(this);
1265 }
1266 
1267 void nmethod::invalidate_osr_method() {  // make this OSR nmethod unusable as an OSR entry point
1268   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");  // must be an OSR compile, not a standard entry
1269   // Remove from list of active nmethods
1270   if (method() != NULL)
1271     method()->method_holder()->remove_osr_nmethod(this);  // unlink from the holder class's OSR nmethod list
1272   // Set entry as invalid
1273   _entry_bci = InvalidOSREntryBci;  // sentinel bci marks the OSR entry as invalid
1274 }
1275 
1276 void nmethod::log_state_change() const {
1277   if (LogCompilation) {
1278     if (xtty != NULL) {
1279       ttyLocker ttyl;  // keep the following output all in one block
1280       if (_state == unloaded) {
1281         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1282                          os::current_thread_id());
1283       } else {
1284         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",


1334     // The caller can be calling the method statically or through an inline
1335     // cache call.
1336     if (!is_osr_method() && !is_not_entrant()) {
1337       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1338                   SharedRuntime::get_handle_wrong_method_stub());
1339     }
1340 
1341     if (is_in_use()) {
1342       // It's a true state change, so mark the method as decompiled.
1343       // Do it only for transition from alive.
1344       inc_decompile_count();
1345     }
1346 
1347     // If the state is becoming a zombie, signal to unregister the nmethod with
1348     // the heap.
1349     // This nmethod may have already been unloaded during a full GC.
1350     if ((state == zombie) && !is_unloaded()) {
1351       nmethod_needs_unregister = true;
1352     }
1353 








1354     // Change state
1355     _state = state;
1356 
1357     // Log the transition once
1358     log_state_change();
1359 
1360     // Remove nmethod from method.
1361     // We need to check if both the _code and _from_compiled_code_entry_point
1362     // refer to this nmethod because there is a race in setting these two fields
1363     // in Method* as seen in bugid 4947125.
1364     // If the vep() points to the zombie nmethod, the memory for the nmethod
1365     // could be flushed and the compiler and vtable stubs could still call
1366     // through it.
1367     if (method() != NULL && (method()->code() == this ||
1368                              method()->from_compiled_entry() == verified_entry_point())) {
1369       HandleMark hm;
1370       method()->clear_code();
1371     }
1372 
1373     if (state == not_entrant) {
1374       mark_as_seen_on_stack();
1375     }
1376 
1377   } // leave critical region under Patching_lock
1378 
1379   // When the nmethod becomes zombie it is no longer alive so the
1380   // dependencies must be flushed.  nmethods in the not_entrant
1381   // state will be flushed later when the transition to zombie
1382   // happens or they get unloaded.
1383   if (state == zombie) {
1384     {
1385       // Flushing dependecies must be done before any possible
1386       // safepoint can sneak in, otherwise the oops used by the
1387       // dependency logic could have become stale.
1388       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1389       if (nmethod_needs_unregister) {
1390         Universe::heap()->unregister_nmethod(this);
1391       }
1392       flush_dependencies(NULL);
1393     }
1394 
1395     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1396     // event and it hasn't already been reported for this nmethod then


1399     // we no longer go to a safepoint here.
1400     post_compiled_method_unload();
1401 
1402 #ifdef ASSERT
1403     // It's no longer safe to access the oops section since zombie
1404     // nmethods aren't scanned for GC.
1405     _oops_are_stale = true;
1406 #endif
1407      // the Method may be reclaimed by class unloading now that the
1408      // nmethod is in zombie state
1409     set_method(NULL);
1410   } else {
1411     assert(state == not_entrant, "other cases may need to be handled differently");
1412   }
1413 
1414   if (TraceCreateZombies) {
1415     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1416   }
1417 
1418   // Make sweeper aware that there is a zombie method that needs to be removed
1419   NMethodSweeper::notify(this);
1420 
1421   return true;
1422 }
1423 
1424 void nmethod::flush() {  // final deallocation of a dead nmethod; caller holds CodeCache_lock or is at a safepoint
1425   // Note that there are no valid oops in the nmethod anymore.
1426   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1427   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1428 
1429   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1430   assert_locked_or_safepoint(CodeCache_lock);
1431 
1432   // completely deallocate this method
1433   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1434   if (PrintMethodFlushing) {
1435     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1436         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
1437   }
1438 
1439   // We need to deallocate any ExceptionCache data.
1440   // Note that we do not need to grab the nmethod lock for this, it
1441   // better be thread safe if we're disposing of it!
1442   ExceptionCache* ec = exception_cache();
1443   set_exception_cache(NULL);   // detach the list head first, then walk and delete each entry
1444   while(ec != NULL) {
1445     ExceptionCache* next = ec->next();
1446     delete ec;
1447     ec = next;
1448   }
1449 
1450   if (on_scavenge_root_list()) {
1451     CodeCache::drop_scavenge_root_nmethod(this);  // unlink from the scavenge-root list before the memory goes away
1452   }
1453 
1454   if (is_speculatively_disconnected()) {
1455     CodeCache::remove_saved_code(this);  // also drop any saved-code bookkeeping for this nmethod
1456   }
1457 
1458 #ifdef SHARK
1459   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());  // Shark builds track compiled code separately
1460 #endif // SHARK
1461 
1462   ((CodeBlob*)(this))->flush();  // base-class cleanup before the storage is returned
1463 
1464   CodeCache::free(this);  // finally return the memory to the code cache
1465 }
1466 
1467 
1468 //
1469 // Notify all classes this nmethod is dependent on that it is no
1470 // longer dependent. This should only be called in two situations.
1471 // First, when a nmethod transitions to a zombie all dependents need
1472 // to be clear.  Since zombification happens at a safepoint there's no
1473 // synchronization issues.  The second place is a little more tricky.
1474 // During phase 1 of mark sweep class unloading may happen and as a
1475 // result some nmethods may get unloaded.  In this case the flushing




 445     consts_size()        +
 446     insts_size()         +
 447     stub_size()          +
 448     scopes_data_size()   +
 449     scopes_pcs_size()    +
 450     handler_table_size() +
 451     nul_chk_table_size();
 452 }
 453 
 454 const char* nmethod::compile_kind() const {  // short tag describing how this nmethod was produced (used in logging)
 455   if (is_osr_method())     return "osr";      // compiled for on-stack replacement
 456   if (method() != NULL && is_native_method())  return "c2n";  // wrapper for a native method
 457   return NULL;                                // ordinary compilation: no special tag
 458 }
 459 
 460 // Fill in default values for various flag fields
 461 void nmethod::init_defaults() {  // reset flags/links to neutral values; constructors then set what differs
 462   _state                      = alive;       // freshly created nmethods start out alive
 463   _marked_for_reclamation     = 0;
 464   _has_flushed_dependencies   = 0;

 465   _has_unsafe_access          = 0;
 466   _has_method_handle_invokes  = 0;
 467   _lazy_critical_native       = 0;
 468   _has_wide_vectors           = 0;
 469   _marked_for_deoptimization  = 0;
 470   _lock_count                 = 0;
 471   _stack_traversal_mark       = 0;
 472   _unload_reported            = false;           // jvmti state
 473 
 474 #ifdef ASSERT
 475   _oops_are_stale             = false;  // debug-only guard; set when oops become invalid (see zombie transition)
 476 #endif
 477 
 478   _oops_do_mark_link       = NULL;
 479   _jmethod_id              = NULL;
 480   _osr_link                = NULL;
 481   _scavenge_root_link      = NULL;  // not on the code cache's scavenge-root list yet
 482   _scavenge_root_state     = 0;

 483   _compiler                = NULL;
 484 
 485 #ifdef HAVE_DTRACE_H
 486   _trap_offset             = 0;     // dtrace-probe trap offset; only exists in dtrace-enabled builds
 487 #endif // def HAVE_DTRACE_H
 488 }
 489 
 490 nmethod* nmethod::new_native_nmethod(methodHandle method,
 491   int compile_id,
 492   CodeBuffer *code_buffer,
 493   int vep_offset,
 494   int frame_complete,
 495   int frame_size,
 496   ByteSize basic_lock_owner_sp_offset,
 497   ByteSize basic_lock_sp_offset,
 498   OopMapSet* oop_maps) {
 499   code_buffer->finalize_oop_references(method);
 500   // create nmethod
 501   nmethod* nm = NULL;
 502   {


 667     _deoptimize_mh_offset    = 0;
 668     _orig_pc_offset          = 0;
 669 
 670     _consts_offset           = data_offset();
 671     _stub_offset             = data_offset();
 672     _oops_offset             = data_offset();
 673     _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
 674     _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
 675     _scopes_pcs_offset       = _scopes_data_offset;
 676     _dependencies_offset     = _scopes_pcs_offset;
 677     _handler_table_offset    = _dependencies_offset;
 678     _nul_chk_table_offset    = _handler_table_offset;
 679     _nmethod_end_offset      = _nul_chk_table_offset;
 680     _compile_id              = compile_id;
 681     _comp_level              = CompLevel_none;
 682     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 683     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 684     _osr_entry_point         = NULL;
 685     _exception_cache         = NULL;
 686     _pc_desc_cache.reset_to(NULL);
 687     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 688 
 689     code_buffer->copy_values_to(this);
 690     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
 691       CodeCache::add_scavenge_root_nmethod(this);
 692       Universe::heap()->register_nmethod(this);
 693     }
 694     debug_only(verify_scavenge_root_oops());
 695     CodeCache::commit(this);
 696   }
 697 
 698   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 699     ttyLocker ttyl;  // keep the following output all in one block
 700     // This output goes directly to the tty, not the compiler log.
 701     // To enable tools to match it up with the compilation activity,
 702     // be sure to tag this tty output with the compile ID.
 703     if (xtty != NULL) {
 704       xtty->begin_head("print_native_nmethod");
 705       xtty->method(_method);
 706       xtty->stamp();
 707       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);


 752     _unwind_handler_offset   = -1;
 753     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
 754     _orig_pc_offset          = 0;
 755     _consts_offset           = data_offset();
 756     _stub_offset             = data_offset();
 757     _oops_offset             = data_offset();
 758     _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
 759     _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
 760     _scopes_pcs_offset       = _scopes_data_offset;
 761     _dependencies_offset     = _scopes_pcs_offset;
 762     _handler_table_offset    = _dependencies_offset;
 763     _nul_chk_table_offset    = _handler_table_offset;
 764     _nmethod_end_offset      = _nul_chk_table_offset;
 765     _compile_id              = 0;  // default
 766     _comp_level              = CompLevel_none;
 767     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 768     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 769     _osr_entry_point         = NULL;
 770     _exception_cache         = NULL;
 771     _pc_desc_cache.reset_to(NULL);
 772     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 773 
 774     code_buffer->copy_values_to(this);
 775     debug_only(verify_scavenge_root_oops());
 776     CodeCache::commit(this);
 777   }
 778 
 779   if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 780     ttyLocker ttyl;  // keep the following output all in one block
 781     // This output goes directly to the tty, not the compiler log.
 782     // To enable tools to match it up with the compilation activity,
 783     // be sure to tag this tty output with the compile ID.
 784     if (xtty != NULL) {
 785       xtty->begin_head("print_dtrace_nmethod");
 786       xtty->method(_method);
 787       xtty->stamp();
 788       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 789     }
 790     // print the header part first
 791     print();
 792     // then print the requested information


 825   AbstractCompiler* compiler,
 826   int comp_level
 827   )
 828   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
 829              nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
 830   _native_receiver_sp_offset(in_ByteSize(-1)),
 831   _native_basic_lock_sp_offset(in_ByteSize(-1))
 832 {
 833   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 834   {
 835     debug_only(No_Safepoint_Verifier nsv;)
 836     assert_locked_or_safepoint(CodeCache_lock);
 837 
 838     init_defaults();
 839     _method                  = method;
 840     _entry_bci               = entry_bci;
 841     _compile_id              = compile_id;
 842     _comp_level              = comp_level;
 843     _compiler                = compiler;
 844     _orig_pc_offset          = orig_pc_offset;
 845     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 846 
 847     // Section offsets
 848     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
 849     _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
 850 
 851     // Exception handler and deopt handler are in the stub section
 852     assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
 853     assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
 854     _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
 855     _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
 856     if (offsets->value(CodeOffsets::DeoptMH) != -1) {
 857       _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
 858     } else {
 859       _deoptimize_mh_offset  = -1;
 860     }
 861     if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
 862       _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
 863     } else {
 864       _unwind_handler_offset = -1;
 865     }


1245     _method = NULL;            // Clear the method of this dead nmethod
1246   }
1247   // Make the class unloaded - i.e., change state and notify sweeper
1248   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1249   if (is_in_use()) {
1250     // Transitioning directly from live to unloaded -- so
1251     // we need to force a cache clean-up; remember this
1252     // for later on.
1253     CodeCache::set_needs_cache_clean(true);
1254   }
1255   _state = unloaded;
1256 
1257   // Log the unloading.
1258   log_state_change();
1259 
1260   // The Method* is gone at this point
1261   assert(_method == NULL, "Tautology");
1262 
1263   set_osr_link(NULL);
1264   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1265   NMethodSweeper::notify();
1266 }
1267 
1268 void nmethod::invalidate_osr_method() {  // make this OSR nmethod unusable as an OSR entry point
1269   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");  // must be an OSR compile, not a standard entry
1270   // Remove from list of active nmethods
1271   if (method() != NULL)
1272     method()->method_holder()->remove_osr_nmethod(this);  // unlink from the holder class's OSR nmethod list
1273   // Set entry as invalid
1274   _entry_bci = InvalidOSREntryBci;  // sentinel bci marks the OSR entry as invalid
1275 }
1276 
1277 void nmethod::log_state_change() const {
1278   if (LogCompilation) {
1279     if (xtty != NULL) {
1280       ttyLocker ttyl;  // keep the following output all in one block
1281       if (_state == unloaded) {
1282         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1283                          os::current_thread_id());
1284       } else {
1285         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",


1335     // The caller can be calling the method statically or through an inline
1336     // cache call.
1337     if (!is_osr_method() && !is_not_entrant()) {
1338       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1339                   SharedRuntime::get_handle_wrong_method_stub());
1340     }
1341 
1342     if (is_in_use()) {
1343       // It's a true state change, so mark the method as decompiled.
1344       // Do it only for transition from alive.
1345       inc_decompile_count();
1346     }
1347 
1348     // If the state is becoming a zombie, signal to unregister the nmethod with
1349     // the heap.
1350     // This nmethod may have already been unloaded during a full GC.
1351     if ((state == zombie) && !is_unloaded()) {
1352       nmethod_needs_unregister = true;
1353     }
1354 
1355     // Must happen before state change. Otherwise we have a race condition in
1356     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1357     // transition its state from 'not_entrant' to 'zombie' without having to wait
1358     // for stack scanning.
1359     if (state == not_entrant) {
1360       mark_as_seen_on_stack();
1361     }
1362 
1363     // Change state
1364     _state = state;
1365 
1366     // Log the transition once
1367     log_state_change();
1368 
1369     // Remove nmethod from method.
1370     // We need to check if both the _code and _from_compiled_code_entry_point
1371     // refer to this nmethod because there is a race in setting these two fields
1372     // in Method* as seen in bugid 4947125.
1373     // If the vep() points to the zombie nmethod, the memory for the nmethod
1374     // could be flushed and the compiler and vtable stubs could still call
1375     // through it.
1376     if (method() != NULL && (method()->code() == this ||
1377                              method()->from_compiled_entry() == verified_entry_point())) {
1378       HandleMark hm;
1379       method()->clear_code();
1380     }





1381   } // leave critical region under Patching_lock
1382 
1383   // When the nmethod becomes zombie it is no longer alive so the
1384   // dependencies must be flushed.  nmethods in the not_entrant
1385   // state will be flushed later when the transition to zombie
1386   // happens or they get unloaded.
1387   if (state == zombie) {
1388     {
1389       // Flushing dependecies must be done before any possible
1390       // safepoint can sneak in, otherwise the oops used by the
1391       // dependency logic could have become stale.
1392       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1393       if (nmethod_needs_unregister) {
1394         Universe::heap()->unregister_nmethod(this);
1395       }
1396       flush_dependencies(NULL);
1397     }
1398 
1399     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1400     // event and it hasn't already been reported for this nmethod then


1403     // we no longer go to a safepoint here.
1404     post_compiled_method_unload();
1405 
1406 #ifdef ASSERT
1407     // It's no longer safe to access the oops section since zombie
1408     // nmethods aren't scanned for GC.
1409     _oops_are_stale = true;
1410 #endif
1411      // the Method may be reclaimed by class unloading now that the
1412      // nmethod is in zombie state
1413     set_method(NULL);
1414   } else {
1415     assert(state == not_entrant, "other cases may need to be handled differently");
1416   }
1417 
1418   if (TraceCreateZombies) {
1419     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1420   }
1421 
1422   // Make sweeper aware that there is a zombie method that needs to be removed
1423   NMethodSweeper::notify();
1424 
1425   return true;
1426 }
1427 
1428 void nmethod::flush() {  // final deallocation of a dead nmethod; caller holds CodeCache_lock or is at a safepoint
1429   // Note that there are no valid oops in the nmethod anymore.
1430   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1431   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1432 
1433   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1434   assert_locked_or_safepoint(CodeCache_lock);
1435 
1436   // completely deallocate this method
1437   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1438   if (PrintMethodFlushing) {
1439     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1440         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
1441   }
1442 
1443   // We need to deallocate any ExceptionCache data.
1444   // Note that we do not need to grab the nmethod lock for this, it
1445   // better be thread safe if we're disposing of it!
1446   ExceptionCache* ec = exception_cache();
1447   set_exception_cache(NULL);   // detach the list head first, then walk and delete each entry
1448   while(ec != NULL) {
1449     ExceptionCache* next = ec->next();
1450     delete ec;
1451     ec = next;
1452   }
1453 
1454   if (on_scavenge_root_list()) {
1455     CodeCache::drop_scavenge_root_nmethod(this);  // unlink from the scavenge-root list before the memory goes away




1456   }
1457 
1458 #ifdef SHARK
1459   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());  // Shark builds track compiled code separately
1460 #endif // SHARK
1461 
1462   ((CodeBlob*)(this))->flush();  // base-class cleanup before the storage is returned
1463 
1464   CodeCache::free(this);  // finally return the memory to the code cache
1465 }
1466 
1467 
1468 //
1469 // Notify all classes this nmethod is dependent on that it is no
1470 // longer dependent. This should only be called in two situations.
1471 // First, when a nmethod transitions to a zombie all dependents need
1472 // to be clear.  Since zombification happens at a safepoint there's no
1473 // synchronization issues.  The second place is a little more tricky.
1474 // During phase 1 of mark sweep class unloading may happen and as a
1475 // result some nmethods may get unloaded.  In this case the flushing