1607
// GC support: unloads this nmethod during a garbage-collection unloading pass.
// If the nmethod is found to be unloading, it is transitioned to the unloaded
// state; otherwise any dead JVMCI installed-code weak handle is cleared and the
// nmethod's cached metadata is purged.
// NOTE(review): the leading "16xx" tokens are line numbers baked into this
// listing, not code.
1608 void nmethod::do_unloading(bool unloading_occurred) {
1609 // Make sure the oop's ready to receive visitors
1610 assert(!is_zombie() && !is_unloaded(),
1611 "should not call follow on zombie or unloaded nmethod");
1612
1613 if (is_unloading()) {
1614 make_unloaded();
1615 } else {
1616 #if INCLUDE_JVMCI
// _jvmci_installed_code is held via a global weak JNI handle; once the GC has
// cleared it, optionally make the nmethod not-entrant (when the installed code
// is configured to trigger invalidation) and drop the stale reference.
1617 if (_jvmci_installed_code != NULL) {
1618 if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
1619 if (_jvmci_installed_code_triggers_invalidation) {
1620 make_not_entrant();
1621 }
1622 clear_jvmci_installed_code();
1623 }
1624 }
1625 #endif
1626
// unloading_occurred tells the cache-cleaning pass whether any class unloading
// happened in this GC cycle. The return value is ignored here; a second copy of
// this function later in the listing wraps this call in guarantee(...) instead
// — the two variants differ and should be reconciled.
1627 unload_nmethod_caches(unloading_occurred);
1628 }
1629 }
1630
// Visits the oops of this nmethod with closure f. The visible portion only
// checks state and iterates oop-type relocations embedded directly in the code
// (scope oops are handled elsewhere — per the in-loop comment below).
// NOTE(review): this listing is truncated mid-assert at the trailing "|"
// marker; only comments were added, code bytes are unchanged.
1631 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1632 // make sure the oops ready to receive visitors
1633 assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1634 assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1635
1636 // Prevent extra code cache walk for platforms that don't have immediate oops.
1637 if (relocInfo::mustIterateImmediateOopsInCode()) {
1638 RelocIterator iter(this, oops_reloc_begin());
1639
1640 while (iter.next()) {
1641 if (iter.type() == relocInfo::oop_type ) {
1642 oop_Relocation* r = iter.oop_reloc();
1643 // In this loop, we must only follow those oops directly embedded in
1644 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
// Sanity check: the relocation must be exactly one of immediate-in-code or
// located inside the nmethod's oops section (assert continues past the cut).
1645 assert(1 == (r->oop_is_immediate()) +
1646 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
|
1607
// GC support: unloads this nmethod during a garbage-collection unloading pass.
// If the nmethod is found to be unloading, it is transitioned to the unloaded
// state; otherwise any dead JVMCI installed-code weak handle is cleared and the
// nmethod's cached metadata is purged.
// NOTE(review): the leading "16xx" tokens are line numbers baked into this
// listing, not code.
1608 void nmethod::do_unloading(bool unloading_occurred) {
1609 // Make sure the oop's ready to receive visitors
1610 assert(!is_zombie() && !is_unloaded(),
1611 "should not call follow on zombie or unloaded nmethod");
1612
1613 if (is_unloading()) {
1614 make_unloaded();
1615 } else {
1616 #if INCLUDE_JVMCI
// _jvmci_installed_code is held via a global weak JNI handle; once the GC has
// cleared it, optionally make the nmethod not-entrant (when the installed code
// is configured to trigger invalidation) and drop the stale reference.
1617 if (_jvmci_installed_code != NULL) {
1618 if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
1619 if (_jvmci_installed_code_triggers_invalidation) {
1620 make_not_entrant();
1621 }
1622 clear_jvmci_installed_code();
1623 }
1624 }
1625 #endif
1626
// This copy hard-fails (guarantee aborts the VM even in product builds) if
// cleaning the caches reports that IC transition stubs would be needed — per
// the message, none should be at this point. An earlier copy of this function
// in the listing calls unload_nmethod_caches() without the guarantee; the two
// variants differ and should be reconciled.
1627 guarantee(unload_nmethod_caches(unloading_occurred),
1628 "Should not need transition stubs");
1629 }
1630 }
1631
// Visits the oops of this nmethod with closure f. The visible portion only
// checks state and iterates oop-type relocations embedded directly in the code
// (scope oops are handled elsewhere — per the in-loop comment below).
// NOTE(review): this listing is truncated mid-assert at the trailing "|"
// marker; only comments were added, code bytes are unchanged.
1632 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1633 // make sure the oops ready to receive visitors
1634 assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1635 assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1636
1637 // Prevent extra code cache walk for platforms that don't have immediate oops.
1638 if (relocInfo::mustIterateImmediateOopsInCode()) {
1639 RelocIterator iter(this, oops_reloc_begin());
1640
1641 while (iter.next()) {
1642 if (iter.type() == relocInfo::oop_type ) {
1643 oop_Relocation* r = iter.oop_reloc();
1644 // In this loop, we must only follow those oops directly embedded in
1645 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
// Sanity check: the relocation must be exactly one of immediate-in-code or
// located inside the nmethod's oops section (assert continues past the cut).
1646 assert(1 == (r->oop_is_immediate()) +
1647 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
|