1606
// Process this nmethod during a class-unloading pass: either transition it
// to the unloaded state, or clean out references to unloaded classes.
// NOTE(review): presumably invoked by the GC when weak references may have
// been cleared -- confirm against callers.
//
// unloading_occurred - forwarded to unload_nmethod_caches(); per its name,
//                      true when some class unloading actually happened.
void nmethod::do_unloading(bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  if (is_unloading()) {
    // The nmethod is dying (is_unloading() answered true): transition it
    // to the unloaded state rather than trying to clean it.
    make_unloaded();
  } else {
#if INCLUDE_JVMCI
    // _jvmci_installed_code is a weak global JNI handle (see the
    // is_global_weak_cleared() query). If the GC cleared the referent,
    // drop our reference -- and, when the flag says the referent's
    // liveness controls code validity, invalidate the code first.
    if (_jvmci_installed_code != NULL) {
      if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
        if (_jvmci_installed_code_triggers_invalidation) {
          make_not_entrant();
        }
        clear_jvmci_installed_code();
      }
    }
#endif

    // Still-alive nmethod: purge its cached references.
    // NOTE(review): presumably cleans inline caches / metadata referencing
    // unloaded classes -- confirm in unload_nmethod_caches().
    unload_nmethod_caches(unloading_occurred);
  }
}
1629
1630 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1631 // make sure the oops ready to receive visitors
1632 assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1633 assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1634
1635 // Prevent extra code cache walk for platforms that don't have immediate oops.
1636 if (relocInfo::mustIterateImmediateOopsInCode()) {
1637 RelocIterator iter(this, oops_reloc_begin());
1638
1639 while (iter.next()) {
1640 if (iter.type() == relocInfo::oop_type ) {
1641 oop_Relocation* r = iter.oop_reloc();
1642 // In this loop, we must only follow those oops directly embedded in
1643 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
1644 assert(1 == (r->oop_is_immediate()) +
1645 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
|
1606
// Process this nmethod during a class-unloading pass: either transition it
// to the unloaded state, or clean out references to unloaded classes.
// NOTE(review): presumably invoked by the GC when weak references may have
// been cleared -- confirm against callers.
//
// unloading_occurred - forwarded to unload_nmethod_caches(); per its name,
//                      true when some class unloading actually happened.
void nmethod::do_unloading(bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  if (is_unloading()) {
    // The nmethod is dying (is_unloading() answered true): transition it
    // to the unloaded state rather than trying to clean it.
    make_unloaded();
  } else {
#if INCLUDE_JVMCI
    // _jvmci_installed_code is a weak global JNI handle (see the
    // is_global_weak_cleared() query). If the GC cleared the referent,
    // drop our reference -- and, when the flag says the referent's
    // liveness controls code validity, invalidate the code first.
    if (_jvmci_installed_code != NULL) {
      if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
        if (_jvmci_installed_code_triggers_invalidation) {
          make_not_entrant();
        }
        clear_jvmci_installed_code();
      }
    }
#endif

    // Still-alive nmethod: purge its cached references. The call must
    // succeed here -- per the guarantee message, cleaning in this context
    // must not require IC transition stubs (guarantee fires, even in
    // product builds, if unload_nmethod_caches() returns false).
    guarantee(unload_nmethod_caches(unloading_occurred),
              "Should not need transition stubs");
  }
}
1630
1631 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1632 // make sure the oops ready to receive visitors
1633 assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1634 assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1635
1636 // Prevent extra code cache walk for platforms that don't have immediate oops.
1637 if (relocInfo::mustIterateImmediateOopsInCode()) {
1638 RelocIterator iter(this, oops_reloc_begin());
1639
1640 while (iter.next()) {
1641 if (iter.type() == relocInfo::oop_type ) {
1642 oop_Relocation* r = iter.oop_reloc();
1643 // In this loop, we must only follow those oops directly embedded in
1644 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
1645 assert(1 == (r->oop_is_immediate()) +
1646 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
|