< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page
rev 48402 : 8193927: Optimize scanning code for oops.
Reviewed-by: simonis, mdoerr, aph
   1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1468       return true;;
1469     }
1470   }
1471 
1472   return false;
1473 }
1474 
1475 bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
1476   // Scopes
1477   for (oop* p = oops_begin(); p < oops_end(); p++) {
1478     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1479     if (can_unload(is_alive, p, unloading_occurred)) {
1480       return true;
1481     }
1482   }
1483   return false;
1484 }
1485 
1486 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
1487   // Compiled code
1488   {


1489   RelocIterator iter(this, low_boundary);
1490   while (iter.next()) {
1491     if (iter.type() == relocInfo::oop_type) {
1492       if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
1493         return true;
1494       }
1495     }
1496   }
1497   }
1498 
1499   return do_unloading_scopes(is_alive, unloading_occurred);
1500 }
1501 
1502 #if INCLUDE_JVMCI
1503 bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
1504   if (_jvmci_installed_code != NULL) {
1505     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
1506       if (_jvmci_installed_code_triggers_unloading) {
1507         // jweak reference processing has already cleared the referent
1508         make_unloaded(is_alive, NULL);


1567   // Visit metadata not embedded in the other places.
1568   if (_method != NULL) f(_method);
1569 }
1570 
1571 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1572   // make sure the oops ready to receive visitors
1573   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1574   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1575 
1576   // If the method is not entrant or zombie then a JMP is plastered over the
1577   // first few bytes.  If an oop in the old code was there, that oop
1578   // should not get GC'd.  Skip the first few bytes of oops on
1579   // not-entrant methods.
1580   address low_boundary = verified_entry_point();
1581   if (is_not_entrant()) {
1582     low_boundary += NativeJump::instruction_size;
1583     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1584     // (See comment above.)
1585   }
1586 


1587   RelocIterator iter(this, low_boundary);
1588 
1589   while (iter.next()) {
1590     if (iter.type() == relocInfo::oop_type ) {
1591       oop_Relocation* r = iter.oop_reloc();
1592       // In this loop, we must only follow those oops directly embedded in
1593       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1594       assert(1 == (r->oop_is_immediate()) +
1595                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1596              "oop must be found in exactly one place");
1597       if (r->oop_is_immediate() && r->oop_value() != NULL) {
1598         f->do_oop(r->oop_addr());

1599       }
1600     }
1601   }
1602 
1603   // Scopes
1604   // This includes oop constants not inlined in the code stream.
1605   for (oop* p = oops_begin(); p < oops_end(); p++) {
1606     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1607     f->do_oop(p);
1608   }
1609 }
1610 
// Sentinel value distinct from any real nmethod*, used to terminate the
// marked-nmethods list below.
#define NMETHOD_SENTINEL ((nmethod*)badAddress)

// Head of the linked list of nmethods marked during a GC pass.
nmethod* volatile nmethod::_oops_do_mark_nmethods;

// An nmethod is "marked" if its _mark_link is set non-null.
// Even if it is the end of the linked list, it will have a non-null link value,
// as long as it is on the list.
// This code must be MP safe, because it is used from parallel GC passes.


   1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1468       return true;;
1469     }
1470   }
1471 
1472   return false;
1473 }
1474 
1475 bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
1476   // Scopes
1477   for (oop* p = oops_begin(); p < oops_end(); p++) {
1478     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1479     if (can_unload(is_alive, p, unloading_occurred)) {
1480       return true;
1481     }
1482   }
1483   return false;
1484 }
1485 
1486 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
1487   // Compiled code
1488 
1489   // Prevent extra code cache walk for platforms that don't have immediate oops.
1490   if (relocInfo::mustIterateImmediateOopsInCode()) {
1491     RelocIterator iter(this, low_boundary);
1492     while (iter.next()) {
1493       if (iter.type() == relocInfo::oop_type) {
1494         if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
1495           return true;
1496         }
1497       }
1498     }
1499   }
1500 
1501   return do_unloading_scopes(is_alive, unloading_occurred);
1502 }
1503 
1504 #if INCLUDE_JVMCI
1505 bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
1506   if (_jvmci_installed_code != NULL) {
1507     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
1508       if (_jvmci_installed_code_triggers_unloading) {
1509         // jweak reference processing has already cleared the referent
1510         make_unloaded(is_alive, NULL);


1569   // Visit metadata not embedded in the other places.
1570   if (_method != NULL) f(_method);
1571 }
1572 
1573 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1574   // make sure the oops ready to receive visitors
1575   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1576   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1577 
1578   // If the method is not entrant or zombie then a JMP is plastered over the
1579   // first few bytes.  If an oop in the old code was there, that oop
1580   // should not get GC'd.  Skip the first few bytes of oops on
1581   // not-entrant methods.
1582   address low_boundary = verified_entry_point();
1583   if (is_not_entrant()) {
1584     low_boundary += NativeJump::instruction_size;
1585     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1586     // (See comment above.)
1587   }
1588 
1589   // Prevent extra code cache walk for platforms that don't have immediate oops.
1590   if (relocInfo::mustIterateImmediateOopsInCode()) {
1591     RelocIterator iter(this, low_boundary);
1592 
1593     while (iter.next()) {
1594       if (iter.type() == relocInfo::oop_type ) {
1595         oop_Relocation* r = iter.oop_reloc();
1596         // In this loop, we must only follow those oops directly embedded in
1597         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1598         assert(1 == (r->oop_is_immediate()) +
1599                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1600                "oop must be found in exactly one place");
1601         if (r->oop_is_immediate() && r->oop_value() != NULL) {
1602           f->do_oop(r->oop_addr());
1603         }
1604       }
1605     }
1606   }
1607 
1608   // Scopes
1609   // This includes oop constants not inlined in the code stream.
1610   for (oop* p = oops_begin(); p < oops_end(); p++) {
1611     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1612     f->do_oop(p);
1613   }
1614 }
1615 
// Sentinel value distinct from any real nmethod*, used to terminate the
// marked-nmethods list below.
#define NMETHOD_SENTINEL ((nmethod*)badAddress)

// Head of the linked list of nmethods marked during a GC pass.
nmethod* volatile nmethod::_oops_do_mark_nmethods;

// An nmethod is "marked" if its _mark_link is set non-null.
// Even if it is the end of the linked list, it will have a non-null link value,
// as long as it is on the list.
// This code must be MP safe, because it is used from parallel GC passes.


< prev index next >