
src/hotspot/share/code/nmethod.cpp


--- old/src/hotspot/share/code/nmethod.cpp

1644   }
1645 
1646   // Scopes
1647   // This includes oop constants not inlined in the code stream.
1648   for (oop* p = oops_begin(); p < oops_end(); p++) {
1649     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1650     f->do_oop(p);
1651   }
1652 }
1653 
1654 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
1655 
1656 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1657 
1658 // An nmethod is "marked" if its _oops_do_mark_link is set non-null.
1659 // Even if it is the end of the linked list, it will have a non-null link value,
1660 // as long as it is on the list.
1661 // This code must be MP safe, because it is used from parallel GC passes.
1662 bool nmethod::test_set_oops_do_mark() {
1663   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
1664   nmethod* observed_mark_link = _oops_do_mark_link;
1665   if (observed_mark_link == NULL) {
1666     // Claim this nmethod for this thread to mark.
1667     observed_mark_link = (nmethod*)
1668       Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
1669     if (observed_mark_link == NULL) {
1670 
1671       // Atomically append this nmethod (now claimed) to the head of the list:
1672       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
1673       for (;;) {
1674         nmethod* required_mark_nmethods = observed_mark_nmethods;
1675         _oops_do_mark_link = required_mark_nmethods;
1676         observed_mark_nmethods = (nmethod*)
1677           Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
1678         if (observed_mark_nmethods == required_mark_nmethods)
1679           break;
1680       }
1681       // Mark was clear when we first saw this nmethod.
1682       if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
1683       return false;
1684     }
1685   }
1686   // On fall through, another racing thread marked this nmethod before we did.
1687   return true;
1688 }
1689 
1690 void nmethod::oops_do_marking_prologue() {
1691   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
1692   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
1693   // We use cmpxchg_ptr instead of regular assignment here because the user
1694   // may fork a bunch of threads, and we need them all to see the same state.
1695   void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
1696   guarantee(observed == NULL, "no races in this sequential code");
1697 }
1698 
1699 void nmethod::oops_do_marking_epilogue() {
1700   assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
1701   nmethod* cur = _oops_do_mark_nmethods;
1702   while (cur != NMETHOD_SENTINEL) {
1703     assert(cur != NULL, "not NULL-terminated");
1704     nmethod* next = cur->_oops_do_mark_link;
1705     cur->_oops_do_mark_link = NULL;
1706     DEBUG_ONLY(cur->verify_oop_relocations());
1707     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
1708     cur = next;
1709   }
1710   void* required = _oops_do_mark_nmethods;
1711   void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
1712   guarantee(observed == required, "no races in this sequential code");
1713   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
1714 }
1715 
1716 class DetectScavengeRoot: public OopClosure {
1717   bool     _detected_scavenge_root;
1718 public:
1719   DetectScavengeRoot() : _detected_scavenge_root(false)
1720   { NOT_PRODUCT(_print_nm = NULL); }
1721   bool detected_scavenge_root() { return _detected_scavenge_root; }
1722   virtual void do_oop(oop* p) {
1723     if ((*p) != NULL && (*p)->is_scavengable()) {
1724       NOT_PRODUCT(maybe_print(p));
1725       _detected_scavenge_root = true;
1726     }
1727   }
1728   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
1729 
1730 #ifndef PRODUCT
1731   nmethod* _print_nm;

+++ new/src/hotspot/share/code/nmethod.cpp

1644   }
1645 
1646   // Scopes
1647   // This includes oop constants not inlined in the code stream.
1648   for (oop* p = oops_begin(); p < oops_end(); p++) {
1649     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1650     f->do_oop(p);
1651   }
1652 }
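
The loop just above walks the nmethod's embedded oop table, i.e. the oop constants that were kept out of the instruction stream, and hands every slot to the closure, skipping slots that currently hold the non-oop placeholder. For readers outside HotSpot, the same visitor shape in miniature; this is a hedged standalone sketch, and OopVisitor, NON_OOP, and visit_table are made-up names rather than HotSpot API:

// A table of pointers in which one distinguished value means "not a real
// pointer", visited through a virtual closure, mirroring f->do_oop(p) above.
struct OopVisitor {
  virtual void do_oop(void** p) = 0;
  virtual ~OopVisitor() {}
};

static int non_oop_storage;
void* const NON_OOP = &non_oop_storage;   // plays the role of Universe::non_oop_word()

void visit_table(void** begin, void** end, OopVisitor* f) {
  for (void** p = begin; p < end; p++) {
    if (*p == NON_OOP) continue;          // skip placeholder slots
    f->do_oop(p);                         // the closure may read or update the slot
  }
}
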
1653 
1654 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
1655 
1656 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1657 
1658 // An nmethod is "marked" if its _oops_do_mark_link is set non-null.
1659 // Even if it is the end of the linked list, it will have a non-null link value,
1660 // as long as it is on the list.
1661 // This code must be MP safe, because it is used from parallel GC passes.
1662 bool nmethod::test_set_oops_do_mark() {
1663   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
1664   if (_oops_do_mark_link == NULL) {

1665     // Claim this nmethod for this thread to mark.
1666     if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {



1667       // Atomically append this nmethod (now claimed) to the head of the list:
1668       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
1669       for (;;) {
1670         nmethod* required_mark_nmethods = observed_mark_nmethods;
1671         _oops_do_mark_link = required_mark_nmethods;
1672         observed_mark_nmethods =
1673           Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
1674         if (observed_mark_nmethods == required_mark_nmethods)
1675           break;
1676       }
1677       // Mark was clear when we first saw this nmethod.
1678       if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
1679       return false;
1680     }
1681   }
1682   // On fall through, another racing thread marked this nmethod before we did.
1683   return true;
1684 }
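
test_set_oops_do_mark() above combines two lock-free idioms: a claiming CAS that flips _oops_do_mark_link from NULL to a non-null sentinel, and a retry loop that pushes the claimed nmethod onto the global _oops_do_mark_nmethods list, a classic Treiber-stack push in which the link field doubles as the next pointer. A minimal standalone sketch of the same pattern with std::atomic; Node, SENTINEL, mark_list, and test_set_mark are hypothetical stand-ins, not HotSpot names:

#include <atomic>

struct Node {                                // stand-in for an nmethod
  std::atomic<Node*> mark_link{nullptr};     // nullptr == unmarked
};

static Node sentinel_storage;                // any distinguished non-null address
Node* const SENTINEL = &sentinel_storage;    // plays the role of NMETHOD_SENTINEL

// Head of the claimed list; the prologue (sketched further down) sets it to
// SENTINEL, so "empty list" and "marking inactive" (nullptr) stay distinct.
std::atomic<Node*> mark_list{nullptr};

// Returns true if a racing thread claimed n first (mirrors test_set_oops_do_mark).
bool test_set_mark(Node* n) {
  Node* expected = nullptr;
  // Step 1: claim n by CAS-ing its link from nullptr to the sentinel.
  if (!n->mark_link.compare_exchange_strong(expected, SENTINEL)) {
    return true;                             // lost the race; already marked
  }
  // Step 2: Treiber-stack push of the claimed node onto the global list.
  Node* head = mark_list.load();
  do {
    n->mark_link.store(head);                // tentative next pointer
  } while (!mark_list.compare_exchange_weak(head, n));
  return false;                              // we won; the caller marks the oops
}

The sentinel is what keeps every on-list link non-null, so "is this nmethod marked?" stays a single null check on the link field even for the last element of the list.
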
1685 
1686 void nmethod::oops_do_marking_prologue() {
1687   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
1688   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
1689   // We use cmpxchg instead of regular assignment here because the user
1690   // may fork a bunch of threads, and we need them all to see the same state.
1691   nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
1692   guarantee(observed == NULL, "no races in this sequential code");
1693 }
1694 
1695 void nmethod::oops_do_marking_epilogue() {
1696   assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
1697   nmethod* cur = _oops_do_mark_nmethods;
1698   while (cur != NMETHOD_SENTINEL) {
1699     assert(cur != NULL, "not NULL-terminated");
1700     nmethod* next = cur->_oops_do_mark_link;
1701     cur->_oops_do_mark_link = NULL;
1702     DEBUG_ONLY(cur->verify_oop_relocations());
1703     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
1704     cur = next;
1705   }
1706   nmethod* required = _oops_do_mark_nmethods;
1707   nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
1708   guarantee(observed == required, "no races in this sequential code");
1709   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
1710 }
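
Continuing the sketch above, the prologue/epilogue pair brackets one marking cycle: the prologue publishes the empty, sentinel-headed list, and the epilogue, which runs with no concurrent markers, walks the list, clears each link, and resets the head. As in the HotSpot code, the CASes here are not resolving real races; they double as full fences (useful when worker threads are forked right after the prologue) and as cheap self-checks. Again a hedged sketch, not the file's code:

#include <cassert>

void mark_prologue() {
  Node* expected = nullptr;
  bool ok = mark_list.compare_exchange_strong(expected, SENTINEL);
  assert(ok && "prologue called twice without an epilogue");
}

void mark_epilogue() {
  Node* cur = mark_list.load();
  while (cur != SENTINEL) {                  // sentinel-terminated, never nullptr
    assert(cur != nullptr);
    Node* next = cur->mark_link.load();
    cur->mark_link.store(nullptr);           // unmark: claimable again next cycle
    cur = next;
  }
  Node* head = mark_list.load();             // still the original head node
  bool ok = mark_list.compare_exchange_strong(head, nullptr);
  assert(ok && "no thread may touch the list during the epilogue");
}
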
1711 
1712 class DetectScavengeRoot: public OopClosure {
1713   bool     _detected_scavenge_root;
1714 public:
1715   DetectScavengeRoot() : _detected_scavenge_root(false)
1716   { NOT_PRODUCT(_print_nm = NULL); }
1717   bool detected_scavenge_root() { return _detected_scavenge_root; }
1718   virtual void do_oop(oop* p) {
1719     if ((*p) != NULL && (*p)->is_scavengable()) {
1720       NOT_PRODUCT(maybe_print(p));
1721       _detected_scavenge_root = true;
1722     }
1723   }
1724   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
1725 
1726 #ifndef PRODUCT
1727   nmethod* _print_nm;

