// NOTE(review): this span is the tail of an enclosing oops-iteration function
// whose signature lies above this chunk; it applies the closure f to each oop
// slot in the nmethod's oop section.
1647 // This includes oop constants not inlined in the code stream.
1648 for (oop* p = oops_begin(); p < oops_end(); p++) {
// Universe::non_oop_word() flags slots that hold non-oop data; the closure
// must never be handed one of those.
1649 if (*p == Universe::non_oop_word()) continue; // skip non-oops
1650 f->do_oop(p);
1651 }
1652 }
1653
// Terminator for the claimed-nmethod list (also used to pre-claim an
// nmethod's _oops_do_mark_link); built from badAddress, presumably so it can
// never collide with a real nmethod pointer -- TODO confirm.
1654 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
1655
// Head of the global list of nmethods claimed during an oops-do marking
// phase; volatile because GC worker threads push onto it concurrently via
// test_set_oops_do_mark().
1656 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1657
1658 // An nmethod is "marked" if its _oops_do_mark_link is set non-null.
1659 // Even if it is the end of the linked list, it will have a non-null link value,
1660 // as long as it is on the list.
1661 // This code must be MP safe, because it is used from parallel GC passes.
1662 bool nmethod::test_set_oops_do_mark() {
// Returns false if this call claimed ("marked") the nmethod, true if another
// thread had already marked or claimed it.
1663 assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
// Cheap unsynchronized read first; only attempt the CAS if it looks unmarked.
1664 nmethod* observed_mark_link = _oops_do_mark_link;
1665 if (observed_mark_link == NULL) {
1666 // Claim this nmethod for this thread to mark.
// CAS NULL -> NMETHOD_SENTINEL on the link field: at most one thread wins.
1667 observed_mark_link = (nmethod*)
1668 Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
1669 if (observed_mark_link == NULL) {
1670
1671 // Atomically append this nmethod (now claimed) to the head of the list:
// Lock-free push: store the presumed head into our own link field, then try
// to swing the global head to `this`; retry if another pusher raced us.
1672 nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
1673 for (;;) {
1674 nmethod* required_mark_nmethods = observed_mark_nmethods;
// Overwrites the sentinel installed above; after the prologue the head is
// NMETHOD_SENTINEL or a real nmethod, so the link stays non-null and the
// nmethod remains "marked".
1675 _oops_do_mark_link = required_mark_nmethods;
1676 observed_mark_nmethods = (nmethod*)
1677 Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
1678 if (observed_mark_nmethods == required_mark_nmethods)
1679 break;
1680 }
1681 // Mark was clear when we first saw this guy.
1682 if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
1683 return false;
1684 }
1685 }
1686 // On fall through, another racing thread marked this nmethod before we did.
1687 return true;
1688 }
1689
1690 void nmethod::oops_do_marking_prologue() {
1691 if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
1692 assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
1693 // We use cmpxchg_ptr instead of regular assignment here because the user
1694 // may fork a bunch of threads, and we need them all to see the same state.
1695 void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
1696 guarantee(observed == NULL, "no races in this sequential code");
1697 }
1698
1699 void nmethod::oops_do_marking_epilogue() {
// Walks the list built by test_set_oops_do_mark(), clearing each nmethod's
// _oops_do_mark_link ("unmarking" it), then resets the global head to NULL.
// Runs after marking is complete -- sequential, per the guarantee below.
1700 assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
1701 nmethod* cur = _oops_do_mark_nmethods;
1702 while (cur != NMETHOD_SENTINEL) {
1703 assert(cur != NULL, "not NULL-terminated");
// Read the link BEFORE clearing it: clearing destroys the list as we walk.
1704 nmethod* next = cur->_oops_do_mark_link;
1705 cur->_oops_do_mark_link = NULL;
1706 DEBUG_ONLY(cur->verify_oop_relocations());
1707 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
1708 cur = next;
1709 }
// Swap the head (now NMETHOD_SENTINEL) back to NULL; the guarantee asserts
// nothing else modified it concurrently.
1710 void* required = _oops_do_mark_nmethods;
1711 void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
1712 guarantee(observed == required, "no races in this sequential code");
1713 if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
1714 }
1715
1716 class DetectScavengeRoot: public OopClosure {
// OopClosure that records whether any visited oop is scavengable (i.e. would
// make the enclosing nmethod a scavenge root).
// NOTE(review): the class body continues beyond this chunk; maybe_print()
// and the close of the #ifndef PRODUCT section are not visible here.
1717 bool _detected_scavenge_root;
1718 public:
1719 DetectScavengeRoot() : _detected_scavenge_root(false)
1720 { NOT_PRODUCT(_print_nm = NULL); }
1721 bool detected_scavenge_root() { return _detected_scavenge_root; }
// Latches to true on the first non-NULL, scavengable oop encountered.
1722 virtual void do_oop(oop* p) {
1723 if ((*p) != NULL && (*p)->is_scavengable()) {
1724 NOT_PRODUCT(maybe_print(p));
1725 _detected_scavenge_root = true;
1726 }
1727 }
// This closure is never fed compressed oops.
1728 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
1729
1730 #ifndef PRODUCT
// Debug-only: the nmethod being scanned, used for diagnostic printing.
1731 nmethod* _print_nm;
|
// NOTE(review): this span is the tail of an enclosing oops-iteration function
// whose signature lies above this chunk; it applies the closure f to each oop
// slot in the nmethod's oop section.
1647 // This includes oop constants not inlined in the code stream.
1648 for (oop* p = oops_begin(); p < oops_end(); p++) {
// Universe::non_oop_word() flags slots that hold non-oop data; the closure
// must never be handed one of those.
1649 if (*p == Universe::non_oop_word()) continue; // skip non-oops
1650 f->do_oop(p);
1651 }
1652 }
1653
// Terminator for the claimed-nmethod list (also used to pre-claim an
// nmethod's _oops_do_mark_link); built from badAddress, presumably so it can
// never collide with a real nmethod pointer -- TODO confirm.
1654 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
1655
// Head of the global list of nmethods claimed during an oops-do marking
// phase; volatile because GC worker threads push onto it concurrently via
// test_set_oops_do_mark().
1656 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1657
1658 // An nmethod is "marked" if its _oops_do_mark_link is set non-null.
1659 // Even if it is the end of the linked list, it will have a non-null link value,
1660 // as long as it is on the list.
1661 // This code must be MP safe, because it is used from parallel GC passes.
1662 bool nmethod::test_set_oops_do_mark() {
// Returns false if this call claimed ("marked") the nmethod, true if another
// thread had already marked or claimed it.
1663 assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
// Cheap unsynchronized read first; only attempt the CAS if it looks unmarked.
1664 nmethod* observed_mark_link = _oops_do_mark_link;
1665 if (observed_mark_link == NULL) {
1666 // Claim this nmethod for this thread to mark.
// CAS NULL -> NMETHOD_SENTINEL; cmpxchg_if_null returns true only for the
// single winning thread.
1667 if (Atomic::cmpxchg_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
1668 // Atomically append this nmethod (now claimed) to the head of the list:
// Lock-free push: store the presumed head into our own link field, then try
// to swing the global head to `this`; retry if another pusher raced us.
1669 nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
1670 for (;;) {
1671 nmethod* required_mark_nmethods = observed_mark_nmethods;
// Overwrites the sentinel installed above; after the prologue the head is
// NMETHOD_SENTINEL or a real nmethod, so the link stays non-null and the
// nmethod remains "marked".
1672 _oops_do_mark_link = required_mark_nmethods;
1673 observed_mark_nmethods =
1674 Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
1675 if (observed_mark_nmethods == required_mark_nmethods)
1676 break;
1677 }
1678 // Mark was clear when we first saw this guy.
1679 if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
1680 return false;
1681 }
1682 }
1683 // On fall through, another racing thread marked this nmethod before we did.
1684 return true;
1685 }
1686
1687 void nmethod::oops_do_marking_prologue() {
1688 if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
1689 assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
1690 // We use cmpxchg instead of regular assignment here because the user
1691 // may fork a bunch of threads, and we need them all to see the same state.
1692 bool observed = Atomic::cmpxchg_if_null(NMETHOD_SENTINEL, &_oops_do_mark_nmethods);
1693 guarantee(observed, "no races in this sequential code");
1694 }
1695
1696 void nmethod::oops_do_marking_epilogue() {
1697 assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
1698 nmethod* cur = _oops_do_mark_nmethods;
1699 while (cur != NMETHOD_SENTINEL) {
1700 assert(cur != NULL, "not NULL-terminated");
1701 nmethod* next = cur->_oops_do_mark_link;
1702 cur->_oops_do_mark_link = NULL;
1703 DEBUG_ONLY(cur->verify_oop_relocations());
1704 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
1705 cur = next;
1706 }
1707 nmethod* required = _oops_do_mark_nmethods;
1708 nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
1709 guarantee(observed == required, "no races in this sequential code");
1710 if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
1711 }
1712
1713 class DetectScavengeRoot: public OopClosure {
// OopClosure that records whether any visited oop is scavengable (i.e. would
// make the enclosing nmethod a scavenge root).
// NOTE(review): the class body continues beyond this chunk; maybe_print()
// and the close of the #ifndef PRODUCT section are not visible here.
1714 bool _detected_scavenge_root;
1715 public:
1716 DetectScavengeRoot() : _detected_scavenge_root(false)
1717 { NOT_PRODUCT(_print_nm = NULL); }
1718 bool detected_scavenge_root() { return _detected_scavenge_root; }
// Latches to true on the first non-NULL, scavengable oop encountered.
1719 virtual void do_oop(oop* p) {
1720 if ((*p) != NULL && (*p)->is_scavengable()) {
1721 NOT_PRODUCT(maybe_print(p));
1722 _detected_scavenge_root = true;
1723 }
1724 }
// This closure is never fed compressed oops.
1725 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
1726
1727 #ifndef PRODUCT
// Debug-only: the nmethod being scanned, used for diagnostic printing.
1728 nmethod* _print_nm;
|