4148 { }
4149 virtual void do_oop(oop* p);
4150 virtual void do_oop(narrowOop* p);
4151
4152 void trim_queue(size_t max);
4153 void handle_stack_overflow(HeapWord* lost);
4154 void do_yield_check() {
4155 if (_task->should_yield()) {
4156 _task->yield();
4157 }
4158 }
4159 };
4160
4161 // Grey object scanning during work stealing phase --
4162 // the salient assumption here is that any references
4163 // that are in these stolen objects being scanned must
4164 // already have been initialized (else they would not have
4165 // been published), so we do not need to check for
4166 // uninitialized objects before pushing here.
4167 void Par_ConcMarkingClosure::do_oop(oop obj) {
4168 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4169 HeapWord* addr = (HeapWord*)obj;
4170 // Check if oop points into the CMS generation
4171 // and is not marked
4172 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4173 // a white object ...
4174 // If we manage to "claim" the object, by being the
4175 // first thread to mark it, then we push it on our
4176 // marking stack
4177 if (_bit_map->par_mark(addr)) { // ... now grey
4178 // push on work queue (grey set)
4179 bool simulate_overflow = false;
4180 NOT_PRODUCT(
4181 if (CMSMarkStackOverflowALot &&
4182 _collector->simulate_overflow()) {
4183 // simulate a stack overflow
4184 simulate_overflow = true;
4185 }
4186 )
4187 if (simulate_overflow ||
4188 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7207
7208 // See the comment in coordinator_yield()
7209 for (unsigned i = 0; i < CMSYieldSleepCount &&
7210 ConcurrentMarkSweepThread::should_yield() &&
7211 !CMSCollector::foregroundGCIsActive(); ++i) {
7212 os::sleep(Thread::current(), 1, false);
7213 ConcurrentMarkSweepThread::acknowledge_yield_request();
7214 }
7215
7216 ConcurrentMarkSweepThread::synchronize(true);
7217 _bit_map->lock()->lock_without_safepoint_check();
7218 _collector->startTimer();
7219 }
7220
7221 // This closure is used to rescan the marked objects on the dirty cards
7222 // in the mod union table and the card table proper. In the parallel
7223 // case, although the bitMap is shared, we do a single read so the
7224 // isMarked() query is "safe".
7225 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7226 // Ignore mark word because we are running concurrent with mutators
7227 assert(p->is_oop_or_null(true), "expected an oop or null");
7228 HeapWord* addr = (HeapWord*)p;
7229 assert(_span.contains(addr), "we are scanning the CMS generation");
7230 bool is_obj_array = false;
7231 #ifdef ASSERT
7232 if (!_parallel) {
7233 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7234 assert(_collector->overflow_list_is_empty(),
7235 "overflow list should be empty");
7236
7237 }
7238 #endif // ASSERT
7239 if (_bit_map->isMarked(addr)) {
7240 // Obj arrays are precisely marked, non-arrays are not;
7241 // so we scan objArrays precisely and non-arrays in their
7242 // entirety.
7243 if (p->is_objArray()) {
7244 is_obj_array = true;
7245 if (_parallel) {
7246 p->oop_iterate(_par_scan_closure, mr);
7247 } else {
7647 _verification_bm(verification_bm),
7648 _cms_bm(cms_bm),
7649 _mark_stack(mark_stack)
7650 { }
7651
// Virtual-dispatch shims: both the oop* and narrowOop* overloads simply
// forward to the templated do_oop_work().
void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7654
7655 // Upon stack overflow, we discard (part of) the stack,
7656 // remembering the least address amongst those discarded
7657 // in CMSCollector's _restart_address.
7658 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7659 // Remember the least grey address discarded
7660 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7661 _collector->lower_restart_addr(ra);
7662 _mark_stack->reset(); // discard stack contents
7663 _mark_stack->expand(); // expand the stack if possible
7664 }
7665
7666 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7667 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7668 HeapWord* addr = (HeapWord*)obj;
7669 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7670 // Oop lies in _span and isn't yet grey or black
7671 _verification_bm->mark(addr); // now grey
7672 if (!_cms_bm->isMarked(addr)) {
7673 oop(addr)->print();
7674 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7675 addr);
7676 fatal("... aborting");
7677 }
7678
7679 if (!_mark_stack->push(obj)) { // stack overflow
7680 if (PrintCMSStatistics != 0) {
7681 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7682 SIZE_FORMAT, _mark_stack->capacity());
7683 }
7684 assert(_mark_stack->isFull(), "Else push should have succeeded");
7685 handle_stack_overflow(addr);
7686 }
7687 // anything including and to the right of _finger
7745 _markStack->expand(); // expand the stack if possible
7746 }
7747
7748 // Upon stack overflow, we discard (part of) the stack,
7749 // remembering the least address amongst those discarded
7750 // in CMSCollector's _restart_address.
7751 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7752 // We need to do this under a mutex to prevent other
7753 // workers from interfering with the work done below.
7754 MutexLockerEx ml(_overflow_stack->par_lock(),
7755 Mutex::_no_safepoint_check_flag);
7756 // Remember the least grey address discarded
7757 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7758 _collector->lower_restart_addr(ra);
7759 _overflow_stack->reset(); // discard stack contents
7760 _overflow_stack->expand(); // expand the stack if possible
7761 }
7762
7763 void PushOrMarkClosure::do_oop(oop obj) {
7764 // Ignore mark word because we are running concurrent with mutators.
7765 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7766 HeapWord* addr = (HeapWord*)obj;
7767 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7768 // Oop lies in _span and isn't yet grey or black
7769 _bitMap->mark(addr); // now grey
7770 if (addr < _finger) {
7771 // the bit map iteration has already either passed, or
7772 // sampled, this bit in the bit map; we'll need to
7773 // use the marking stack to scan this oop's oops.
7774 bool simulate_overflow = false;
7775 NOT_PRODUCT(
7776 if (CMSMarkStackOverflowALot &&
7777 _collector->simulate_overflow()) {
7778 // simulate a stack overflow
7779 simulate_overflow = true;
7780 }
7781 )
7782 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7783 if (PrintCMSStatistics != 0) {
7784 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7785 SIZE_FORMAT, _markStack->capacity());
7786 }
7787 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7788 handle_stack_overflow(addr);
7789 }
7790 }
7791 // anything including and to the right of _finger
7792 // will be scanned as we iterate over the remainder of the
7793 // bit map
7794 do_yield_check();
7795 }
7796 }
7797
// Virtual-dispatch shims: both the oop* and narrowOop* overloads simply
// forward to the templated do_oop_work().
void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7800
7801 void Par_PushOrMarkClosure::do_oop(oop obj) {
7802 // Ignore mark word because we are running concurrent with mutators.
7803 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7804 HeapWord* addr = (HeapWord*)obj;
7805 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7806 // Oop lies in _span and isn't yet grey or black
7807 // We read the global_finger (volatile read) strictly after marking oop
7808 bool res = _bit_map->par_mark(addr); // now grey
7809 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7810 // Should we push this marked oop on our stack?
7811 // -- if someone else marked it, nothing to do
7812 // -- if target oop is above global finger nothing to do
7813 // -- if target oop is in chunk and above local finger
7814 // then nothing to do
7815 // -- else push on work queue
7816 if ( !res // someone else marked it, they will deal with it
7817 || (addr >= *gfa) // will be scanned in a later task
7818 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7819 return;
7820 }
7821 // the bit map iteration has already either passed, or
7822 // sampled, this bit in the bit map; we'll need to
7823 // use the marking stack to scan this oop's oops.
7860 MetadataAwareOopClosure(rp),
7861 _collector(collector),
7862 _span(span),
7863 _bit_map(bit_map),
7864 _mod_union_table(mod_union_table),
7865 _mark_stack(mark_stack),
7866 _concurrent_precleaning(concurrent_precleaning)
7867 {
7868 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7869 }
7870
7871 // Grey object rescan during pre-cleaning and second checkpoint phases --
7872 // the non-parallel version (the parallel version appears further below.)
7873 void PushAndMarkClosure::do_oop(oop obj) {
7874 // Ignore mark word verification. If during concurrent precleaning,
7875 // the object monitor may be locked. If during the checkpoint
7876 // phases, the object may already have been reached by a different
7877 // path and may be at the end of the global overflow list (so
7878 // the mark word may be NULL).
7879 assert(obj->is_oop_or_null(true /* ignore mark word */),
7880 "expected an oop or NULL");
7881 HeapWord* addr = (HeapWord*)obj;
7882 // Check if oop points into the CMS generation
7883 // and is not marked
7884 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7885 // a white object ...
7886 _bit_map->mark(addr); // ... now grey
7887 // push on the marking stack (grey set)
7888 bool simulate_overflow = false;
7889 NOT_PRODUCT(
7890 if (CMSMarkStackOverflowALot &&
7891 _collector->simulate_overflow()) {
7892 // simulate a stack overflow
7893 simulate_overflow = true;
7894 }
7895 )
7896 if (simulate_overflow || !_mark_stack->push(obj)) {
7897 if (_concurrent_precleaning) {
7898 // During precleaning we can just dirty the appropriate card(s)
7899 // in the mod union table, thus ensuring that the object remains
7900 // in the grey set and continue. In the case of object arrays
7940 }
7941
// Virtual-dispatch shims: both the oop* and narrowOop* overloads simply
// forward to the templated do_oop_work().
void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7944
7945 // Grey object rescan during second checkpoint phase --
7946 // the parallel version.
7947 void Par_PushAndMarkClosure::do_oop(oop obj) {
7948 // In the assert below, we ignore the mark word because
7949 // this oop may point to an already visited object that is
7950 // on the overflow stack (in which case the mark word has
7951 // been hijacked for chaining into the overflow stack --
7952 // if this is the last object in the overflow stack then
7953 // its mark word will be NULL). Because this object may
7954 // have been subsequently popped off the global overflow
7955 // stack, and the mark word possibly restored to the prototypical
7956 // value, by the time we get to examined this failing assert in
7957 // the debugger, is_oop_or_null(false) may subsequently start
7958 // to hold.
7959 assert(obj->is_oop_or_null(true),
7960 "expected an oop or NULL");
7961 HeapWord* addr = (HeapWord*)obj;
7962 // Check if oop points into the CMS generation
7963 // and is not marked
7964 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7965 // a white object ...
7966 // If we manage to "claim" the object, by being the
7967 // first thread to mark it, then we push it on our
7968 // marking stack
7969 if (_bit_map->par_mark(addr)) { // ... now grey
7970 // push on work queue (grey set)
7971 bool simulate_overflow = false;
7972 NOT_PRODUCT(
7973 if (CMSMarkStackOverflowALot &&
7974 _collector->par_simulate_overflow()) {
7975 // simulate a stack overflow
7976 simulate_overflow = true;
7977 }
7978 )
7979 if (simulate_overflow || !_work_queue->push(obj)) {
7980 _collector->par_push_on_overflow_list(obj);
|
4148 { }
4149 virtual void do_oop(oop* p);
4150 virtual void do_oop(narrowOop* p);
4151
4152 void trim_queue(size_t max);
4153 void handle_stack_overflow(HeapWord* lost);
4154 void do_yield_check() {
4155 if (_task->should_yield()) {
4156 _task->yield();
4157 }
4158 }
4159 };
4160
4161 // Grey object scanning during work stealing phase --
4162 // the salient assumption here is that any references
4163 // that are in these stolen objects being scanned must
4164 // already have been initialized (else they would not have
4165 // been published), so we do not need to check for
4166 // uninitialized objects before pushing here.
4167 void Par_ConcMarkingClosure::do_oop(oop obj) {
4168 assert(obj->is_oop_or_null(true), err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(obj)));
4169 HeapWord* addr = (HeapWord*)obj;
4170 // Check if oop points into the CMS generation
4171 // and is not marked
4172 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4173 // a white object ...
4174 // If we manage to "claim" the object, by being the
4175 // first thread to mark it, then we push it on our
4176 // marking stack
4177 if (_bit_map->par_mark(addr)) { // ... now grey
4178 // push on work queue (grey set)
4179 bool simulate_overflow = false;
4180 NOT_PRODUCT(
4181 if (CMSMarkStackOverflowALot &&
4182 _collector->simulate_overflow()) {
4183 // simulate a stack overflow
4184 simulate_overflow = true;
4185 }
4186 )
4187 if (simulate_overflow ||
4188 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7207
7208 // See the comment in coordinator_yield()
7209 for (unsigned i = 0; i < CMSYieldSleepCount &&
7210 ConcurrentMarkSweepThread::should_yield() &&
7211 !CMSCollector::foregroundGCIsActive(); ++i) {
7212 os::sleep(Thread::current(), 1, false);
7213 ConcurrentMarkSweepThread::acknowledge_yield_request();
7214 }
7215
7216 ConcurrentMarkSweepThread::synchronize(true);
7217 _bit_map->lock()->lock_without_safepoint_check();
7218 _collector->startTimer();
7219 }
7220
7221 // This closure is used to rescan the marked objects on the dirty cards
7222 // in the mod union table and the card table proper. In the parallel
7223 // case, although the bitMap is shared, we do a single read so the
7224 // isMarked() query is "safe".
7225 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7226 // Ignore mark word because we are running concurrent with mutators
7227 assert(p->is_oop_or_null(true), err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(p)));
7228 HeapWord* addr = (HeapWord*)p;
7229 assert(_span.contains(addr), "we are scanning the CMS generation");
7230 bool is_obj_array = false;
7231 #ifdef ASSERT
7232 if (!_parallel) {
7233 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7234 assert(_collector->overflow_list_is_empty(),
7235 "overflow list should be empty");
7236
7237 }
7238 #endif // ASSERT
7239 if (_bit_map->isMarked(addr)) {
7240 // Obj arrays are precisely marked, non-arrays are not;
7241 // so we scan objArrays precisely and non-arrays in their
7242 // entirety.
7243 if (p->is_objArray()) {
7244 is_obj_array = true;
7245 if (_parallel) {
7246 p->oop_iterate(_par_scan_closure, mr);
7247 } else {
7647 _verification_bm(verification_bm),
7648 _cms_bm(cms_bm),
7649 _mark_stack(mark_stack)
7650 { }
7651
// Virtual-dispatch shims: both the oop* and narrowOop* overloads simply
// forward to the templated do_oop_work().
void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7654
7655 // Upon stack overflow, we discard (part of) the stack,
7656 // remembering the least address amongst those discarded
7657 // in CMSCollector's _restart_address.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded; marking will later be
  // restarted from this address (recorded via the collector).
  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _mark_stack->reset(); // discard stack contents
  _mark_stack->expand(); // expand the stack if possible, so a subsequent
                         // overflow is less likely
}
7665
7666 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7667 assert(obj->is_oop_or_null(), err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(obj)));
7668 HeapWord* addr = (HeapWord*)obj;
7669 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7670 // Oop lies in _span and isn't yet grey or black
7671 _verification_bm->mark(addr); // now grey
7672 if (!_cms_bm->isMarked(addr)) {
7673 oop(addr)->print();
7674 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7675 addr);
7676 fatal("... aborting");
7677 }
7678
7679 if (!_mark_stack->push(obj)) { // stack overflow
7680 if (PrintCMSStatistics != 0) {
7681 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7682 SIZE_FORMAT, _mark_stack->capacity());
7683 }
7684 assert(_mark_stack->isFull(), "Else push should have succeeded");
7685 handle_stack_overflow(addr);
7686 }
7687 // anything including and to the right of _finger
7745 _markStack->expand(); // expand the stack if possible
7746 }
7747
7748 // Upon stack overflow, we discard (part of) the stack,
7749 // remembering the least address amongst those discarded
7750 // in CMSCollector's _restart_address.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below
  // (the overflow stack is shared among the parallel workers).
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded; marking will later be
  // restarted from this address (recorded via the collector).
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset(); // discard stack contents
  _overflow_stack->expand(); // expand the stack if possible, so a subsequent
                             // overflow is less likely
}
7762
// Greys a white object lying in _span; if the bitmap iteration has already
// passed the object's address (addr < _finger), it must additionally be
// pushed on the marking stack so its references still get scanned.
void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);            // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      // Debug-only stress mode: pretend the push failed so the overflow
      // path gets exercised (no-op in product builds).
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _markStack->capacity());
        }
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        // Benign: discard (part of) the stack and remember where to restart.
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}
7797
// Virtual-dispatch shims: both the oop* and narrowOop* overloads simply
// forward to the templated do_oop_work().
void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7800
7801 void Par_PushOrMarkClosure::do_oop(oop obj) {
7802 // Ignore mark word because we are running concurrent with mutators.
7803 assert(obj->is_oop_or_null(true), err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(obj)));
7804 HeapWord* addr = (HeapWord*)obj;
7805 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7806 // Oop lies in _span and isn't yet grey or black
7807 // We read the global_finger (volatile read) strictly after marking oop
7808 bool res = _bit_map->par_mark(addr); // now grey
7809 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7810 // Should we push this marked oop on our stack?
7811 // -- if someone else marked it, nothing to do
7812 // -- if target oop is above global finger nothing to do
7813 // -- if target oop is in chunk and above local finger
7814 // then nothing to do
7815 // -- else push on work queue
7816 if ( !res // someone else marked it, they will deal with it
7817 || (addr >= *gfa) // will be scanned in a later task
7818 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7819 return;
7820 }
7821 // the bit map iteration has already either passed, or
7822 // sampled, this bit in the bit map; we'll need to
7823 // use the marking stack to scan this oop's oops.
7860 MetadataAwareOopClosure(rp),
7861 _collector(collector),
7862 _span(span),
7863 _bit_map(bit_map),
7864 _mod_union_table(mod_union_table),
7865 _mark_stack(mark_stack),
7866 _concurrent_precleaning(concurrent_precleaning)
7867 {
7868 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7869 }
7870
7871 // Grey object rescan during pre-cleaning and second checkpoint phases --
7872 // the non-parallel version (the parallel version appears further below.)
7873 void PushAndMarkClosure::do_oop(oop obj) {
7874 // Ignore mark word verification. If during concurrent precleaning,
7875 // the object monitor may be locked. If during the checkpoint
7876 // phases, the object may already have been reached by a different
7877 // path and may be at the end of the global overflow list (so
7878 // the mark word may be NULL).
7879 assert(obj->is_oop_or_null(true /* ignore mark word */),
7880 err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(obj)));
7881 HeapWord* addr = (HeapWord*)obj;
7882 // Check if oop points into the CMS generation
7883 // and is not marked
7884 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7885 // a white object ...
7886 _bit_map->mark(addr); // ... now grey
7887 // push on the marking stack (grey set)
7888 bool simulate_overflow = false;
7889 NOT_PRODUCT(
7890 if (CMSMarkStackOverflowALot &&
7891 _collector->simulate_overflow()) {
7892 // simulate a stack overflow
7893 simulate_overflow = true;
7894 }
7895 )
7896 if (simulate_overflow || !_mark_stack->push(obj)) {
7897 if (_concurrent_precleaning) {
7898 // During precleaning we can just dirty the appropriate card(s)
7899 // in the mod union table, thus ensuring that the object remains
7900 // in the grey set and continue. In the case of object arrays
7940 }
7941
// Virtual-dispatch shims: both the oop* and narrowOop* overloads simply
// forward to the templated do_oop_work().
void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7944
7945 // Grey object rescan during second checkpoint phase --
7946 // the parallel version.
7947 void Par_PushAndMarkClosure::do_oop(oop obj) {
7948 // In the assert below, we ignore the mark word because
7949 // this oop may point to an already visited object that is
7950 // on the overflow stack (in which case the mark word has
7951 // been hijacked for chaining into the overflow stack --
7952 // if this is the last object in the overflow stack then
7953 // its mark word will be NULL). Because this object may
7954 // have been subsequently popped off the global overflow
7955 // stack, and the mark word possibly restored to the prototypical
7956 // value, by the time we get to examined this failing assert in
7957 // the debugger, is_oop_or_null(false) may subsequently start
7958 // to hold.
7959 assert(obj->is_oop_or_null(true),
7960 err_msg("expected an oop or NULL: " PTR_FORMAT, p2i(obj)));
7961 HeapWord* addr = (HeapWord*)obj;
7962 // Check if oop points into the CMS generation
7963 // and is not marked
7964 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7965 // a white object ...
7966 // If we manage to "claim" the object, by being the
7967 // first thread to mark it, then we push it on our
7968 // marking stack
7969 if (_bit_map->par_mark(addr)) { // ... now grey
7970 // push on work queue (grey set)
7971 bool simulate_overflow = false;
7972 NOT_PRODUCT(
7973 if (CMSMarkStackOverflowALot &&
7974 _collector->par_simulate_overflow()) {
7975 // simulate a stack overflow
7976 simulate_overflow = true;
7977 }
7978 )
7979 if (simulate_overflow || !_work_queue->push(obj)) {
7980 _collector->par_push_on_overflow_list(obj);
|