< prev index next >

src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page




5806                         _capacity / K, new_capacity / K);
5807   }
5808 }
5809 
5810 
5811 // Closures
5812 // XXX: there seems to be a lot of code  duplication here;
5813 // should refactor and consolidate common code.
5814 
5815 // This closure is used to mark refs into the CMS generation in
5816 // the CMS bit map. Called at the first checkpoint. This closure
5817 // assumes that we do not need to re-mark dirty cards; if the CMS
5818 // generation on which this is used is not an oldest
5819 // generation then this will lose younger_gen cards!
5820 
5821 MarkRefsIntoClosure::MarkRefsIntoClosure(
5822   MemRegion span, CMSBitMap* bitMap):
5823     _span(span),
5824     _bitMap(bitMap)
5825 {
5826   assert(ref_processor() == NULL, "deliberately left NULL");  // this closure does no reference discovery
5827   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");   // every address in _span must map to a bit
5828 }
5829 
5830 void MarkRefsIntoClosure::do_oop(oop obj) {
5831   // If obj points into _span, mark the corresponding bit in _bitMap.
5832   assert(oopDesc::is_oop(obj), "expected an oop");
5833   HeapWord* addr = (HeapWord*)obj;
5834   if (_span.contains(addr)) {
5835     // TODO: one bit-map update per oop -- this should be made more efficient
5836     _bitMap->mark(addr);
5837   }
5838 }
5839 
5840 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }  // wide-oop entry
5841 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }  // narrow-oop entry
5842 
5843 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5844   MemRegion span, CMSBitMap* bitMap):
5845     _span(span),
5846     _bitMap(bitMap)
5847 {
5848   assert(ref_processor() == NULL, "deliberately left NULL");  // this closure does no reference discovery
5849   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");   // every address in _span must map to a bit
5850 }
5851 
5852 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5853   // If obj points into _span, mark the corresponding bit in _bitMap.
5854   assert(oopDesc::is_oop(obj), "expected an oop");
5855   HeapWord* addr = (HeapWord*)obj;
5856   if (_span.contains(addr)) {
5857     // par_mark: atomic variant, since the bit map is shared between workers;
5858     // this should be made more efficient
5859     _bitMap->par_mark(addr);
5860   }
5861 }
5862 
5863 void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }  // wide-oop entry
5864 void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }  // narrow-oop entry
5864 
5865 // A variant of the above, used for CMS marking verification.
5866 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5867   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5868     _span(span),
5869     _verification_bm(verification_bm),
5870     _cms_bm(cms_bm)
5871 {
5872   assert(ref_processor() == NULL, "deliberately left NULL");  // no reference discovery during verification
5873   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5874 }
5875 
5876 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5877   // If obj is in _span, mark it in _verification_bm and require that _cms_bm agrees.
5878   assert(oopDesc::is_oop(obj), "expected an oop");
5879   HeapWord* addr = (HeapWord*)obj;
5880   if (_span.contains(addr)) {
5881     _verification_bm->mark(addr);
5882     if (!_cms_bm->isMarked(addr)) {
5883       Log(gc, verify) log;
5884       ResourceMark rm;
5885       LogStream ls(log.error());
5886       oop(addr)->print_on(&ls);  // dump the offending object before aborting
5887       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5888       fatal("... aborting");  // a verification miss is not recoverable
5889     }
5890   }
5891 }
5892 
5893 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }  // wide-oop entry
5894 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }  // narrow-oop entry
5895 
5896 //////////////////////////////////////////////////
5897 // MarkRefsIntoAndScanClosure
5898 //////////////////////////////////////////////////
5899 
5900 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5901                                                        ReferenceProcessor* rp,
5902                                                        CMSBitMap* bit_map,
5903                                                        CMSBitMap* mod_union_table,
5904                                                        CMSMarkStack*  mark_stack,
5905                                                        CMSCollector* collector,
5906                                                        bool should_yield,
5907                                                        bool concurrent_precleaning):
5908   _collector(collector),
5909   _span(span),
5910   _bit_map(bit_map),
5911   _mark_stack(mark_stack),
5912   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5913                       mark_stack, concurrent_precleaning),
5914   _yield(should_yield),
5915   _concurrent_precleaning(concurrent_precleaning),
5916   _freelistLock(NULL)
5917 {
5918   // FIXME: Should initialize in base class constructor.
5919   assert(rp != NULL, "ref_processor shouldn't be NULL");
5920   set_ref_processor_internal(rp);  // asserted non-NULL just above
5921 }
5922 
5923 // This closure is used to mark refs into the CMS generation at the
5924 // second (final) checkpoint, and to scan and transitively follow
5925 // the unmarked oops. It is also used during the concurrent precleaning
5926 // phase while scanning objects on dirty cards in the CMS generation.
5927 // The marks are made in the marking bit map and the marking stack is
5928 // used for keeping the (newly) grey objects during the scan.
5929 // The parallel version (Par_...) appears further below.
5930 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5931   if (obj != NULL) {
5932     assert(oopDesc::is_oop(obj), "expected an oop");
5933     HeapWord* addr = (HeapWord*)obj;
5934     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5935     assert(_collector->overflow_list_is_empty(),
5936            "overflow list should be empty");
5937     if (_span.contains(addr) &&
5938         !_bit_map->isMarked(addr)) {
5939       // mark bit map (object is now grey)
5940       _bit_map->mark(addr);


5985   // See the comment in coordinator_yield()
5986   for (unsigned i = 0;
5987        i < CMSYieldSleepCount &&
5988        ConcurrentMarkSweepThread::should_yield() &&
5989        !CMSCollector::foregroundGCIsActive();
5990        ++i) {
5991     os::sleep(Thread::current(), 1, false);
5992   }
5993 
5994   ConcurrentMarkSweepThread::synchronize(true);
5995   _freelistLock->lock_without_safepoint_check();
5996   _bit_map->lock()->lock_without_safepoint_check();
5997   _collector->startTimer();
5998 }
5999 
6000 ///////////////////////////////////////////////////////////
6001 // ParMarkRefsIntoAndScanClosure: a parallel version of
6002 //                                MarkRefsIntoAndScanClosure
6003 ///////////////////////////////////////////////////////////
6004 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6005   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6006   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6007   _span(span),
6008   _bit_map(bit_map),
6009   _work_queue(work_queue),
6010   _low_water_mark(MIN2((work_queue->max_elems()/4),
6011                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),  // drain mark: min(capacity/4, threshold * #threads)
6012   _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6013 {
6014   // FIXME: Should initialize in base class constructor.
6015   assert(rp != NULL, "ref_processor shouldn't be NULL");
6016   set_ref_processor_internal(rp);  // asserted non-NULL just above
6017 }
6018 
6019 // This closure is used to mark refs into the CMS generation at the
6020 // second (final) checkpoint, and to scan and transitively follow
6021 // the unmarked oops. The marks are made in the marking bit map and
6022 // the work_queue is used for keeping the (newly) grey objects during
6023 // the scan phase whence they are also available for stealing by parallel
6024 // threads. Since the marking bit map is shared, updates are
6025 // synchronized (via CAS).
6026 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6027   if (obj != NULL) {
6028     // Ignore mark word because this could be an already marked oop
6029     // that may be chained at the end of the overflow list.
6030     assert(oopDesc::is_oop(obj, true), "expected an oop");
6031     HeapWord* addr = (HeapWord*)obj;
6032     if (_span.contains(addr) &&
6033         !_bit_map->isMarked(addr)) {
6034       // mark bit map (object will become grey):
6035       // It is possible for several threads to be
6036       // trying to "claim" this object concurrently;


6823     if (simulate_overflow ||
6824         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6825       // stack overflow
6826       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6827       // We cannot assert that the overflow stack is full because
6828       // it may have been emptied since.
6829       assert(simulate_overflow ||
6830              _work_queue->size() == _work_queue->max_elems(),
6831             "Else push should have succeeded");
6832       handle_stack_overflow(addr);
6833     }
6834     do_yield_check();
6835   }
6836 }
6837 
6838 void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }  // wide-oop entry
6839 void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }  // narrow-oop entry
6840 
6841 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6842                                        MemRegion span,
6843                                        ReferenceProcessor* rp,
6844                                        CMSBitMap* bit_map,
6845                                        CMSBitMap* mod_union_table,
6846                                        CMSMarkStack*  mark_stack,
6847                                        bool           concurrent_precleaning):
6848   MetadataAwareOopClosure(rp),
6849   _collector(collector),
6850   _span(span),
6851   _bit_map(bit_map),
6852   _mod_union_table(mod_union_table),
6853   _mark_stack(mark_stack),
6854   _concurrent_precleaning(concurrent_precleaning)
6855 {
6856   assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");  // installed via MetadataAwareOopClosure(rp) above
6857 }
6858 
6859 // Grey object rescan during pre-cleaning and second checkpoint phases --
6860 // the non-parallel version (the parallel version appears further below.)
6861 void PushAndMarkClosure::do_oop(oop obj) {
6862   // Ignore mark word verification. If during concurrent precleaning,
6863   // the object monitor may be locked. If during the checkpoint
6864   // phases, the object may already have been reached by a  different
6865   // path and may be at the end of the global overflow list (so
6866   // the mark word may be NULL).
6867   assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
6868          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6869   HeapWord* addr = (HeapWord*)obj;
6870   // Check if oop points into the CMS generation
6871   // and is not marked
6872   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6873     // a white object ...
6874     _bit_map->mark(addr);         // ... now grey
6875     // push on the marking stack (grey set)
6876     bool simulate_overflow = false;


6897            HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6898            MemRegion redirty_range = MemRegion(addr, end_card_addr);
6899            assert(!redirty_range.is_empty(), "Arithmetical tautology");
6900            _mod_union_table->mark_range(redirty_range);
6901          } else {
6902            _mod_union_table->mark(addr);
6903          }
6904          _collector->_ser_pmc_preclean_ovflw++;
6905       } else {
6906          // During the remark phase, we need to remember this oop
6907          // in the overflow list.
6908          _collector->push_on_overflow_list(obj);
6909          _collector->_ser_pmc_remark_ovflw++;
6910       }
6911     }
6912   }
6913 }
6914 
6915 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6916                                              MemRegion span,
6917                                              ReferenceProcessor* rp,
6918                                              CMSBitMap* bit_map,
6919                                              OopTaskQueue* work_queue):
6920   MetadataAwareOopClosure(rp),
6921   _collector(collector),
6922   _span(span),
6923   _bit_map(bit_map),
6924   _work_queue(work_queue)
6925 {
6926   assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");  // installed via MetadataAwareOopClosure(rp) above
6927 }
6928 
6929 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }  // wide-oop entry
6930 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }  // narrow-oop entry
6931 
6932 // Grey object rescan during second checkpoint phase --
6933 // the parallel version.
6934 void ParPushAndMarkClosure::do_oop(oop obj) {
6935   // In the assert below, we ignore the mark word because
6936   // this oop may point to an already visited object that is
6937   // on the overflow stack (in which case the mark word has
6938   // been hijacked for chaining into the overflow stack --
6939   // if this is the last object in the overflow stack then
6940   // its mark word will be NULL). Because this object may
6941   // have been subsequently popped off the global overflow
6942   // stack, and the mark word possibly restored to the prototypical
6943   // value, by the time we get to examined this failing assert in
6944   // the debugger, is_oop_or_null(false) may subsequently start
6945   // to hold.
6946   assert(oopDesc::is_oop_or_null(obj, true),




5806                         _capacity / K, new_capacity / K);
5807   }
5808 }
5809 
5810 
5811 // Closures
5812 // XXX: there seems to be a lot of code  duplication here;
5813 // should refactor and consolidate common code.
5814 
5815 // This closure is used to mark refs into the CMS generation in
5816 // the CMS bit map. Called at the first checkpoint. This closure
5817 // assumes that we do not need to re-mark dirty cards; if the CMS
5818 // generation on which this is used is not an oldest
5819 // generation then this will lose younger_gen cards!
5820 
5821 MarkRefsIntoClosure::MarkRefsIntoClosure(
5822   MemRegion span, CMSBitMap* bitMap):
5823     _span(span),
5824     _bitMap(bitMap)
5825 {
5826   assert(ref_discoverer() == NULL, "deliberately left NULL");  // this closure does no reference discovery
5827   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");   // every address in _span must map to a bit
5828 }
5829 
5830 void MarkRefsIntoClosure::do_oop(oop obj) {
5831   // If obj points into _span, mark the corresponding bit in _bitMap.
5832   assert(oopDesc::is_oop(obj), "expected an oop");
5833   HeapWord* addr = (HeapWord*)obj;
5834   if (_span.contains(addr)) {
5835     // TODO: one bit-map update per oop -- this should be made more efficient
5836     _bitMap->mark(addr);
5837   }
5838 }
5839 
5840 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }  // wide-oop entry
5841 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }  // narrow-oop entry
5842 
5843 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5844   MemRegion span, CMSBitMap* bitMap):
5845     _span(span),
5846     _bitMap(bitMap)
5847 {
5848   assert(ref_discoverer() == NULL, "deliberately left NULL");  // this closure does no reference discovery
5849   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");   // every address in _span must map to a bit
5850 }
5851 
5852 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5853   // If obj points into _span, mark the corresponding bit in _bitMap.
5854   assert(oopDesc::is_oop(obj), "expected an oop");
5855   HeapWord* addr = (HeapWord*)obj;
5856   if (_span.contains(addr)) {
5857     // par_mark: atomic variant, since the bit map is shared between workers;
5858     // this should be made more efficient
5859     _bitMap->par_mark(addr);
5860   }
5861 }
5862 
5863 void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }  // wide-oop entry
5864 void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }  // narrow-oop entry
5864 
5865 // A variant of the above, used for CMS marking verification.
5866 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5867   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5868     _span(span),
5869     _verification_bm(verification_bm),
5870     _cms_bm(cms_bm)
5871 {
5872   assert(ref_discoverer() == NULL, "deliberately left NULL");  // no reference discovery during verification
5873   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5874 }
5875 
5876 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5877   // If obj is in _span, mark it in _verification_bm and require that _cms_bm agrees.
5878   assert(oopDesc::is_oop(obj), "expected an oop");
5879   HeapWord* addr = (HeapWord*)obj;
5880   if (_span.contains(addr)) {
5881     _verification_bm->mark(addr);
5882     if (!_cms_bm->isMarked(addr)) {
5883       Log(gc, verify) log;
5884       ResourceMark rm;
5885       LogStream ls(log.error());
5886       oop(addr)->print_on(&ls);  // dump the offending object before aborting
5887       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5888       fatal("... aborting");  // a verification miss is not recoverable
5889     }
5890   }
5891 }
5892 
5893 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }  // wide-oop entry
5894 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }  // narrow-oop entry
5895 
5896 //////////////////////////////////////////////////
5897 // MarkRefsIntoAndScanClosure
5898 //////////////////////////////////////////////////
5899 
5900 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5901                                                        ReferenceDiscoverer* rd,
5902                                                        CMSBitMap* bit_map,
5903                                                        CMSBitMap* mod_union_table,
5904                                                        CMSMarkStack*  mark_stack,
5905                                                        CMSCollector* collector,
5906                                                        bool should_yield,
5907                                                        bool concurrent_precleaning):
5908   _collector(collector),
5909   _span(span),
5910   _bit_map(bit_map),
5911   _mark_stack(mark_stack),
5912   _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
5913                       mark_stack, concurrent_precleaning),
5914   _yield(should_yield),
5915   _concurrent_precleaning(concurrent_precleaning),
5916   _freelistLock(NULL)
5917 {
5918   // FIXME: Should initialize in base class constructor.
5919   assert(rd != NULL, "ref_discoverer shouldn't be NULL");
5920   set_ref_discoverer_internal(rd);  // asserted non-NULL just above
5921 }
5922 
5923 // This closure is used to mark refs into the CMS generation at the
5924 // second (final) checkpoint, and to scan and transitively follow
5925 // the unmarked oops. It is also used during the concurrent precleaning
5926 // phase while scanning objects on dirty cards in the CMS generation.
5927 // The marks are made in the marking bit map and the marking stack is
5928 // used for keeping the (newly) grey objects during the scan.
5929 // The parallel version (Par_...) appears further below.
5930 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5931   if (obj != NULL) {
5932     assert(oopDesc::is_oop(obj), "expected an oop");
5933     HeapWord* addr = (HeapWord*)obj;
5934     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5935     assert(_collector->overflow_list_is_empty(),
5936            "overflow list should be empty");
5937     if (_span.contains(addr) &&
5938         !_bit_map->isMarked(addr)) {
5939       // mark bit map (object is now grey)
5940       _bit_map->mark(addr);


5985   // See the comment in coordinator_yield()
5986   for (unsigned i = 0;
5987        i < CMSYieldSleepCount &&
5988        ConcurrentMarkSweepThread::should_yield() &&
5989        !CMSCollector::foregroundGCIsActive();
5990        ++i) {
5991     os::sleep(Thread::current(), 1, false);
5992   }
5993 
5994   ConcurrentMarkSweepThread::synchronize(true);
5995   _freelistLock->lock_without_safepoint_check();
5996   _bit_map->lock()->lock_without_safepoint_check();
5997   _collector->startTimer();
5998 }
5999 
6000 ///////////////////////////////////////////////////////////
6001 // ParMarkRefsIntoAndScanClosure: a parallel version of
6002 //                                MarkRefsIntoAndScanClosure
6003 ///////////////////////////////////////////////////////////
6004 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6005   CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
6006   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6007   _span(span),
6008   _bit_map(bit_map),
6009   _work_queue(work_queue),
6010   _low_water_mark(MIN2((work_queue->max_elems()/4),
6011                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),  // drain mark: min(capacity/4, threshold * #threads)
6012   _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
6013 {
6014   // FIXME: Should initialize in base class constructor.
6015   assert(rd != NULL, "ref_discoverer shouldn't be NULL");
6016   set_ref_discoverer_internal(rd);  // asserted non-NULL just above
6017 }
6018 
6019 // This closure is used to mark refs into the CMS generation at the
6020 // second (final) checkpoint, and to scan and transitively follow
6021 // the unmarked oops. The marks are made in the marking bit map and
6022 // the work_queue is used for keeping the (newly) grey objects during
6023 // the scan phase whence they are also available for stealing by parallel
6024 // threads. Since the marking bit map is shared, updates are
6025 // synchronized (via CAS).
6026 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6027   if (obj != NULL) {
6028     // Ignore mark word because this could be an already marked oop
6029     // that may be chained at the end of the overflow list.
6030     assert(oopDesc::is_oop(obj, true), "expected an oop");
6031     HeapWord* addr = (HeapWord*)obj;
6032     if (_span.contains(addr) &&
6033         !_bit_map->isMarked(addr)) {
6034       // mark bit map (object will become grey):
6035       // It is possible for several threads to be
6036       // trying to "claim" this object concurrently;


6823     if (simulate_overflow ||
6824         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6825       // stack overflow
6826       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6827       // We cannot assert that the overflow stack is full because
6828       // it may have been emptied since.
6829       assert(simulate_overflow ||
6830              _work_queue->size() == _work_queue->max_elems(),
6831             "Else push should have succeeded");
6832       handle_stack_overflow(addr);
6833     }
6834     do_yield_check();
6835   }
6836 }
6837 
6838 void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }  // wide-oop entry
6839 void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }  // narrow-oop entry
6840 
6841 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6842                                        MemRegion span,
6843                                        ReferenceDiscoverer* rd,
6844                                        CMSBitMap* bit_map,
6845                                        CMSBitMap* mod_union_table,
6846                                        CMSMarkStack*  mark_stack,
6847                                        bool           concurrent_precleaning):
6848   MetadataAwareOopClosure(rd),
6849   _collector(collector),
6850   _span(span),
6851   _bit_map(bit_map),
6852   _mod_union_table(mod_union_table),
6853   _mark_stack(mark_stack),
6854   _concurrent_precleaning(concurrent_precleaning)
6855 {
6856   assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");  // installed via MetadataAwareOopClosure(rd) above
6857 }
6858 
6859 // Grey object rescan during pre-cleaning and second checkpoint phases --
6860 // the non-parallel version (the parallel version appears further below.)
6861 void PushAndMarkClosure::do_oop(oop obj) {
6862   // Ignore mark word verification. If during concurrent precleaning,
6863   // the object monitor may be locked. If during the checkpoint
6864   // phases, the object may already have been reached by a  different
6865   // path and may be at the end of the global overflow list (so
6866   // the mark word may be NULL).
6867   assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
6868          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6869   HeapWord* addr = (HeapWord*)obj;
6870   // Check if oop points into the CMS generation
6871   // and is not marked
6872   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6873     // a white object ...
6874     _bit_map->mark(addr);         // ... now grey
6875     // push on the marking stack (grey set)
6876     bool simulate_overflow = false;


6897            HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6898            MemRegion redirty_range = MemRegion(addr, end_card_addr);
6899            assert(!redirty_range.is_empty(), "Arithmetical tautology");
6900            _mod_union_table->mark_range(redirty_range);
6901          } else {
6902            _mod_union_table->mark(addr);
6903          }
6904          _collector->_ser_pmc_preclean_ovflw++;
6905       } else {
6906          // During the remark phase, we need to remember this oop
6907          // in the overflow list.
6908          _collector->push_on_overflow_list(obj);
6909          _collector->_ser_pmc_remark_ovflw++;
6910       }
6911     }
6912   }
6913 }
6914 
6915 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6916                                              MemRegion span,
6917                                              ReferenceDiscoverer* rd,
6918                                              CMSBitMap* bit_map,
6919                                              OopTaskQueue* work_queue):
6920   MetadataAwareOopClosure(rd),
6921   _collector(collector),
6922   _span(span),
6923   _bit_map(bit_map),
6924   _work_queue(work_queue)
6925 {
6926   assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");  // installed via MetadataAwareOopClosure(rd) above
6927 }
6928 
6929 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }  // wide-oop entry
6930 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }  // narrow-oop entry
6931 
6932 // Grey object rescan during second checkpoint phase --
6933 // the parallel version.
6934 void ParPushAndMarkClosure::do_oop(oop obj) {
6935   // In the assert below, we ignore the mark word because
6936   // this oop may point to an already visited object that is
6937   // on the overflow stack (in which case the mark word has
6938   // been hijacked for chaining into the overflow stack --
6939   // if this is the last object in the overflow stack then
6940   // its mark word will be NULL). Because this object may
6941   // have been subsequently popped off the global overflow
6942   // stack, and the mark word possibly restored to the prototypical
6943   // value, by the time we get to examined this failing assert in
6944   // the debugger, is_oop_or_null(false) may subsequently start
6945   // to hold.
6946   assert(oopDesc::is_oop_or_null(obj, true),


< prev index next >