#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"
74
// statics

// The single CMSCollector shared by all ConcurrentMarkSweepGeneration
// instances; NULL until the collector is set up.
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
// Records that a full GC has been requested; presumably set/cleared by the
// collector's request machinery elsewhere in this file -- confirm there.
bool CMSCollector::_full_gc_requested = false;
677 }
678
679 size_t ConcurrentMarkSweepGeneration::max_available() const {
680 return free() + _virtual_space.uncommitted_size();
681 }
682
683 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
684 size_t available = max_available();
685 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
686 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
687 log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
688 res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
689 return res;
690 }
691
692 // At a promotion failure dump information on block layout in heap
693 // (cms old generation).
694 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
695 Log(gc, promotion) log;
696 if (log.is_trace()) {
697 ResourceMark rm;
698 cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
699 }
700 }
701
702 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
703 // Clear the promotion information. These pointers can be adjusted
704 // along with all the other pointers into the heap but
705 // compaction is expected to be a rare event with
706 // a heap using cms so don't do it without seeing the need.
707 for (uint i = 0; i < ParallelGCThreads; i++) {
708 _par_gc_thread_states[i]->promo.reset();
709 }
710 }
711
712 void ConcurrentMarkSweepGeneration::compute_new_size() {
713 assert_locked_or_safepoint(Heap_lock);
714
715 // If incremental collection failed, we just want to expand
716 // to the limit.
717 if (incremental_collection_failed()) {
718 clear_incremental_collection_failed();
2229
2230 ////////////////////////////////////////////////////////
2231 // CMS Verification Support
2232 ////////////////////////////////////////////////////////
2233 // Following the remark phase, the following invariant
2234 // should hold -- each object in the CMS heap which is
2235 // marked in markBitMap() should be marked in the verification_mark_bm().
2236
2237 class VerifyMarkedClosure: public BitMapClosure {
2238 CMSBitMap* _marks;
2239 bool _failed;
2240
2241 public:
2242 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2243
2244 bool do_bit(size_t offset) {
2245 HeapWord* addr = _marks->offsetToHeapWord(offset);
2246 if (!_marks->isMarked(addr)) {
2247 Log(gc, verify) log;
2248 ResourceMark rm;
2249 oop(addr)->print_on(log.error_stream());
2250 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2251 _failed = true;
2252 }
2253 return true;
2254 }
2255
2256 bool failed() { return _failed; }
2257 };
2258
2259 bool CMSCollector::verify_after_remark() {
2260 GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2261 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2262 static bool init = false;
2263
2264 assert(SafepointSynchronize::is_at_safepoint(),
2265 "Else mutations in object graph will make answer suspect");
2266 assert(have_cms_token(),
2267 "Else there may be mutual interference in use of "
2268 " verification data structures");
2269 assert(_collectorState > Marking && _collectorState <= Sweeping,
2354 while (_restart_addr != NULL) {
2355 // Deal with stack overflow: by restarting at the indicated
2356 // address.
2357 HeapWord* ra = _restart_addr;
2358 markFromRootsClosure.reset(ra);
2359 _restart_addr = NULL;
2360 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2361 }
2362 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2363 verify_work_stacks_empty();
2364
2365 // Marking completed -- now verify that each bit marked in
2366 // verification_mark_bm() is also marked in markBitMap(); flag all
2367 // errors by printing corresponding objects.
2368 VerifyMarkedClosure vcl(markBitMap());
2369 verification_mark_bm()->iterate(&vcl);
2370 if (vcl.failed()) {
2371 Log(gc, verify) log;
2372 log.error("Failed marking verification after remark");
2373 ResourceMark rm;
2374 gch->print_on(log.error_stream());
2375 fatal("CMS: failed marking verification after remark");
2376 }
2377 }
2378
// Checks that every oop referenced from any Klass is marked in the
// supplied bit map; a violation trips the guarantee.
class VerifyKlassOopsKlassClosure : public KlassClosure {
  class VerifyKlassOopsClosure : public OopClosure {
    CMSBitMap* _bitmap;
  public:
    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
    // Every non-NULL oop reachable from a Klass must already be marked.
    void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
    // Narrow oops are not expected here.
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  } _oop_closure;
public:
  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
  void do_klass(Klass* k) {
    k->oops_do(&_oop_closure);
  }
};
2393
2394 void CMSCollector::verify_after_remark_work_2() {
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm)
{
  // No reference processing is done by this closure, and the
  // verification bit map must cover the whole span being verified.
  assert(ref_processor() == NULL, "deliberately left NULL");
  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
5872
5873 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5874 // if p points into _span, then mark corresponding bit in _markBitMap
5875 assert(obj->is_oop(), "expected an oop");
5876 HeapWord* addr = (HeapWord*)obj;
5877 if (_span.contains(addr)) {
5878 _verification_bm->mark(addr);
5879 if (!_cms_bm->isMarked(addr)) {
5880 Log(gc, verify) log;
5881 ResourceMark rm;
5882 oop(addr)->print_on(log.error_stream());
5883 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5884 fatal("... aborting");
5885 }
5886 }
5887 }
5888
// Both typed variants delegate to the common do_oop_work implementation.
void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5891
5892 //////////////////////////////////////////////////
5893 // MarkRefsIntoAndScanClosure
5894 //////////////////////////////////////////////////
5895
5896 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5897 ReferenceProcessor* rp,
5898 CMSBitMap* bit_map,
5899 CMSBitMap* mod_union_table,
5900 CMSMarkStack* mark_stack,
5901 CMSCollector* collector,
5902 bool should_yield,
6647 // Upon stack overflow, we discard (part of) the stack,
6648 // remembering the least address amongst those discarded
6649 // in CMSCollector's _restart_address.
6650 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6651 // Remember the least grey address discarded
6652 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6653 _collector->lower_restart_addr(ra);
6654 _mark_stack->reset(); // discard stack contents
6655 _mark_stack->expand(); // expand the stack if possible
6656 }
6657
6658 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6659 assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6660 HeapWord* addr = (HeapWord*)obj;
6661 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6662 // Oop lies in _span and isn't yet grey or black
6663 _verification_bm->mark(addr); // now grey
6664 if (!_cms_bm->isMarked(addr)) {
6665 Log(gc, verify) log;
6666 ResourceMark rm;
6667 oop(addr)->print_on(log.error_stream());
6668 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6669 fatal("... aborting");
6670 }
6671
6672 if (!_mark_stack->push(obj)) { // stack overflow
6673 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6674 assert(_mark_stack->isFull(), "Else push should have succeeded");
6675 handle_stack_overflow(addr);
6676 }
6677 // anything including and to the right of _finger
6678 // will be scanned as we iterate over the remainder of the
6679 // bit map
6680 }
6681 }
6682
6683 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6684 MemRegion span,
6685 CMSBitMap* bitMap, CMSMarkStack* markStack,
6686 HeapWord* finger, MarkFromRootsClosure* parent) :
6687 MetadataAwareOopClosure(collector->ref_processor()),
7055 st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7056 st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7057 NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7058 st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7059 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7060 }
7061
7062 #ifndef PRODUCT
7063 // Assertion checking only: no useful work in product mode --
7064 // however, if any of the flags below become product flags,
7065 // you may need to review this code to see if it needs to be
7066 // enabled in product mode.
7067 SweepClosure::~SweepClosure() {
7068 assert_lock_strong(_freelistLock);
7069 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7070 "sweep _limit out of bounds");
7071 if (inFreeRange()) {
7072 Log(gc, sweep) log;
7073 log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7074 ResourceMark rm;
7075 print_on(log.error_stream());
7076 ShouldNotReachHere();
7077 }
7078
7079 if (log_is_enabled(Debug, gc, sweep)) {
7080 log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7081 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7082 log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7083 _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7084 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7085 log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7086 }
7087
7088 if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7089 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7090 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7091 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7092 log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes",
7093 returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7094 }
7095 log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
|
38 #include "gc/serial/genMarkSweep.hpp"
39 #include "gc/serial/tenuredGeneration.hpp"
40 #include "gc/shared/adaptiveSizePolicy.hpp"
41 #include "gc/shared/cardGeneration.inline.hpp"
42 #include "gc/shared/cardTableRS.hpp"
43 #include "gc/shared/collectedHeap.inline.hpp"
44 #include "gc/shared/collectorCounters.hpp"
45 #include "gc/shared/collectorPolicy.hpp"
46 #include "gc/shared/gcLocker.inline.hpp"
47 #include "gc/shared/gcPolicyCounters.hpp"
48 #include "gc/shared/gcTimer.hpp"
49 #include "gc/shared/gcTrace.hpp"
50 #include "gc/shared/gcTraceTime.inline.hpp"
51 #include "gc/shared/genCollectedHeap.hpp"
52 #include "gc/shared/genOopClosures.inline.hpp"
53 #include "gc/shared/isGCActiveMark.hpp"
54 #include "gc/shared/referencePolicy.hpp"
55 #include "gc/shared/strongRootsScope.hpp"
56 #include "gc/shared/taskqueue.inline.hpp"
57 #include "logging/log.hpp"
58 #include "logging/logStream.hpp"
59 #include "memory/allocation.hpp"
60 #include "memory/iterator.inline.hpp"
61 #include "memory/padded.hpp"
62 #include "memory/resourceArea.hpp"
63 #include "oops/oop.inline.hpp"
64 #include "prims/jvmtiExport.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/globals_extension.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/java.hpp"
69 #include "runtime/orderAccess.inline.hpp"
70 #include "runtime/timer.hpp"
71 #include "runtime/vmThread.hpp"
72 #include "services/memoryService.hpp"
73 #include "services/runtimeService.hpp"
74 #include "utilities/stack.inline.hpp"
75
// statics

// The single CMSCollector shared by all ConcurrentMarkSweepGeneration
// instances; NULL until the collector is set up.
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
// Records that a full GC has been requested; presumably set/cleared by the
// collector's request machinery elsewhere in this file -- confirm there.
bool CMSCollector::_full_gc_requested = false;
678 }
679
680 size_t ConcurrentMarkSweepGeneration::max_available() const {
681 return free() + _virtual_space.uncommitted_size();
682 }
683
684 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
685 size_t available = max_available();
686 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
687 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
688 log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
689 res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
690 return res;
691 }
692
693 // At a promotion failure dump information on block layout in heap
694 // (cms old generation).
695 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
696 Log(gc, promotion) log;
697 if (log.is_trace()) {
698 LogStream ls(log.trace());
699 cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
700 }
701 }
702
703 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
704 // Clear the promotion information. These pointers can be adjusted
705 // along with all the other pointers into the heap but
706 // compaction is expected to be a rare event with
707 // a heap using cms so don't do it without seeing the need.
708 for (uint i = 0; i < ParallelGCThreads; i++) {
709 _par_gc_thread_states[i]->promo.reset();
710 }
711 }
712
713 void ConcurrentMarkSweepGeneration::compute_new_size() {
714 assert_locked_or_safepoint(Heap_lock);
715
716 // If incremental collection failed, we just want to expand
717 // to the limit.
718 if (incremental_collection_failed()) {
719 clear_incremental_collection_failed();
2230
2231 ////////////////////////////////////////////////////////
2232 // CMS Verification Support
2233 ////////////////////////////////////////////////////////
2234 // Following the remark phase, the following invariant
2235 // should hold -- each object in the CMS heap which is
2236 // marked in markBitMap() should be marked in the verification_mark_bm().
2237
// Closure applied to the verification bit map: every bit set there must
// also be set in the bit map handed to the constructor. Any discrepancy
// is logged (object dump plus address) and latched in _failed.
class VerifyMarkedClosure: public BitMapClosure {
  CMSBitMap* _marks;   // bit map being checked against
  bool _failed;        // set if any verification bit lacked a mark

 public:
  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}

  bool do_bit(size_t offset) {
    HeapWord* addr = _marks->offsetToHeapWord(offset);
    if (!_marks->isMarked(addr)) {
      Log(gc, verify) log;
      ResourceMark rm;
      // NOTE(review): the LogStream is written to regardless of whether
      // error-level logging is enabled -- confirm this is intended.
      LogStream ls(log.error());
      oop(addr)->print_on(&ls);
      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
      _failed = true;
    }
    // Always continue iterating so every discrepancy is reported.
    return true;
  }

  bool failed() { return _failed; }
};
2261
2262 bool CMSCollector::verify_after_remark() {
2263 GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2264 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2265 static bool init = false;
2266
2267 assert(SafepointSynchronize::is_at_safepoint(),
2268 "Else mutations in object graph will make answer suspect");
2269 assert(have_cms_token(),
2270 "Else there may be mutual interference in use of "
2271 " verification data structures");
2272 assert(_collectorState > Marking && _collectorState <= Sweeping,
2357 while (_restart_addr != NULL) {
2358 // Deal with stack overflow: by restarting at the indicated
2359 // address.
2360 HeapWord* ra = _restart_addr;
2361 markFromRootsClosure.reset(ra);
2362 _restart_addr = NULL;
2363 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2364 }
2365 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2366 verify_work_stacks_empty();
2367
2368 // Marking completed -- now verify that each bit marked in
2369 // verification_mark_bm() is also marked in markBitMap(); flag all
2370 // errors by printing corresponding objects.
2371 VerifyMarkedClosure vcl(markBitMap());
2372 verification_mark_bm()->iterate(&vcl);
2373 if (vcl.failed()) {
2374 Log(gc, verify) log;
2375 log.error("Failed marking verification after remark");
2376 ResourceMark rm;
2377 // Unconditional write?
2378 LogStream ls(log.error());
2379 gch->print_on(&ls);
2380 fatal("CMS: failed marking verification after remark");
2381 }
2382 }
2383
// Checks that every oop referenced from any Klass is marked in the
// supplied bit map; a violation trips the guarantee.
class VerifyKlassOopsKlassClosure : public KlassClosure {
  class VerifyKlassOopsClosure : public OopClosure {
    CMSBitMap* _bitmap;
  public:
    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
    // Every non-NULL oop reachable from a Klass must already be marked.
    void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
    // Narrow oops are not expected here.
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  } _oop_closure;
public:
  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
  void do_klass(Klass* k) {
    k->oops_do(&_oop_closure);
  }
};
2398
2399 void CMSCollector::verify_after_remark_work_2() {
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm)
{
  // No reference processing is done by this closure, and the
  // verification bit map must cover the whole span being verified.
  assert(ref_processor() == NULL, "deliberately left NULL");
  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
5877
// Mark the bit for obj in the verification bit map if obj lies in _span,
// and check that the main CMS bit map agrees; on disagreement, dump the
// object and abort.
void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    _verification_bm->mark(addr);
    if (!_cms_bm->isMarked(addr)) {
      Log(gc, verify) log;
      ResourceMark rm;
      // NOTE(review): the LogStream is written to regardless of whether
      // error-level logging is enabled -- confirm this is intended.
      LogStream ls(log.error());
      oop(addr)->print_on(&ls);
      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
      fatal("... aborting");
    }
  }
}
5895
// Both typed variants delegate to the common do_oop_work implementation.
void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5898
5899 //////////////////////////////////////////////////
5900 // MarkRefsIntoAndScanClosure
5901 //////////////////////////////////////////////////
5902
5903 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5904 ReferenceProcessor* rp,
5905 CMSBitMap* bit_map,
5906 CMSBitMap* mod_union_table,
5907 CMSMarkStack* mark_stack,
5908 CMSCollector* collector,
5909 bool should_yield,
6654 // Upon stack overflow, we discard (part of) the stack,
6655 // remembering the least address amongst those discarded
6656 // in CMSCollector's _restart_address.
6657 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6658 // Remember the least grey address discarded
6659 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6660 _collector->lower_restart_addr(ra);
6661 _mark_stack->reset(); // discard stack contents
6662 _mark_stack->expand(); // expand the stack if possible
6663 }
6664
6665 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6666 assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6667 HeapWord* addr = (HeapWord*)obj;
6668 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6669 // Oop lies in _span and isn't yet grey or black
6670 _verification_bm->mark(addr); // now grey
6671 if (!_cms_bm->isMarked(addr)) {
6672 Log(gc, verify) log;
6673 ResourceMark rm;
6674 // Unconditional write?
6675 LogStream ls(log.error());
6676 oop(addr)->print_on(&ls);
6677 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6678 fatal("... aborting");
6679 }
6680
6681 if (!_mark_stack->push(obj)) { // stack overflow
6682 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6683 assert(_mark_stack->isFull(), "Else push should have succeeded");
6684 handle_stack_overflow(addr);
6685 }
6686 // anything including and to the right of _finger
6687 // will be scanned as we iterate over the remainder of the
6688 // bit map
6689 }
6690 }
6691
6692 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6693 MemRegion span,
6694 CMSBitMap* bitMap, CMSMarkStack* markStack,
6695 HeapWord* finger, MarkFromRootsClosure* parent) :
6696 MetadataAwareOopClosure(collector->ref_processor()),
7064 st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7065 st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7066 NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7067 st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7068 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7069 }
7070
7071 #ifndef PRODUCT
7072 // Assertion checking only: no useful work in product mode --
7073 // however, if any of the flags below become product flags,
7074 // you may need to review this code to see if it needs to be
7075 // enabled in product mode.
7076 SweepClosure::~SweepClosure() {
7077 assert_lock_strong(_freelistLock);
7078 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7079 "sweep _limit out of bounds");
7080 if (inFreeRange()) {
7081 Log(gc, sweep) log;
7082 log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7083 ResourceMark rm;
7084 // Unconditional write?
7085 LogStream ls(log.error());
7086 print_on(&ls);
7087 ShouldNotReachHere();
7088 }
7089
7090 if (log_is_enabled(Debug, gc, sweep)) {
7091 log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7092 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7093 log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7094 _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7095 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7096 log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7097 }
7098
7099 if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7100 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7101 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7102 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7103 log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes",
7104 returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7105 }
7106 log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
|