// ... [preceding source elided] ...
  { }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  void trim_queue(size_t max);
  void handle_stack_overflow(HeapWord* lost);
  void do_yield_check() {
    if (_task->should_yield()) {
      _task->yield();
    }
  }
};

// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// in the stolen objects being scanned must already
// have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
void Par_ConcMarkingClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {    // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow ||
          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// ... [source elided] ...
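
// --------------------------------------------------------------------
// A minimal, self-contained sketch of the claim-and-push protocol used
// by Par_ConcMarkingClosure::do_oop() above, written against std::atomic
// rather than HotSpot's CMSBitMap/OopTaskQueue (ToyBitMap, ToyQueue and
// ToyOverflowStack below are illustrative stand-ins, not HotSpot API).
// The point it demonstrates: exactly one thread wins the CAS on the mark
// bit, and only that winner pushes the object, so each object enters the
// grey set once; a full local queue falls back to a shared overflow stack.
// --------------------------------------------------------------------
#include <atomic>
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

struct ToyBitMap {
  std::vector<std::atomic<unsigned> > _bits;
  explicit ToyBitMap(size_t n) : _bits(n) {}
  bool is_marked(size_t i) const { return _bits[i].load(std::memory_order_acquire) != 0; }
  // True only for the single thread that flips the bit 0 -> 1.
  bool par_mark(size_t i) {
    unsigned expected = 0;
    return _bits[i].compare_exchange_strong(expected, 1u, std::memory_order_acq_rel);
  }
};

struct ToyQueue {                          // bounded, thread-local work queue
  std::deque<size_t> _q;
  size_t _cap;
  explicit ToyQueue(size_t cap) : _cap(cap) {}
  bool push(size_t v) { if (_q.size() >= _cap) return false; _q.push_back(v); return true; }
};

struct ToyOverflowStack {                  // shared, so pushes take a lock
  std::mutex _lock;
  std::vector<size_t> _s;
  bool par_push(size_t v) { std::lock_guard<std::mutex> g(_lock); _s.push_back(v); return true; }
};

// Mirrors the do_oop() body: claim via CAS, then queue, falling back to
// the overflow stack when the local queue is full.
void mark_and_push(size_t obj, ToyBitMap& bm, ToyQueue& q, ToyOverflowStack& ovf) {
  if (!bm.is_marked(obj) && bm.par_mark(obj)) { // white -> grey; we won the claim
    if (!q.push(obj)) {
      ovf.par_push(obj);
    }
  }
}
// --------------------------------------------------------------------
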
  }

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
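
// --------------------------------------------------------------------
// A simplified sketch of the yield protocol completed above, assuming a
// std::mutex and std::atomic flags in place of the bit map lock and the
// ConcurrentMarkSweepThread/CMSCollector queries (all names below are
// illustrative). The pattern: with the lock already released, nap in
// short slices while a yield is requested and no foreground collection
// is pending, then reacquire the lock and restart the phase timer.
// --------------------------------------------------------------------
#include <atomic>
#include <chrono>
#include <mutex>
#include <thread>

static std::atomic<bool> g_yield_requested(false); // stand-in for should_yield()
static std::atomic<bool> g_foreground_gc(false);   // stand-in for foregroundGCIsActive()
static std::mutex        g_bitmap_lock;            // stand-in for _bit_map->lock()
static const unsigned    kYieldSleepCount = 8;     // analogous to CMSYieldSleepCount

void do_yield_work_sketch() {
  // (Caller has already unlocked g_bitmap_lock and stopped the timer.)
  for (unsigned i = 0; i < kYieldSleepCount &&
                       g_yield_requested.load() &&
                       !g_foreground_gc.load(); ++i) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  g_bitmap_lock.lock();  // reacquire before resuming the concurrent scan
  // ... restart the phase timer here, as _collector->startTimer() does ...
}
// --------------------------------------------------------------------
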

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore mark word because we are running concurrent with mutators
  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
#ifdef ASSERT
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // ASSERT
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
// ... [source elided] ...
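
// --------------------------------------------------------------------
// An illustrative sketch of the precise-vs-whole rescan choice above,
// under the assumption that reference fields can be enumerated by word
// offset (ToyObj and MemRegionLite are made-up types, not HotSpot API).
// Object arrays can be huge, so only the slice intersecting the dirty
// region mr is rescanned; any other object is rescanned in its entirety.
// --------------------------------------------------------------------
#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

struct MemRegionLite { size_t start, end; };  // [start, end), in words

struct ToyObj {
  bool   is_obj_array;
  size_t begin, end;                          // object's address range, in words
  std::vector<size_t> ref_offsets;            // word addresses of reference fields
};

void rescan(const ToyObj& obj, MemRegionLite mr,
            const std::function<void(size_t)>& scan_field) {
  if (obj.is_obj_array) {
    // Precisely marked: visit only the fields inside the dirty region.
    size_t lo = std::max(obj.begin, mr.start);
    size_t hi = std::min(obj.end,   mr.end);
    for (size_t f : obj.ref_offsets) {
      if (f >= lo && f < hi) scan_field(f);
    }
  } else {
    // Non-arrays are rescanned in their entirety.
    for (size_t f : obj.ref_offsets) scan_field(f);
  }
}
// --------------------------------------------------------------------
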
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack)
{ }

void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _mark_stack->reset();  // discard stack contents
  _mark_stack->expand(); // expand the stack if possible
}

void PushAndMarkVerifyClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _verification_bm->mark(addr);   // now grey
    if (!_cms_bm->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
                             p2i(addr));
      fatal("... aborting");
    }

    if (!_mark_stack->push(obj)) { // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _mark_stack->capacity());
      }
      assert(_mark_stack->isFull(), "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    // anything including and to the right of _finger
// ... [source elided] ...
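
// --------------------------------------------------------------------
// A compact sketch of the verification idea above: marking is re-run
// into a scratch bitmap, and every object the re-run reaches must
// already be marked in the collector's bitmap -- a miss is a marking
// bug. Plain std::vector<bool> bitmaps stand in for CMSBitMap, and the
// reachable set is taken as a precomputed list for brevity.
// --------------------------------------------------------------------
#include <cstdio>
#include <vector>

// Returns the first address the original mark pass missed, or -1 if
// verification passes (analogous to the fatal() above on a miss).
long verify_marks(const std::vector<bool>& cms_bm,     // original marks
                  std::vector<bool>& verification_bm,  // scratch re-marks
                  const std::vector<size_t>& reachable) {
  for (size_t addr : reachable) {
    if (!verification_bm[addr]) {
      verification_bm[addr] = true;                    // now grey in the scratch map
      if (!cms_bm[addr]) {
        std::fprintf(stderr, "object at %zu should have been marked\n", addr);
        return (long)addr;
      }
    }
  }
  return -1;
}
// --------------------------------------------------------------------
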
  _markStack->expand(); // expand the stack if possible
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();  // discard stack contents
  _overflow_stack->expand(); // expand the stack if possible
}
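
// --------------------------------------------------------------------
// Sketch of the discard-and-restart protocol shared by the two
// handle_stack_overflow() functions above (the parallel one differs
// only in taking the stack's lock first). On overflow the stack's
// contents are thrown away, but the least discarded address is folded
// into a restart address so a later pass can resume marking from there:
// work is re-done, never lost. ToyOverflowState is an illustrative
// stand-in, and expand() is approximated with a reserve() call.
// --------------------------------------------------------------------
#include <algorithm>
#include <cstddef>
#include <limits>
#include <mutex>
#include <vector>

struct ToyOverflowState {
  std::mutex          par_lock;              // taken in the parallel case
  std::vector<size_t> stack;                 // the overflow/marking stack
  size_t restart_addr = std::numeric_limits<size_t>::max();

  void handle_stack_overflow(size_t lost) {
    std::lock_guard<std::mutex> g(par_lock);
    size_t ra = lost;                            // least_value(lost): min over
    for (size_t v : stack) ra = std::min(ra, v); // 'lost' and the stack entries
    restart_addr = std::min(restart_addr, ra);   // lower_restart_addr(ra)
    stack.clear();                               // discard stack contents
    stack.reserve(2 * std::max<size_t>(1, stack.capacity())); // "expand"
  }
};
// --------------------------------------------------------------------
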

void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);          // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _markStack->capacity());
        }
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}
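
// --------------------------------------------------------------------
// Sketch of the finger optimization implemented above: the marker
// sweeps its bitmap left to right, so a newly greyed object at or to
// the right of the current finger needs no stack push -- the ongoing
// sweep will reach it anyway. Only objects discovered behind the
// finger must go on the mark stack. ToyMarker is an illustrative
// single-threaded stand-in for the closure and its bitmap.
// --------------------------------------------------------------------
#include <cstddef>
#include <vector>

struct ToyMarker {
  std::vector<bool>   bitmap;
  std::vector<size_t> mark_stack;
  size_t              finger;   // next bitmap index the sweep will visit
  explicit ToyMarker(size_t n) : bitmap(n), finger(0) {}

  void do_oop(size_t addr) {
    if (!bitmap[addr]) {
      bitmap[addr] = true;      // now grey
      if (addr < finger) {
        // The sweep has already passed this address; without the push,
        // its references would never be scanned.
        mark_stack.push_back(addr);
      }
      // addr >= finger: the remainder of the sweep will scan it.
    }
  }
};
// --------------------------------------------------------------------
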

void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

void Par_PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _whole_span and isn't yet grey or black
    // We read the global_finger (volatile read) strictly after marking oop
    bool res = _bit_map->par_mark(addr);    // now grey
    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
    // Should we push this marked oop on our stack?
    // -- if someone else marked it, nothing to do
    // -- if target oop is above global finger, nothing to do
    // -- if target oop is in chunk and above local finger,
    //    then nothing to do
    // -- else push on work queue
    if (   !res           // someone else marked it, they will deal with it
        || (addr >= *gfa) // will be scanned in a later task
        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
      return;
    }
    // the bit map iteration has already either passed, or
    // sampled, this bit in the bit map; we'll need to
    // use the marking stack to scan this oop's oops.
// ... [source elided] ...
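
// --------------------------------------------------------------------
// Sketch of the parallel push filter above. Each worker owns a chunk of
// the span and keeps a local finger; a shared global finger bounds the
// region that will still be handed out as future tasks. A marked oop is
// pushed only when (a) this thread won the mark and (b) neither a later
// task nor the remainder of this chunk's own sweep will scan it. Names
// below are illustrative, not HotSpot API.
// --------------------------------------------------------------------
#include <atomic>
#include <cstddef>

struct ToyChunk { size_t begin, end; };  // this worker's chunk of the span

// Mirrors the if-return above; call only after par_mark() has been tried.
bool needs_push(bool won_mark,           // result of par_mark(addr)
                size_t addr,
                const std::atomic<size_t>& global_finger,
                ToyChunk chunk,
                size_t local_finger) {
  if (!won_mark) {
    return false;                        // someone else marked it
  }
  // Read the global finger strictly after the mark, as the code above does.
  if (addr >= global_finger.load(std::memory_order_acquire)) {
    return false;                        // will be scanned in a later task
  }
  if (addr >= chunk.begin && addr < chunk.end && addr >= local_finger) {
    return false;                        // later in this chunk's own sweep
  }
  return true;                           // behind both fingers: push it
}
// --------------------------------------------------------------------
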
  MetadataAwareOopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mod_union_table(mod_union_table),
  _mark_stack(mark_stack),
  _concurrent_precleaning(concurrent_precleaning)
{
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below).
void PushAndMarkClosure::do_oop(oop obj) {
  // Ignore mark word verification. During concurrent precleaning,
  // the object monitor may be locked. During the checkpoint
  // phases, the object may already have been reached by a different
  // path and may be at the end of the global overflow list (so
  // the mark word may be NULL).
  assert(obj->is_oop_or_null(true /* ignore mark word */),
         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    _bit_map->mark(addr);         // ... now grey
    // push on the marking stack (grey set)
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
        // During precleaning we can just dirty the appropriate card(s)
        // in the mod union table, thus ensuring that the object remains
        // in the grey set, and continue. In the case of object arrays
// ... [source elided] ...
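
// --------------------------------------------------------------------
// Sketch of the precleaning fallback described above: when the mark
// stack is full, the object can be left "grey by card" -- dirty the mod
// union table card(s) covering it, and the later rescan of dirty cards
// revisits it. Correctness is preserved; only rescan work is added. A
// bit-per-card vector stands in for the mod union table, with HotSpot's
// 512-byte card size.
// --------------------------------------------------------------------
#include <cassert>
#include <cstddef>
#include <vector>

static const size_t kCardShift = 9;      // 512-byte cards

struct ToyModUnionTable {
  std::vector<bool> dirty;               // one bit per card
  explicit ToyModUnionTable(size_t num_cards) : dirty(num_cards, false) {}
  void dirty_range(size_t start_addr, size_t end_addr) { // [start, end) bytes
    assert(end_addr > start_addr);
    for (size_t c = start_addr >> kCardShift; c <= (end_addr - 1) >> kCardShift; ++c) {
      dirty[c] = true;
    }
  }
};

// On mark-stack overflow during precleaning, record the object by card
// instead of by stack entry.
void overflow_during_precleaning(size_t obj_addr, size_t obj_size_bytes,
                                 ToyModUnionTable& mut) {
  mut.dirty_range(obj_addr, obj_addr + obj_size_bytes);
}
// --------------------------------------------------------------------
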
}

void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

// Grey object rescan during second checkpoint phase --
// the parallel version.
void Par_PushAndMarkClosure::do_oop(oop obj) {
  // In the assert below, we ignore the mark word because
  // this oop may point to an already visited object that is
  // on the overflow stack (in which case the mark word has
  // been hijacked for chaining into the overflow stack --
  // if this is the last object in the overflow stack then
  // its mark word will be NULL). Because this object may
  // have been subsequently popped off the global overflow
  // stack, and the mark word possibly restored to the prototypical
  // value, by the time we get to examine this failing assert in
  // the debugger, is_oop_or_null(false) may subsequently start
  // to hold.
  assert(obj->is_oop_or_null(true),
         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {    // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(obj)) {
        _collector->par_push_on_overflow_list(obj);
// ... [source elided] ...
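
// --------------------------------------------------------------------
// Sketch of a lock-free global overflow list like the one targeted by
// par_push_on_overflow_list() above. HotSpot chains objects through
// their own hijacked mark words (which is why the asserts earlier must
// ignore the mark word); the sketch uses an explicit node and a classic
// Treiber-stack CAS push, which shows the same publication pattern
// without the mark-word trick.
// --------------------------------------------------------------------
#include <atomic>
#include <cstddef>

struct OvNode {
  size_t  payload;                       // stands in for the oop
  OvNode* next;
};

struct ToyOverflowList {
  std::atomic<OvNode*> head{nullptr};

  void par_push(OvNode* n) {
    OvNode* old = head.load(std::memory_order_relaxed);
    do {
      n->next = old;                     // link before publishing
    } while (!head.compare_exchange_weak(old, n,
                                         std::memory_order_release,
                                         std::memory_order_relaxed));
  }

  OvNode* pop_all() {                    // drain for later processing
    return head.exchange(nullptr, std::memory_order_acquire);
  }
};
// --------------------------------------------------------------------
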

size_t SweepClosure::do_blk_careful(HeapWord* addr) {
  FreeChunk* fc = (FreeChunk*)addr;
  size_t res;

  // Check if we are done sweeping. Below we check "addr >= _limit" rather
  // than "addr == _limit" because although _limit was a block boundary when
  // we started the sweep, it may no longer be one because heap expansion
  // may have caused us to coalesce the block ending at the address _limit
  // with a newly expanded chunk (this happens when _limit was set to the
  // previous _end of the space), so we may have stepped past _limit:
  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
  if (addr >= _limit) { // we have swept up to or past the limit: finish up
    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
           "sweep _limit out of bounds");
    assert(addr < _sp->end(), "addr out of bounds");
    // Flush any free range we might be holding as a single
    // coalesced chunk to the appropriate free list.
    if (inFreeRange()) {
      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
             err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())));
      flush_cur_free_chunk(freeFinger(),
                           pointer_delta(addr, freeFinger()));
      if (CMSTraceSweeper) {
        gclog_or_tty->print("Sweep: last chunk: ");
        gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
                            "[coalesced:%d]\n",
                            p2i(freeFinger()), pointer_delta(addr, freeFinger()),
                            lastFreeRangeCoalesced() ? 1 : 0);
      }
    }

    // help the iterator loop finish
    return pointer_delta(_sp->end(), addr);
  }

  assert(addr < _limit, "sweep invariant");
  // check if we should yield
  do_yield_check(addr);
  if (fc->is_free()) {
    // Chunk that is already free
// ... [source elided] ...
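
// --------------------------------------------------------------------
// Small sketch of the "addr >= _limit" termination test above, with
// plain size_t word addresses standing in for HeapWord*. Because heap
// expansion can coalesce the block that used to end exactly at _limit
// with newly added space, the sweep cursor may legitimately step past
// _limit instead of landing on it, so the test must be >=, not ==.
// --------------------------------------------------------------------
#include <cassert>
#include <cstddef>

// Returns how many words the iterator should skip, mirroring the
// pointer_delta(_sp->end(), addr) early return above.
size_t sweep_step(size_t addr, size_t limit, size_t space_end, size_t block_size) {
  if (addr >= limit) {         // swept up to OR PAST the limit: finish up
    assert(addr < space_end);
    // ... flush any pending coalesced free range here ...
    return space_end - addr;   // help the iterator loop finish
  }
  // ... otherwise examine the block at addr ...
  return block_size;           // advance by the examined block's size
}
// --------------------------------------------------------------------
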
    initialize_free_range((HeapWord*)fc, fcInFreeLists);
  }
}

// Lookahead flush:
// If we are tracking a free range, and this is the last chunk that
// we'll look at because its end crosses past _limit, we'll preemptively
// flush it along with any free range we may be holding on to. Note that
// this can be the case only for an already free or freshly garbage
// chunk. If this block is an object, it can never straddle
// over _limit. The "straddling" occurs when _limit is set at
// the previous end of the space when this cycle started, and
// a subsequent heap expansion caused the previously co-terminal
// free block to be coalesced with the newly expanded portion,
// thus rendering _limit a non-block-boundary, making it dangerous
// for the sweeper to step over and examine.
void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
  assert(_sp->used_region().contains(eob - 1),
         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                 p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
  if (eob >= _limit) {
    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
                             "[" PTR_FORMAT "," PTR_FORMAT ")",
                             p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
    }
    // Return the storage we are tracking back into the free lists.
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr("Flushing ... ");
    }
    assert(freeFinger() < eob, "Error");
    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
  }
}
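
// --------------------------------------------------------------------
// Sketch of the lookahead flush above, with size_t word addresses in
// place of HeapWord* and a callback in place of the free lists. When
// the chunk under examination is the last one the sweep may look at
// (its end-of-block reaches or crosses _limit), the free range tracked
// so far is flushed through the end of that chunk, so the sweeper never
// steps over the possibly non-block-boundary _limit.
// --------------------------------------------------------------------
#include <cassert>
#include <cstddef>
#include <functional>

void lookahead_and_flush_sketch(size_t chunk_addr, size_t chunk_size,
                                size_t limit, size_t free_finger,
                                const std::function<void(size_t, size_t)>& flush) {
  const size_t eob = chunk_addr + chunk_size;  // end of the examined chunk
  if (eob >= limit) {                          // last chunk we may look at
    assert(free_finger < eob);
    // Return [free_finger, eob) to the free lists as one coalesced piece.
    flush(free_finger, eob - free_finger);
  }
}
// --------------------------------------------------------------------
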

void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  assert(size > 0,
// ... [remainder of source elided] ...