2628 HeapWord* CMSCollector::block_start(const void* p) const {
2629 const HeapWord* addr = (HeapWord*)p;
2630 if (_span.contains(p)) {
2631 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2632 return _cmsGen->cmsSpace()->block_start(p);
2633 }
2634 }
2635 return NULL;
2636 }
2637 #endif
2638
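// Expand the CMS generation to satisfy an allocation request of word_size
// words, then retry the allocation while holding the free list lock.
// GCExpandToAllocateDelayMillis, when non-zero, inserts a diagnostic delay
// between the expansion and the allocation attempt.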
2639 HeapWord*
2640 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2641 bool tlab,
2642 bool parallel) {
2643 CMSSynchronousYieldRequest yr;
2644 assert(!tlab, "Can't deal with TLAB allocation");
2645 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
2646 expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2647 if (GCExpandToAllocateDelayMillis > 0) {
2648 os::naked_sleep(GCExpandToAllocateDelayMillis);
2649 }
2650 return have_lock_and_allocate(word_size, tlab);
2651 }
2652
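// Expand the generation on behalf of a GC-related allocation and, on
// success, record "cause" so that shouldConcurrentCollect() can take the
// expansion into account.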
2653 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2654 size_t bytes,
2655 size_t expand_bytes,
2656 CMSExpansionCause::Cause cause)
2657 {
2658
2659 bool success = expand(bytes, expand_bytes);
2660
2661 // remember why we expanded; this information is used
2662 // by shouldConcurrentCollect() when making decisions on whether to start
2663 // a new CMS cycle.
2664 if (success) {
2665 set_expansion_cause(cause);
2666 log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause));
2667 }
2668 }
2669
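// Slow path for refilling a per-thread parallel allocation buffer: retry
// ps->lab.alloc() in a loop, expanding the generation between attempts.
// ParGCRareEvent_lock serializes these (rare) expansions among the
// parallel GC worker threads.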
2670 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2671 HeapWord* res = NULL;
2672 MutexLocker x(ParGCRareEvent_lock);
2673 while (true) {
2674 // Expansion by some other thread might make alloc OK now:
2675 res = ps->lab.alloc(word_sz);
2676 if (res != NULL) return res;
2677 // If there's not enough expansion space available, give up.
2678 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2679 return NULL;
2680 }
2681 // Otherwise, we try expansion.
2682 expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2683 // Now go around the loop and try alloc again;
2684 // A competing par_promote might beat us to the expansion space,
2685 // so we may go around the loop again if promotion fails again.
2686 if (GCExpandToAllocateDelayMillis > 0) {
2687 os::naked_sleep(GCExpandToAllocateDelayMillis);
2688 }
2689 }
2690 }
2691
2692
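// Ensure there is spooling space for promotion bookkeeping, expanding the
// generation if necessary; returns false if the required refill size
// cannot be obtained even by expansion.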
2693 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2694 PromotionInfo* promo) {
2695 MutexLocker x(ParGCRareEvent_lock);
2696 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2697 while (true) {
2698 // Expansion by some other thread might make alloc OK now:
2699 if (promo->ensure_spooling_space()) {
2700 assert(promo->has_spooling_space(),
2701 "Post-condition of successful ensure_spooling_space()");
2702 return true;
2703 }
2704 // If there's not enough expansion space available, give up.
2705 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2706 return false;
2707 }
2708 // Otherwise, we try expansion.
2709 expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2710 // Now go around the loop and try alloc again;
2711 // A competing allocation might beat us to the expansion space,
2712 // so we may go around the loop again if allocation fails again.
2713 if (GCExpandToAllocateDelayMillis > 0) {
2714 os::naked_sleep(GCExpandToAllocateDelayMillis);
2715 }
2716 }
2717 }
2718
2719 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2720 // Only shrink if a compaction was done so that all the free space
2721 // in the generation is in a contiguous block at the end.
2722 if (did_compact()) {
2723 CardGeneration::shrink(bytes);
2724 }
2725 }
2726
2727 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2728 assert_locked_or_safepoint(Heap_lock);
2729 }
2730
2731 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2732 assert_locked_or_safepoint(Heap_lock);
2733 assert_lock_strong(freelistLock());
2734 log_trace(gc)("Shrinking of CMS not yet implemented");
3527 // (coordinator_yield()) method that was observed to cause the
3528 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3529 // which is by default non-zero. For the other seven methods that
3530 // also perform the yield operation, we are using a different
3531 // parameter (CMSYieldSleepCount) which is by default zero. This way we
3532 // can enable the sleeping for those methods too, if necessary.
3533 // See 6442774.
3534 //
3535 // We really need to reconsider the synchronization between the GC
3536 // thread and the yield-requesting threads in the future and we
3537 // should really use wait/notify, which is the recommended
3538 // way of doing this type of interaction. Additionally, we should
3539 // consolidate the eight methods that do the yield operation, which
3540 // are almost identical, into one for better maintainability and
3541 // readability. See 6445193.
3542 //
3543 // Tony 2006.06.29
3544 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3545 ConcurrentMarkSweepThread::should_yield() &&
3546 !CMSCollector::foregroundGCIsActive(); ++i) {
3547 os::naked_short_sleep(1);
3548 }
3549
3550 ConcurrentMarkSweepThread::synchronize(true);
3551 _bit_map_lock->lock_without_safepoint_check();
3552 _collector->startTimer();
3553 }
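// A possible shape for the consolidation suggested by 6445193 (a sketch
// only, not part of this file): the eight nearly identical yield methods
// could share one helper parameterized by the locks to relinquish and the
// sleep count to honor, e.g.:
//
//   static void cms_do_yield_work(CMSCollector* collector,
//                                 Mutex** locks, int num_locks,
//                                 uintx sleep_count) {
//     // Give up the locks in reverse acquisition order, then yield.
//     for (int i = num_locks - 1; i >= 0; i--) locks[i]->unlock();
//     ConcurrentMarkSweepThread::desynchronize(true);
//     collector->stopTimer();
//     collector->incrementYields();
//     for (uintx j = 0; j < sleep_count &&
//          ConcurrentMarkSweepThread::should_yield() &&
//          !CMSCollector::foregroundGCIsActive(); ++j) {
//       os::naked_short_sleep(1);
//     }
//     // Re-acquire in the original order, without safepoint checks.
//     ConcurrentMarkSweepThread::synchronize(true);
//     for (int i = 0; i < num_locks; i++) {
//       locks[i]->lock_without_safepoint_check();
//     }
//     collector->startTimer();
//   }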
3554
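// Run the concurrent marking phase with multiple worker threads; the
// number of active workers is recalculated via WorkerPolicy before the
// marking task is dispatched.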
3555 bool CMSCollector::do_marking_mt() {
3556 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3557 uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3558 conc_workers()->active_workers(),
3559 Threads::number_of_non_daemon_threads());
3560 num_workers = conc_workers()->update_active_workers(num_workers);
3561 log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3562
3563 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
3564
3565 CMSConcMarkingTask tsk(this,
3566 cms_space,
3567 conc_workers(),
5524 HeapWord* curAddr = _markBitMap.startWord();
5525 while (curAddr < _markBitMap.endWord()) {
5526 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
5527 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5528 _markBitMap.clear_large_range(chunk);
5529 if (ConcurrentMarkSweepThread::should_yield() &&
5530 !foregroundGCIsActive() &&
5531 CMSYield) {
5532 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5533 "CMS thread should hold CMS token");
5534 assert_lock_strong(bitMapLock());
5535 bitMapLock()->unlock();
5536 ConcurrentMarkSweepThread::desynchronize(true);
5537 stopTimer();
5538 incrementYields();
5539
5540 // See the comment in coordinator_yield()
5541 for (unsigned i = 0; i < CMSYieldSleepCount &&
5542 ConcurrentMarkSweepThread::should_yield() &&
5543 !CMSCollector::foregroundGCIsActive(); ++i) {
5544 os::naked_short_sleep(1);
5545 }
5546
5547 ConcurrentMarkSweepThread::synchronize(true);
5548 bitMapLock()->lock_without_safepoint_check();
5549 startTimer();
5550 }
5551 curAddr = chunk.end();
5552 }
5553 // A successful mostly concurrent collection has been done.
5554 // Because only the full (i.e., concurrent mode failure) collections
5555 // are being measured for gc overhead limits, clean the "near" flag
5556 // and count.
5557 size_policy()->reset_gc_overhead_limit_count();
5558 _collectorState = Idling;
5559 }
5560
5561 register_gc_end();
5562 }
5563
5564 // Same as above but for STW paths
5978 }
5979
5980 void MarkRefsIntoAndScanClosure::do_yield_work() {
5981 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5982 "CMS thread should hold CMS token");
5983 assert_lock_strong(_freelistLock);
5984 assert_lock_strong(_bit_map->lock());
5985 // Relinquish the free list lock and the bit map lock
5986 _bit_map->lock()->unlock();
5987 _freelistLock->unlock();
5988 ConcurrentMarkSweepThread::desynchronize(true);
5989 _collector->stopTimer();
5990 _collector->incrementYields();
5991
5992 // See the comment in coordinator_yield()
5993 for (unsigned i = 0;
5994 i < CMSYieldSleepCount &&
5995 ConcurrentMarkSweepThread::should_yield() &&
5996 !CMSCollector::foregroundGCIsActive();
5997 ++i) {
5998 os::naked_short_sleep(1);
5999 }
6000
6001 ConcurrentMarkSweepThread::synchronize(true);
6002 _freelistLock->lock_without_safepoint_check();
6003 _bit_map->lock()->lock_without_safepoint_check();
6004 _collector->startTimer();
6005 }
6006
6007 ///////////////////////////////////////////////////////////
6008 // ParMarkRefsIntoAndScanClosure: a parallel version of
6009 // MarkRefsIntoAndScanClosure
6010 ///////////////////////////////////////////////////////////
6011 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6012 CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
6013 CMSBitMap* bit_map, OopTaskQueue* work_queue):
6014 _span(span),
6015 _bit_map(bit_map),
6016 _work_queue(work_queue),
6017 _low_water_mark(MIN2((work_queue->max_elems()/4),
6018 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6133 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6134 return size;
6135 }
6136
6137 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6138 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6139 "CMS thread should hold CMS token");
6140 assert_lock_strong(_freelistLock);
6141 assert_lock_strong(_bitMap->lock());
6142 // Relinquish the free list lock and the bit map lock
6143 _bitMap->lock()->unlock();
6144 _freelistLock->unlock();
6145 ConcurrentMarkSweepThread::desynchronize(true);
6146 _collector->stopTimer();
6147 _collector->incrementYields();
6148
6149 // See the comment in coordinator_yield()
6150 for (unsigned i = 0; i < CMSYieldSleepCount &&
6151 ConcurrentMarkSweepThread::should_yield() &&
6152 !CMSCollector::foregroundGCIsActive(); ++i) {
6153 os::naked_short_sleep(1);
6154 }
6155
6156 ConcurrentMarkSweepThread::synchronize(true);
6157 _freelistLock->lock_without_safepoint_check();
6158 _bitMap->lock()->lock_without_safepoint_check();
6159 _collector->startTimer();
6160 }
6161
6162
6163 //////////////////////////////////////////////////////////////////
6164 // SurvivorSpacePrecleanClosure
6165 //////////////////////////////////////////////////////////////////
6166 // This (single-threaded) closure is used to preclean the oops in
6167 // the survivor spaces.
6168 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6169
6170 HeapWord* addr = (HeapWord*)p;
6171 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6172 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6173 assert(p->klass_or_null() != NULL, "object should be initialized");
6200 CMSHeap::heap()->total_collections();
6201 bool abort = (_before_count != after_count) ||
6202 _collector->should_abort_preclean();
6203 return abort ? 0 : size;
6204 }
6205
6206 void SurvivorSpacePrecleanClosure::do_yield_work() {
6207 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6208 "CMS thread should hold CMS token");
6209 assert_lock_strong(_bit_map->lock());
6210 // Relinquish the bit map lock
6211 _bit_map->lock()->unlock();
6212 ConcurrentMarkSweepThread::desynchronize(true);
6213 _collector->stopTimer();
6214 _collector->incrementYields();
6215
6216 // See the comment in coordinator_yield()
6217 for (unsigned i = 0; i < CMSYieldSleepCount &&
6218 ConcurrentMarkSweepThread::should_yield() &&
6219 !CMSCollector::foregroundGCIsActive(); ++i) {
6220 os::naked_short_sleep(1);
6221 }
6222
6223 ConcurrentMarkSweepThread::synchronize(true);
6224 _bit_map->lock()->lock_without_safepoint_check();
6225 _collector->startTimer();
6226 }
6227
6228 // This closure is used to rescan the marked objects on the dirty cards
6229 // in the mod union table and the card table proper. In the parallel
6230 // case, although the bitMap is shared, we do a single read so the
6231 // isMarked() query is "safe".
6232 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6233 // Ignore mark word because we are running concurrent with mutators
6234 assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6235 HeapWord* addr = (HeapWord*)p;
6236 assert(_span.contains(addr), "we are scanning the CMS generation");
6237 bool is_obj_array = false;
6238 #ifdef ASSERT
6239 if (!_parallel) {
6240 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6351
6352 // We take a break if we've been at this for a while,
6353 // so as to avoid monopolizing the locks involved.
6354 void MarkFromRootsClosure::do_yield_work() {
6355 // First give up the locks, then yield, then re-lock
6356 // We should probably use a constructor/destructor idiom to
6357 // do this unlock/lock or modify the MutexUnlocker class to
6358 // serve our purpose (a sketch of such a helper follows this method). XXX
6359 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6360 "CMS thread should hold CMS token");
6361 assert_lock_strong(_bitMap->lock());
6362 _bitMap->lock()->unlock();
6363 ConcurrentMarkSweepThread::desynchronize(true);
6364 _collector->stopTimer();
6365 _collector->incrementYields();
6366
6367 // See the comment in coordinator_yield()
6368 for (unsigned i = 0; i < CMSYieldSleepCount &&
6369 ConcurrentMarkSweepThread::should_yield() &&
6370 !CMSCollector::foregroundGCIsActive(); ++i) {
6371 os::naked_short_sleep(1);
6372 }
6373
6374 ConcurrentMarkSweepThread::synchronize(true);
6375 _bitMap->lock()->lock_without_safepoint_check();
6376 _collector->startTimer();
6377 }
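// A sketch of the constructor/destructor idiom mentioned above
// (hypothetical; whether the existing MutexUnlocker could simply be
// reused depends on its re-locking semantics, since these yield paths
// must re-acquire without a safepoint check):
//
//   class CMSScopedYieldUnlocker : public StackObj {
//     Mutex* const _m;
//    public:
//     CMSScopedYieldUnlocker(Mutex* m) : _m(m) { _m->unlock(); }
//     ~CMSScopedYieldUnlocker() { _m->lock_without_safepoint_check(); }
//   };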
6378
6379 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6380 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6381 assert(_markStack->isEmpty(),
6382 "should drain stack to limit stack usage");
6383 // convert ptr to an oop preparatory to scanning
6384 oop obj = oop(ptr);
6385 // Ignore mark word in verification below, since we
6386 // may be running concurrent with mutators.
6387 assert(oopDesc::is_oop(obj, true), "should be an oop");
6388 assert(_finger <= ptr, "_finger runneth ahead");
6389 // advance the finger to right end of this object
6390 _finger = ptr + obj->size();
6391 assert(_finger > ptr, "we just incremented it above");
6965 } // Else, some other thread got there first
6966 }
6967 }
6968
6969 void CMSPrecleanRefsYieldClosure::do_yield_work() {
6970 Mutex* bml = _collector->bitMapLock();
6971 assert_lock_strong(bml);
6972 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6973 "CMS thread should hold CMS token");
6974
6975 bml->unlock();
6976 ConcurrentMarkSweepThread::desynchronize(true);
6977
6978 _collector->stopTimer();
6979 _collector->incrementYields();
6980
6981 // See the comment in coordinator_yield()
6982 for (unsigned i = 0; i < CMSYieldSleepCount &&
6983 ConcurrentMarkSweepThread::should_yield() &&
6984 !CMSCollector::foregroundGCIsActive(); ++i) {
6985 os::naked_short_sleep(1);
6986 }
6987
6988 ConcurrentMarkSweepThread::synchronize(true);
6989 bml->lock_without_safepoint_check();
6990
6991 _collector->startTimer();
6992 }
6993
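// Yield if a yield has been requested, then report whether a foreground
// (stop-the-world) collection is active, in which case the caller should
// cut its work short.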
6994 bool CMSPrecleanRefsYieldClosure::should_return() {
6995 if (ConcurrentMarkSweepThread::should_yield()) {
6996 do_yield_work();
6997 }
6998 return _collector->foregroundGCIsActive();
6999 }
7000
7001 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7002 assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
7003 "mr should be aligned to start at a card boundary");
7004 // We'd like to assert:
7005 // assert(mr.word_size()%CardTable::card_size_in_words == 0,
7530 }
7531
7532 // First give up the locks, then yield, then re-lock.
7533 // We should probably use a constructor/destructor idiom to
7534 // do this unlock/lock or modify the MutexUnlocker class to
7535 // serve our purpose. XXX
7536 assert_lock_strong(_bitMap->lock());
7537 assert_lock_strong(_freelistLock);
7538 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7539 "CMS thread should hold CMS token");
7540 _bitMap->lock()->unlock();
7541 _freelistLock->unlock();
7542 ConcurrentMarkSweepThread::desynchronize(true);
7543 _collector->stopTimer();
7544 _collector->incrementYields();
7545
7546 // See the comment in coordinator_yield()
7547 for (unsigned i = 0; i < CMSYieldSleepCount &&
7548 ConcurrentMarkSweepThread::should_yield() &&
7549 !CMSCollector::foregroundGCIsActive(); ++i) {
7550 os::naked_short_sleep(1);
7551 }
7552
7553 ConcurrentMarkSweepThread::synchronize(true);
7554 _freelistLock->lock_without_safepoint_check();
7555 _bitMap->lock()->lock_without_safepoint_check();
7556 _collector->startTimer();
7557 }
7558
7559 #ifndef PRODUCT
7560 // This is actually very useful in a product build if it can
7561 // be called from the debugger. Compile it into the product
7562 // as needed.
7563 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7564 return debug_cms_space->verify_chunk_in_free_list(fc);
7565 }
7566 #endif
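// For example, from a native debugger one might evaluate something like
//   call debug_verify_chunk_in_free_list((FreeChunk*)0x7f0123456780)
// (the address shown is illustrative only).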
7567
7568 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7569 log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7570 p2i(fc), fc->size());
7828 // when there are many objects in the overflow list and
7829 // there is much producer-consumer contention on the list.
7830 // *NOTE*: The overflow list manipulation code here and
7831 // in ParNewGeneration:: are very similar in shape,
7832 // except that in the ParNew case we use the old (from/eden)
7833 // copy of the object to thread the list via its klass word.
7834 // Because of the common code, if you make any changes in
7835 // the code below, please check the ParNew version to see if
7836 // similar changes might be needed.
7837 // CR 6797058 has been filed to consolidate the common code.
7838 bool CMSCollector::par_take_from_overflow_list(size_t num,
7839 OopTaskQueue* work_q,
7840 int no_of_gc_threads) {
7841 assert(work_q->size() == 0, "First empty local work queue");
7842 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7843 if (_overflow_list == NULL) {
7844 return false;
7845 }
7846 // Grab the entire list; we'll put back a suffix
7847 oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
7849 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
7850 // set to ParallelGCThreads.
7851 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7852 size_t sleep_time_millis = MAX2((size_t)1, num/100);
7853 // If the list is busy, we spin for a short while,
7854 // sleeping between attempts to get the list.
7855 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7856 os::naked_sleep(sleep_time_millis);
7857 if (_overflow_list == NULL) {
7858 // Nothing left to take
7859 return false;
7860 } else if (_overflow_list != BUSY) {
7861 // Try and grab the prefix
7862 prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
7863 }
7864 }
7865 // If the list was found to be empty, or we spun long
7866 // enough, we give up and return empty-handed. If we leave
7867 // the list in the BUSY state below, it must be the case that
7868 // some other thread holds the overflow list and will set it
7869 // to a non-BUSY state in the future.
7870 if (prefix == NULL || prefix == BUSY) {
7871 // Nothing to take or waited long enough
7872 if (prefix == NULL) {
7873 // Write back the NULL in case we overwrote it with BUSY above
7874 // and it is still the same value.
7875 Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7876 }