src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

Old version (before the change):

 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size, int level,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, level, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false)

 207 {
 208   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 209   HeapWord* end    = (HeapWord*) _virtual_space.high();
 210 
 211   _direct_allocated_words = 0;
 212   NOT_PRODUCT(
 213     _numObjectsPromoted = 0;
 214     _numWordsPromoted = 0;
 215     _numObjectsAllocated = 0;
 216     _numWordsAllocated = 0;
 217   )
 218 
 219   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 220                                            use_adaptive_freelists,
 221                                            dictionaryChoice);
 222   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 223   if (_cmsSpace == NULL) {
 224     vm_exit_during_initialization(
 225       "CompactibleFreeListSpace allocation failure");
 226   }


 858 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 859   size_t available = max_available();
 860   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 861   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 862   if (Verbose && PrintGCDetails) {
 863     gclog_or_tty->print_cr(
 864       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 865       "max_promo("SIZE_FORMAT")",
 866       res? "":" not", available, res? ">=":"<",
 867       av_promo, max_promotion_in_bytes);
 868   }
 869   return res;
 870 }
 871 
 872 // At a promotion failure, dump information on the block layout in the
 873 // heap (CMS old generation).
 874 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 875   if (CMSDumpAtPromotionFailure) {
 876     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 877   }



 878 }
 879 
 880 CompactibleSpace*
 881 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 882   return _cmsSpace;
 883 }
 884 
 885 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 886   // Clear the promotion information.  These pointers can be adjusted
 887   // along with all the other pointers into the heap, but
 888   // compaction is expected to be a rare event for a heap using CMS,
 889   // so don't do it without seeing the need.
 890   if (CollectedHeap::use_parallel_gc_threads()) {
 891     for (uint i = 0; i < ParallelGCThreads; i++) {
 892       _par_gc_thread_states[i]->promo.reset();
 893     }
 894   }
 895 }
 896 
 897 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {


1332 //  ^                                                                   |
1333 //  |------------------------ DEAD <------------------------------------|
1334 //         sweep                            mut
1335 //
1336 // While a block is in the TRANSIENT state its size cannot be determined,
1337 // so readers will either need to come back later or stall until
1338 // the size can be determined. Note that for the case of direct
1339 // allocation, P-bits, when available, may be used to determine the
1340 // size of an object that may not yet have been initialized.
1341 
1342 // Things to support parallel young-gen collection.
1343 oop
1344 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1345                                            oop old, markOop m,
1346                                            size_t word_sz) {
1347 #ifndef PRODUCT
1348   if (Universe::heap()->promotion_should_fail()) {
1349     return NULL;
1350   }
1351 #endif  // #ifndef PRODUCT



1352 
1353   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1354   PromotionInfo* promoInfo = &ps->promo;
1355   // If we are tracking promotions, then first ensure space for
1356   // promotion (including spooling space for saving the header if necessary);
1357   // then allocate and copy, then track promoted info if needed.
1358   // When tracking (see PromotionInfo::track()), the mark word may
1359   // be displaced and in this case restoration of the mark word
1360   // occurs in the (oop_since_save_marks_)iterate phase.
1361   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1362     // Out of space for allocating spooling buffers;
1363     // try expanding and allocating spooling buffers.
1364     if (!expand_and_ensure_spooling_space(promoInfo)) {
1365       return NULL;
1366     }
1367   }
1368   assert(promoInfo->has_spooling_space(), "Control point invariant");
1369   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1370   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1371   if (obj_ptr == NULL) {


3330 
3331 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3332   CMSExpansionCause::Cause cause)
3333 {
3334 
3335   bool success = expand(bytes, expand_bytes);
3336 
3337   // Remember why we expanded; this information is used
3338   // by shouldConcurrentCollect() when deciding whether to start
3339   // a new CMS cycle.
3340   if (success) {
3341     set_expansion_cause(cause);
3342     if (PrintGCDetails && Verbose) {
3343       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3344         CMSExpansionCause::to_string(cause));
3345     }
3346   }
3347 }
3348 
3349 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {





3350   HeapWord* res = NULL;
3351   MutexLocker x(ParGCRareEvent_lock);






3352   while (true) {
3353     // Expansion by some other thread might make alloc OK now:
3354     res = ps->lab.alloc(word_sz);
3355     if (res != NULL) return res;
3356     // If there's not enough expansion space available, give up.
3357     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {

3358       return NULL;
3359     }
3360     // Otherwise, we try expansion.
3361     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3362       CMSExpansionCause::_allocate_par_lab);
3363     // Now go around the loop and try alloc again;
3364     // A competing par_promote might beat us to the expansion space,
3365     // so we may go around the loop again if promotion fails again.
3366     if (GCExpandToAllocateDelayMillis > 0) {
3367       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3368     }
3369   }
3370 }
3371 
3372 
3373 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3374   PromotionInfo* promo) {
3375   MutexLocker x(ParGCRareEvent_lock);






3376   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3377   while (true) {
3378     // Expansion by some other thread might make alloc OK now:
3379     if (promo->ensure_spooling_space()) {
3380       assert(promo->has_spooling_space(),
3381              "Post-condition of successful ensure_spooling_space()");
3382       return true;
3383     }
3384     // If there's not enough expansion space available, give up.
3385     if (_virtual_space.uncommitted_size() < refill_size_bytes) {

3386       return false;
3387     }
3388     // Otherwise, we try expansion.
3389     expand(refill_size_bytes, MinHeapDeltaBytes,
3390       CMSExpansionCause::_allocate_par_spooling_space);
3391     // Now go around the loop and try alloc again;
3392     // A competing allocation might beat us to the expansion space,
3393     // so we may go around the loop again if allocation fails again.
3394     if (GCExpandToAllocateDelayMillis > 0) {
3395       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3396     }
3397   }
3398 }
3399 
3400 
3401 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3402   assert_locked_or_safepoint(ExpandHeap_lock);
3403   // Shrink committed space
3404   _virtual_space.shrink_by(bytes);
3405   // Shrink space; this also shrinks the space's BOT


New version (with the CMSFastPromotionFailure changes):

 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
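
The CMSParGCThreadState instances indexed as _par_gc_thread_states[thread_num] later in this file are created one per GC worker thread. The allocation site is not part of this excerpt; a plausible sketch, assuming the array is sized by ParallelGCThreads:

    // Hypothetical reconstruction; the actual initialization lives
    // elsewhere in this file and is not shown in this webrev.
    if (CollectedHeap::use_parallel_gc_threads()) {
      typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
      _par_gc_thread_states =
        NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
      for (uint i = 0; i < ParallelGCThreads; i++) {
        _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      }
    }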
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size, int level,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, level, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false),
 207   _has_promotion_failed(false)
 208 {
 209   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 210   HeapWord* end    = (HeapWord*) _virtual_space.high();
 211 
 212   _direct_allocated_words = 0;
 213   NOT_PRODUCT(
 214     _numObjectsPromoted = 0;
 215     _numWordsPromoted = 0;
 216     _numObjectsAllocated = 0;
 217     _numWordsAllocated = 0;
 218   )
 219 
 220   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 221                                            use_adaptive_freelists,
 222                                            dictionaryChoice);
 223   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 224   if (_cmsSpace == NULL) {
 225     vm_exit_during_initialization(
 226       "CompactibleFreeListSpace allocation failure");
 227   }


 859 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 860   size_t available = max_available();
 861   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 862   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 863   if (Verbose && PrintGCDetails) {
 864     gclog_or_tty->print_cr(
 865       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 866       "max_promo("SIZE_FORMAT")",
 867       res? "":" not", available, res? ">=":"<",
 868       av_promo, max_promotion_in_bytes);
 869   }
 870   return res;
 871 }
 872 
 873 // At a promotion failure, dump information on the block layout in the
 874 // heap (CMS old generation).
 875 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 876   if (CMSDumpAtPromotionFailure) {
 877     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 878   }
 879   if (CMSFastPromotionFailure) {
 880     reset_promotion_failed();
 881   }
 882 }
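
The has_promotion_failed(), set_promotion_failed() and reset_promotion_failed() accessors introduced by this change are declared in the .hpp, which this webrev excerpt does not show. A minimal sketch of what the header side presumably adds; the plain volatile flag is an assumption, but nothing stronger is needed: the flag is only a hint (a stale read costs at most one extra trip through ParGCRareEvent_lock), and it is reset above only after the promotion failure has been handled.

    // Hypothetical header-side sketch, in ConcurrentMarkSweepGeneration:
    volatile bool _has_promotion_failed;

    bool has_promotion_failed() const { return _has_promotion_failed; }
    void set_promotion_failed()       { _has_promotion_failed = true;  }
    void reset_promotion_failed()     { _has_promotion_failed = false; }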
 883 
 884 CompactibleSpace*
 885 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 886   return _cmsSpace;
 887 }
 888 
 889 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 890   // Clear the promotion information.  These pointers can be adjusted
 891   // along with all the other pointers into the heap, but
 892   // compaction is expected to be a rare event for a heap using CMS,
 893   // so don't do it without seeing the need.
 894   if (CollectedHeap::use_parallel_gc_threads()) {
 895     for (uint i = 0; i < ParallelGCThreads; i++) {
 896       _par_gc_thread_states[i]->promo.reset();
 897     }
 898   }
 899 }
 900 
 901 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {


1336 //  ^                                                                   |
1337 //  |------------------------ DEAD <------------------------------------|
1338 //         sweep                            mut
1339 //
1340 // While a block is in the TRANSIENT state its size cannot be determined,
1341 // so readers will either need to come back later or stall until
1342 // the size can be determined. Note that for the case of direct
1343 // allocation, P-bits, when available, may be used to determine the
1344 // size of an object that may not yet have been initialized.
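
The two options the comment gives a reader of a TRANSIENT block, namely come back later or stall, can be shown with a small standalone toy. This is not HotSpot's mechanism (HotSpot consults its block-offset table and, for direct allocation, P-bits); the toy below simply publishes a size word with release/acquire ordering:

    #include <atomic>
    #include <cstddef>

    struct Block {
      std::atomic<size_t> size{0};  // 0 == TRANSIENT: size not yet readable
    };

    // Writer: initialize the payload first, then publish the size.
    void publish(Block& b, size_t sz) {
      b.size.store(sz, std::memory_order_release);
    }

    // Stalling reader: spin until the size can be determined.
    size_t size_stalling(const Block& b) {
      size_t sz;
      while ((sz = b.size.load(std::memory_order_acquire)) == 0) { /* spin */ }
      return sz;
    }

    // Non-stalling reader: 0 means "still TRANSIENT, come back later".
    size_t size_or_zero(const Block& b) {
      return b.size.load(std::memory_order_acquire);
    }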
1345 
1346 // Things to support parallel young-gen collection.
1347 oop
1348 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1349                                            oop old, markOop m,
1350                                            size_t word_sz) {
1351 #ifndef PRODUCT
1352   if (Universe::heap()->promotion_should_fail()) {
1353     return NULL;
1354   }
1355 #endif  // #ifndef PRODUCT
1356   if (CMSFastPromotionFailure && has_promotion_failed()) {
1357     return NULL;
1358   }
1359 
1360   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1361   PromotionInfo* promoInfo = &ps->promo;
1362   // If we are tracking promotions, then first ensure space for
1363   // promotion (including spooling space for saving the header if necessary);
1364   // then allocate and copy, then track promoted info if needed.
1365   // When tracking (see PromotionInfo::track()), the mark word may
1366   // be displaced and in this case restoration of the mark word
1367   // occurs in the (oop_since_save_marks_)iterate phase.
1368   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1369     // Out of space for allocating spooling buffers;
1370     // try expanding and allocating spooling buffers.
1371     if (!expand_and_ensure_spooling_space(promoInfo)) {
1372       return NULL;
1373     }
1374   }
1375   assert(promoInfo->has_spooling_space(), "Control point invariant");
1376   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1377   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1378   if (obj_ptr == NULL) {


3337 
3338 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3339   CMSExpansionCause::Cause cause)
3340 {
3341 
3342   bool success = expand(bytes, expand_bytes);
3343 
3344   // Remember why we expanded; this information is used
3345   // by shouldConcurrentCollect() when deciding whether to start
3346   // a new CMS cycle.
3347   if (success) {
3348     set_expansion_cause(cause);
3349     if (PrintGCDetails && Verbose) {
3350       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3351         CMSExpansionCause::to_string(cause));
3352     }
3353   }
3354 }
3355 
3356 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3357   // Fast-path for promotion failure outside the lock.
3358   if (CMSFastPromotionFailure && has_promotion_failed()) {
3359     return NULL;
3360   }
3361 
3362   HeapWord* res = NULL;
3363   MutexLocker x(ParGCRareEvent_lock);
3364 
3365   // Check again here while holding the lock.
3366   if (CMSFastPromotionFailure && has_promotion_failed()) {
3367     return NULL;
3368   }
3369 
3370   while (true) {
3371     // Expansion by some other thread might make alloc OK now:
3372     res = ps->lab.alloc(word_sz);
3373     if (res != NULL) return res;
3374     // If there's not enough expansion space available, give up.
3375     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3376       set_promotion_failed();
3377       return NULL;
3378     }
3379     // Otherwise, we try expansion.
3380     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3381       CMSExpansionCause::_allocate_par_lab);
3382     // Now go around the loop and try alloc again;
3383     // A competing par_promote might beat us to the expansion space,
3384     // so we may go around the loop again if promotion fails again.
3385     if (GCExpandToAllocateDelayMillis > 0) {
3386       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3387     }
3388   }
3389 }
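
The shape of this function after the change is a classic double-checked fast-fail: test the flag before taking ParGCRareEvent_lock, so that once one thread has proven the generation cannot expand, the remaining workers fail their promotions immediately instead of queuing on the lock; re-test under the lock to catch a flag set while we were blocked acquiring it; and set the flag at the point where expansion is known to be hopeless. A standalone sketch of the pattern, with illustrative names rather than HotSpot's:

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    class ExpandableArena {
      std::mutex        _rare_event_lock;    // role of ParGCRareEvent_lock
      std::atomic<bool> _has_failed{false};  // role of _has_promotion_failed
      size_t            _used      = 0;
      size_t            _committed = 0;
      const size_t      _reserved;           // hard upper bound

      bool try_alloc(size_t sz) {            // caller holds the lock
        if (_used + sz > _committed) return false;
        _used += sz;
        return true;
      }

     public:
      explicit ExpandableArena(size_t reserved) : _reserved(reserved) {}

      bool alloc(size_t sz) {
        // Fast path: a prior failure means expansion is hopeless.
        if (_has_failed.load(std::memory_order_relaxed)) return false;

        std::lock_guard<std::mutex> x(_rare_event_lock);
        // Re-check under the lock: the flag may have been set while we
        // were blocked acquiring it.
        if (_has_failed.load(std::memory_order_relaxed)) return false;

        while (true) {
          if (try_alloc(sz)) return true;     // expansion may have helped
          if (_committed + sz > _reserved) {  // no expansion room left
            _has_failed.store(true, std::memory_order_relaxed);
            return false;
          }
          _committed += sz;                   // "expand", then retry
        }
      }

      // In HotSpot the flag is cleared at a safepoint, in
      // promotion_failure_occurred(), once the failure has been handled.
      void reset() { _has_failed.store(false, std::memory_order_relaxed); }
    };

Note the asymmetry in the real change: expand_and_par_lab_allocate() checks both before and after taking the lock, while expand_and_ensure_spooling_space() below checks only under the lock; its only caller on this path, par_promote(), has already performed the pre-lock check at line 1356.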
3390 
3391 
3392 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3393   PromotionInfo* promo) {
3394   MutexLocker x(ParGCRareEvent_lock);
3395 
3396   // Check again here while holding the lock.
3397   if (CMSFastPromotionFailure && has_promotion_failed()) {
3398     return false;
3399   }
3400 
3401   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3402   while (true) {
3403     // Expansion by some other thread might make alloc OK now:
3404     if (promo->ensure_spooling_space()) {
3405       assert(promo->has_spooling_space(),
3406              "Post-condition of successful ensure_spooling_space()");
3407       return true;
3408     }
3409     // If there's not enough expansion space available, give up.
3410     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3411       set_promotion_failed();
3412       return false;
3413     }
3414     // Otherwise, we try expansion.
3415     expand(refill_size_bytes, MinHeapDeltaBytes,
3416       CMSExpansionCause::_allocate_par_spooling_space);
3417     // Now go around the loop and try alloc again;
3418     // A competing allocation might beat us to the expansion space,
3419     // so we may go around the loop again if allocation fails again.
3420     if (GCExpandToAllocateDelayMillis > 0) {
3421       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3422     }
3423   }
3424 }
3425 
3426 
3427 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3428   assert_locked_or_safepoint(ExpandHeap_lock);
3429   // Shrink committed space
3430   _virtual_space.shrink_by(bytes);
3431   // Shrink space; this also shrinks the space's BOT

