src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

 250       if (_par_gc_thread_states[i] == NULL) {
 251         vm_exit_during_initialization("Could not allocate par gc structs");
 252       }
 253     }
 254   } else {
 255     _par_gc_thread_states = NULL;
 256   }
 257   _incremental_collection_failed = false;
 258   // The "dilatation_factor" is the expansion that can occur on
 259   // account of the fact that the minimum object size in the CMS
 260   // generation may be larger than that in, say, a contiguous young
 261   //  generation.
 262   // Ideally, in the calculation below, we'd compute the dilatation
 263   // factor as: MinChunkSize/(promoting_gen's min object size)
 264   // Since we do not have such a general query interface for the
 265   // promoting generation, we'll instead just use the minimum
 266   // object size (which today is a header's worth of space);
 267   // note that all arithmetic is in units of HeapWords.
 268   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 269   assert(_dilatation_factor >= 1.0, "from previous assert");



 270 }
 271 
 272 
 273 // The field "_initiating_occupancy" represents the occupancy percentage
 274 // at which we trigger a new collection cycle.  Unless explicitly specified
 275 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 276 // is calculated by:
 277 //
 278 //   Let "f" be MinHeapFreeRatio in
 279 //
 280 //    _initiating_occupancy = 100-f +
 281 //                           f * (CMSTriggerRatio/100)
 282 //   where CMSTriggerRatio is the argument "tr" below.
 283 //
 284 // That is, if we assume the heap is at its desired maximum occupancy at the
 285 // end of a collection, we let CMSTriggerRatio of the (purported) free
 286 // space be allocated before initiating a new collection cycle.
 287 //
 288 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 289   assert(io <= 100 && tr <= 100, "Check the arguments");


 858 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 859   size_t available = max_available();
 860   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 861   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 862   if (Verbose && PrintGCDetails) {
 863     gclog_or_tty->print_cr(
 864       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 865       "max_promo("SIZE_FORMAT")",
 866       res? "":" not", available, res? ">=":"<",
 867       av_promo, max_promotion_in_bytes);
 868   }
 869   return res;
 870 }
 871 
 872 // At a promotion failure, dump information on block layout in the heap
 873 // (CMS old generation).
 874 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 875   if (CMSDumpAtPromotionFailure) {
 876     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 877   }



 878 }
 879 
 880 CompactibleSpace*
 881 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 882   return _cmsSpace;
 883 }
 884 
 885 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 886   // Clear the promotion information.  These pointers can be adjusted
 887   // along with all the other pointers into the heap, but compaction
 888   // is expected to be a rare event for a heap using CMS, so don't
 889   // do it without seeing the need.
 890   if (CollectedHeap::use_parallel_gc_threads()) {
 891     for (uint i = 0; i < ParallelGCThreads; i++) {
 892       _par_gc_thread_states[i]->promo.reset();
 893     }
 894   }
 895 }
 896 
 897 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {


1332 //  ^                                                                   |
1333 //  |------------------------ DEAD <------------------------------------|
1334 //         sweep                            mut
1335 //
1336 // While a block is in TRANSIENT state its size cannot be determined
1337 // so readers will either need to come back later or stall until
1338 // the size can be determined. Note that for the case of direct
1339 // allocation, P-bits, when available, may be used to determine the
1340 // size of an object that may not yet have been initialized.
1341 
1342 // Things to support parallel young-gen collection.
1343 oop
1344 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1345                                            oop old, markOop m,
1346                                            size_t word_sz) {
1347 #ifndef PRODUCT
1348   if (Universe::heap()->promotion_should_fail()) {
1349     return NULL;
1350   }
1351 #endif  // #ifndef PRODUCT



1352 
1353   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1354   PromotionInfo* promoInfo = &ps->promo;
1355   // If we are tracking promotions, first ensure space for the
1356   // promotion (including spooling space for saving the header if necessary),
1357   // then allocate and copy, and finally track the promoted info if needed.
1358   // When tracking (see PromotionInfo::track()), the mark word may
1359   // be displaced and in this case restoration of the mark word
1360   // occurs in the (oop_since_save_marks_)iterate phase.
1361   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1362     // Out of space for allocating spooling buffers;
1363     // try expanding and allocating spooling buffers.
1364     if (!expand_and_ensure_spooling_space(promoInfo)) {
1365       return NULL;
1366     }
1367   }
1368   assert(promoInfo->has_spooling_space(), "Control point invariant");
1369   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1370   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1371   if (obj_ptr == NULL) {



1372      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1373      if (obj_ptr == NULL) {
1374        return NULL;
1375      }
1376   }
1377   oop obj = oop(obj_ptr);
1378   OrderAccess::storestore();
1379   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1380   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1381   // IMPORTANT: See note on object initialization for CMS above.
1382   // Otherwise, copy the object.  Here we must be careful to insert the
1383   // klass pointer last, since this marks the block as an allocated object.
1384   // Except with compressed oops it's the mark word.
1385   HeapWord* old_ptr = (HeapWord*)old;
1386   // Restore the mark word copied above.
1387   obj->set_mark(m);
1388   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1389   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1390   OrderAccess::storestore();
1391 


3332   CMSExpansionCause::Cause cause)
3333 {
3334 
3335   bool success = expand(bytes, expand_bytes);
3336 
3337   // remember why we expanded; this information is used
3338   // by shouldConcurrentCollect() when making decisions on whether to start
3339   // a new CMS cycle.
3340   if (success) {
3341     set_expansion_cause(cause);
3342     if (PrintGCDetails && Verbose) {
3343       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3344         CMSExpansionCause::to_string(cause));
3345     }
3346   }
3347 }
3348 
3349 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3350   HeapWord* res = NULL;
3351   MutexLocker x(ParGCRareEvent_lock);






3352   while (true) {
3353     // Expansion by some other thread might make alloc OK now:
3354     res = ps->lab.alloc(word_sz);
3355     if (res != NULL) return res;
3356     // If there's not enough expansion space available, give up.
3357     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {

3358       return NULL;
3359     }
3360     // Otherwise, we try expansion.
3361     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3362       CMSExpansionCause::_allocate_par_lab);
3363     // Now go around the loop and try alloc again;
3364     // A competing par_promote might beat us to the expansion space,
3365     // so we may go around the loop again if promotion fails again.
3366     if (GCExpandToAllocateDelayMillis > 0) {
3367       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3368     }
3369   }
3370 }
3371 
3372 
3373 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3374   PromotionInfo* promo) {
3375   MutexLocker x(ParGCRareEvent_lock);






3376   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3377   while (true) {
3378     // Expansion by some other thread might make alloc OK now:
3379     if (promo->ensure_spooling_space()) {
3380       assert(promo->has_spooling_space(),
3381              "Post-condition of successful ensure_spooling_space()");
3382       return true;
3383     }
3384     // If there's not enough expansion space available, give up.
3385     if (_virtual_space.uncommitted_size() < refill_size_bytes) {

3386       return false;
3387     }
3388     // Otherwise, we try expansion.
3389     expand(refill_size_bytes, MinHeapDeltaBytes,
3390       CMSExpansionCause::_allocate_par_spooling_space);
3391     // Now go around the loop and try alloc again;
3392     // A competing allocation might beat us to the expansion space,
3393     // so we may go around the loop again if allocation fails again.
3394     if (GCExpandToAllocateDelayMillis > 0) {
3395       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3396     }
3397   }
3398 }
3399 
3400 
3401 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3402   assert_locked_or_safepoint(ExpandHeap_lock);
3403   // Shrink committed space
3404   _virtual_space.shrink_by(bytes);
3405   // Shrink space; this also shrinks the space's BOT




 250       if (_par_gc_thread_states[i] == NULL) {
 251         vm_exit_during_initialization("Could not allocate par gc structs");
 252       }
 253     }
 254   } else {
 255     _par_gc_thread_states = NULL;
 256   }
 257   _incremental_collection_failed = false;
 258   // The "dilatation_factor" is the expansion that can occur on
 259   // account of the fact that the minimum object size in the CMS
 260   // generation may be larger than that in, say, a contiguous young
 261   //  generation.
 262   // Ideally, in the calculation below, we'd compute the dilatation
 263   // factor as: MinChunkSize/(promoting_gen's min object size)
 264   // Since we do not have such a general query interface for the
 265   // promoting generation, we'll instead just use the minimum
 266   // object size (which today is a header's worth of space);
 267   // note that all arithmetic is in units of HeapWords.
 268   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 269   assert(_dilatation_factor >= 1.0, "from previous assert");
 270 
 271   // Support for CMSFastPromotionFailure
 272   reset_promotion_failed();
 273 }
 274 
 275 
 276 // The field "_initiating_occupancy" represents the occupancy percentage
 277 // at which we trigger a new collection cycle.  Unless explicitly specified
 278 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 279 // is calculated by:
 280 //
 281 //   Let "f" be MinHeapFreeRatio in
 282 //
 283 //    _initiating_occupancy = 100-f +
 284 //                           f * (CMSTriggerRatio/100)
 285 //   where CMSTriggerRatio is the argument "tr" below.
 286 //
 287 // That is, if we assume the heap is at its desired maximum occupancy at the
 288 // end of a collection, we let CMSTriggerRatio of the (purported) free
 289 // space be allocated before initiating a new collection cycle.
 290 //
 291 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 292   assert(io <= 100 && tr <= 100, "Check the arguments");
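For intuition: with, for example, MinHeapFreeRatio = 40 and CMSTriggerRatio = 80, the formula above gives (100 - 40) + 40 * 0.80 = 92, i.e. a new cycle is initiated at roughly 92% occupancy of the CMS generation. A minimal standalone sketch of just that computation (illustrative only, not code from this change):

// Illustrative sketch: the occupancy trigger computed from the comment
// above, with f = MinHeapFreeRatio and tr = CMSTriggerRatio.
static double computed_initiating_occupancy(double f, double tr) {
  return (100.0 - f) + f * (tr / 100.0);   // e.g. f=40, tr=80 -> 92.0
}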


 861 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 862   size_t available = max_available();
 863   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 864   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 865   if (Verbose && PrintGCDetails) {
 866     gclog_or_tty->print_cr(
 867       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 868       "max_promo("SIZE_FORMAT")",
 869       res? "":" not", available, res? ">=":"<",
 870       av_promo, max_promotion_in_bytes);
 871   }
 872   return res;
 873 }
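For intuition, an illustrative standalone rendering of the same check with made-up sizes (not code from this change):

#include <cstddef>

// Illustrative example only: with 10 MB available, a padded average
// promotion of 8 MB and a worst-case promotion of 12 MB, the attempt is
// judged safe because the first disjunct holds (available >= av_promo),
// even though available < max_promotion_in_bytes.
static bool promotion_attempt_is_safe_example() {
  const std::size_t available = 10u * 1024 * 1024;   // free space in the old gen
  const std::size_t av_promo  =  8u * 1024 * 1024;   // padded average promoted
  const std::size_t max_promo = 12u * 1024 * 1024;   // worst-case promotion
  return (available >= av_promo) || (available >= max_promo);   // evaluates to true
}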
 874 
 875 // At a promotion failure, dump information on block layout in the heap
 876 // (CMS old generation).
 877 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 878   if (CMSDumpAtPromotionFailure) {
 879     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 880   }
 881   if (CMSFastPromotionFailure) {
 882     reset_promotion_failed();
 883   }
 884 }
 885 
 886 CompactibleSpace*
 887 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 888   return _cmsSpace;
 889 }
 890 
 891 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 892   // Clear the promotion information.  These pointers can be adjusted
 893   // along with all the other pointers into the heap, but compaction
 894   // is expected to be a rare event for a heap using CMS, so don't
 895   // do it without seeing the need.
 896   if (CollectedHeap::use_parallel_gc_threads()) {
 897     for (uint i = 0; i < ParallelGCThreads; i++) {
 898       _par_gc_thread_states[i]->promo.reset();
 899     }
 900   }
 901 }
 902 
 903 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {


1338 //  ^                                                                   |
1339 //  |------------------------ DEAD <------------------------------------|
1340 //         sweep                            mut
1341 //
1342 // While a block is in TRANSIENT state its size cannot be determined
1343 // so readers will either need to come back later or stall until
1344 // the size can be determined. Note that for the case of direct
1345 // allocation, P-bits, when available, may be used to determine the
1346 // size of an object that may not yet have been initialized.
1347 
1348 // Things to support parallel young-gen collection.
1349 oop
1350 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1351                                            oop old, markOop m,
1352                                            size_t word_sz) {
1353 #ifndef PRODUCT
1354   if (Universe::heap()->promotion_should_fail()) {
1355     return NULL;
1356   }
1357 #endif  // #ifndef PRODUCT
1358   if (CMSFastPromotionFailure && has_promotion_failed()) {
1359     return NULL;
1360   }
1361 
1362   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1363   PromotionInfo* promoInfo = &ps->promo;
1364   // If we are tracking promotions, first ensure space for the
1365   // promotion (including spooling space for saving the header if necessary),
1366   // then allocate and copy, and finally track the promoted info if needed.
1367   // When tracking (see PromotionInfo::track()), the mark word may
1368   // be displaced and in this case restoration of the mark word
1369   // occurs in the (oop_since_save_marks_)iterate phase.
1370   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1371     // Out of space for allocating spooling buffers;
1372     // try expanding and allocating spooling buffers.
1373     if (!expand_and_ensure_spooling_space(promoInfo)) {
1374       return NULL;
1375     }
1376   }
1377   assert(promoInfo->has_spooling_space(), "Control point invariant");
1378   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1379   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1380   if (obj_ptr == NULL) {
1381      if (CMSFastPromotionFailure && has_promotion_failed()) {
1382        return NULL;
1383      }
1384      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1385      if (obj_ptr == NULL) {
1386        return NULL;
1387      }
1388   }
1389   oop obj = oop(obj_ptr);
1390   OrderAccess::storestore();
1391   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1392   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1393   // IMPORTANT: See note on object initialization for CMS above.
1394   // Otherwise, copy the object.  Here we must be careful to insert the
1395   // klass pointer last, since this marks the block as an allocated object.
1396   // Except with compressed oops it's the mark word.
1397   HeapWord* old_ptr = (HeapWord*)old;
1398   // Restore the mark word copied above.
1399   obj->set_mark(m);
1400   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1401   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1402   OrderAccess::storestore();
1403 
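The CMSFastPromotionFailure checks added above amount to a shared fail-fast flag. A minimal sketch of that idea, assuming a relaxed atomic flag as a stand-in for the generation's actual field behind has_promotion_failed() / set_promotion_failed() / reset_promotion_failed() (accessor names from the change; the class below is hypothetical):

#include <atomic>

// Sketch only: once one GC worker has proven that the old generation
// cannot satisfy a promotion, a shared flag lets every other worker
// return NULL immediately instead of contending on ParGCRareEvent_lock
// and re-attempting expansion that is already known to be futile.
class PromotionFailureGate {
  std::atomic<bool> _failed{false};
 public:
  bool has_failed() const { return _failed.load(std::memory_order_relaxed); }
  void set_failed()       { _failed.store(true, std::memory_order_relaxed); }
  void reset()            { _failed.store(false, std::memory_order_relaxed); }
};

As the diff shows, the flag is reset at generation initialization and again in promotion_failure_occurred(), so the short-circuit is cleared once the promotion failure has been handled.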


3344   CMSExpansionCause::Cause cause)
3345 {
3346 
3347   bool success = expand(bytes, expand_bytes);
3348 
3349   // remember why we expanded; this information is used
3350   // by shouldConcurrentCollect() when making decisions on whether to start
3351   // a new CMS cycle.
3352   if (success) {
3353     set_expansion_cause(cause);
3354     if (PrintGCDetails && Verbose) {
3355       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3356         CMSExpansionCause::to_string(cause));
3357     }
3358   }
3359 }
3360 
3361 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3362   HeapWord* res = NULL;
3363   MutexLocker x(ParGCRareEvent_lock);
3364 
3365   // Check again here while holding the lock.
3366   if (CMSFastPromotionFailure && has_promotion_failed()) {
3367     return NULL;
3368   }
3369 
3370   while (true) {
3371     // Expansion by some other thread might make alloc OK now:
3372     res = ps->lab.alloc(word_sz);
3373     if (res != NULL) return res;
3374     // If there's not enough expansion space available, give up.
3375     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3376       set_promotion_failed();
3377       return NULL;
3378     }
3379     // Otherwise, we try expansion.
3380     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3381       CMSExpansionCause::_allocate_par_lab);
3382     // Now go around the loop and try alloc again;
3383     // A competing par_promote might beat us to the expansion space,
3384     // so we may go around the loop again if promotion fails again.
3385     if (GCExpandToAllocateDelayMillis > 0) {
3386       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3387     }
3388   }
3389 }
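Both slow paths above re-test the flag immediately after acquiring ParGCRareEvent_lock, because another worker may have recorded a failure while this thread was blocked on the lock. A hedged sketch of that shape, reusing the hypothetical PromotionFailureGate from the earlier sketch and std::mutex in place of the VM's own lock type:

#include <mutex>

static std::mutex rare_event_lock;   // stand-in for the VM's ParGCRareEvent_lock

// Sketch only: cheap unlocked test first (as in par_promote), then a
// re-check under the lock before doing any expansion work.
static void* slow_path_allocate(PromotionFailureGate& gate, bool fast_fail) {
  std::lock_guard<std::mutex> x(rare_event_lock);
  if (fast_fail && gate.has_failed()) {
    return nullptr;                  // expansion already proven futile
  }
  void* res = nullptr;
  // ... expand-and-retry loop, as in expand_and_par_lab_allocate() above,
  //     calling gate.set_failed() when no further expansion is possible ...
  return res;
}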
3390 
3391 
3392 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3393   PromotionInfo* promo) {
3394   MutexLocker x(ParGCRareEvent_lock);
3395 
3396   // Check again here while holding the lock.
3397   if (CMSFastPromotionFailure && has_promotion_failed()) {
3398     return false;
3399   }
3400 
3401   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3402   while (true) {
3403     // Expansion by some other thread might make alloc OK now:
3404     if (promo->ensure_spooling_space()) {
3405       assert(promo->has_spooling_space(),
3406              "Post-condition of successful ensure_spooling_space()");
3407       return true;
3408     }
3409     // If there's not enough expansion space available, give up.
3410     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3411       set_promotion_failed();
3412       return false;
3413     }
3414     // Otherwise, we try expansion.
3415     expand(refill_size_bytes, MinHeapDeltaBytes,
3416       CMSExpansionCause::_allocate_par_spooling_space);
3417     // Now go around the loop and try alloc again;
3418     // A competing allocation might beat us to the expansion space,
3419     // so we may go around the loop again if allocation fails again.
3420     if (GCExpandToAllocateDelayMillis > 0) {
3421       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3422     }
3423   }
3424 }
3425 
3426 
3427 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3428   assert_locked_or_safepoint(ExpandHeap_lock);
3429   // Shrink committed space
3430   _virtual_space.shrink_by(bytes);
3431   // Shrink space; this also shrinks the space's BOT

