src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
*** old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Oct 17 16:28:32 2014
--- new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Oct 17 16:28:32 2014

*** 195,208 **** --- 195,208 ----
      promo.setSpace(cfls);
    }
  };

  ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
!      ReservedSpace rs, size_t initial_byte_size, int level,
!      ReservedSpace rs, size_t initial_byte_size,
       CardTableRS* ct, bool use_adaptive_freelists,
       FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
!   CardGeneration(rs, initial_byte_size, level, ct),
!   CardGeneration(rs, initial_byte_size, ct),
    _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
    _debug_collection_type(Concurrent_collection_type),
    _did_compact(false)
  {
    HeapWord* bottom = (HeapWord*) _virtual_space.low();
*** 378,388 **** --- 378,388 ----
  // the padded average size of the promotion for each
  // young generation collection.
  double CMSStats::time_until_cms_gen_full() const {
    size_t cms_free = _cms_gen->cmsSpace()->free();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
!   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
!   size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                     (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
    if (cms_free > expected_promotion) {
      // Start a cms collection if there isn't enough space to promote
      // for the next minor collection. Use the padded average as
      // a safety factor.
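The substitution above, gch->get_gen(0) becoming gch->young_gen(), is the core pattern of this patch: generations are no longer addressed by an int level but through named young/old accessors. The same substitution appears again in the hunks at lines 706 (prev_gen(_cmsGen)) and 1124 (get_gen(0)). A minimal sketch of the heap shape this assumes, using only names that appear in the hunks of this webrev; the real declarations in genCollectedHeap.hpp may differ in detail:

    // Sketch only: GenCollectedHeap with explicit young/old fields
    // instead of a _gens[level] array indexed by generation level.
    class Generation;

    class GenCollectedHeap {
      Generation* _young_gen;  // referenced directly in the hunk at line 946
      Generation* _old_gen;
     public:
      Generation* young_gen() const { return _young_gen; }  // replaces get_gen(0)
      Generation* old_gen()   const { return _old_gen;   }  // replaces get_gen(1)
    };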
*** 706,716 **** --- 706,716 ----
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
! _young_gen = gch->prev_gen(_cmsGen);
! _young_gen = gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
*** 817,832 **** --- 817,837 ----
  #endif

  void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (PrintGCDetails) {
+     // I didn't want to change the logging when removing the level concept,
+     // but I guess this logging could say "old" or something instead of "1".
+     assert(this == gch->old_gen(),
+            "The CMS generation should be the old generation");
+     int level = 1;
      if (Verbose) {
        gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
!         level(), short_name(), s, used(), capacity());
!         level, short_name(), s, used(), capacity());
      } else {
        gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
!         level(), short_name(), s, used() / K, capacity() / K);
!         level, short_name(), s, used() / K, capacity() / K);
      }
    }
    if (Verbose) {
      gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
        gch->used(), gch->capacity());
*** 946,971 **** --- 951,973 ----
      gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
      gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage);
      gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage);
!     gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
!     gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity() / 1000);
      gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
!       desired_capacity/1000);
!       desired_capacity / 1000);
-     int prev_level = level() - 1;
-     if (prev_level >= 0) {
-       size_t prev_size = 0;
-       GenCollectedHeap* gch = GenCollectedHeap::heap();
-       Generation* prev_gen = gch->_gens[prev_level];
-       prev_size = prev_gen->capacity();
-       gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT, prev_size/1000);
-     }
+     GenCollectedHeap* gch = GenCollectedHeap::heap();
+     assert(this == gch->_old_gen,
+            "The CMS generation should always be the old generation");
+     size_t young_size = gch->_young_gen->capacity();
+     gclog_or_tty->print_cr(" Young gen size "SIZE_FORMAT,
+       young_size / 1000);
      gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
!       unsafe_max_alloc_nogc()/1000);
!       unsafe_max_alloc_nogc() / 1000);
      gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
!       contiguous_available()/1000);
!       contiguous_available() / 1000);
      gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
*** 1124,1134 **** --- 1126,1136 ----
    return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
  }

  void CMSCollector::icms_update_allocation_limits() {
!   Generation* young = GenCollectedHeap::heap()->get_gen(0);
!   Generation* young = GenCollectedHeap::heap()->young_gen();
    EdenSpace* eden = young->as_DefNewGeneration()->eden();
    const unsigned int duty_cycle = stats().icms_update_duty_cycle();
    if (CMSTraceIncrementalPacing) {
      stats().print();
*** 1265,1279 **** --- 1267,1278 ----
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
!   // Since there's currently no next generation, we don't try to promote
!   // Since this is the old generation, we don't try to promote
    // into a more senior generation.
-   assert(next_gen() == NULL, "assumption, based upon which no attempt "
-                              "is made to pass on a possibly failing "
-                              "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
*** 2055,2066 **** --- 2054,2064 ----
      _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
        _inter_sweep_estimate.padded_average(),
        _intra_sweep_estimate.padded_average());
    }

!   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
!     ref_processor(), clear_all_soft_refs);
!   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
  #ifdef ASSERT
    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
    size_t free_size = cms_space->free();
    assert(free_size == pointer_delta(cms_space->end(), cms_space->compaction_top())
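The call above simply drops its leading int argument. A declaration-level sketch of the before/after, where only the new signature is taken from the hunk and the rest is assumed, presumably the level is redundant because in the two-generation layout a mark-sweep-compact initiated from CMS always covers the whole heap:

    // Sketch only: GenMarkSweep no longer needs to be told which level
    // is being collected.
    class ReferenceProcessor;

    class GenMarkSweep {
     public:
      // Before (assumed): invoke_at_safepoint(int level,
      //                                       ReferenceProcessor* rp,
      //                                       bool clear_all_softrefs);
      static void invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs);
    };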
*** 3004,3014 **** --- 3002,3012 ----
  // Mark from roots one level into CMS
  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
! gch->gen_process_roots(_cmsGen->level(),
! gch->gen_process_roots(Generation::Old,
                         true,   // younger gens are roots
                         true,   // activate StrongRootsScope
                         SharedHeap::ScanningOption(roots_scanning_options()),
                         should_unload_classes(),
                         &notOlder,
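This is the first of six call sites in this file (the hunks at 3004, 3072, 3686, 5157, 5293, and 5885) that change in exactly the same way: the leading _cmsGen->level() argument becomes Generation::Old. A sketch of the implied signature change; the enum spelling and parameter names are assumptions, and the trailing arguments are elided (a fuller sketch of the enum follows the Scavenge-Before-Remark hunk at 4962 below):

    // Sketch only: gen_process_roots() takes a generation type tag
    // rather than an int level.
    class Generation { public: enum Type { Young, Old }; };

    class GenCollectedHeap {
     public:
      // Before (assumed): void gen_process_roots(int level, ...);
      void gen_process_roots(Generation::Type type,
                             bool younger_gens_as_roots,
                             bool activate_scope /* ...more args elided... */);
    };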
*** 3072,3082 **** --- 3070,3080 ----
                      markBitMap());
  CLDToOopClosure cld_closure(&notOlder, true);
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
! gch->gen_process_roots(_cmsGen->level(),
! gch->gen_process_roots(Generation::Old,
                         true,   // younger gens are roots
                         true,   // activate StrongRootsScope
                         SharedHeap::ScanningOption(roots_scanning_options()),
                         should_unload_classes(),
                         &notOlder,
*** 3686,3696 **** --- 3684,3694 ----
    gch->set_par_threads(0);
  } else {
    // The serial version.
    CLDToOopClosure cld_closure(&notOlder, true);
    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
!   gch->gen_process_roots(_cmsGen->level(),
!   gch->gen_process_roots(Generation::Old,
                           true,   // younger gens are roots
                           true,   // activate StrongRootsScope
                           SharedHeap::ScanningOption(roots_scanning_options()),
                           should_unload_classes(),
                           &notOlder,
*** 4962,4981 **** --- 4960,4976 ----
      // Temporarily set flag to false, GCH->do_collection will
      // expect it to be false and set to true
      FlagSetting fl(gch->_is_gc_active, false);
      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-     int level = _cmsGen->level() - 1;
-     if (level >= 0) {
        gch->do_collection(true,              // full (i.e. force, see below)
                           false,             // !clear_all_soft_refs
                           0,                 // size
                           false,             // is_tlab
!                          level              // max_level
!                          Generation::Young  // type
                           );
-     }
    }
    FreelistLocker x(this);
    MutexLockerEx y(bitMapLock(), Mutex::_no_safepoint_check_flag);
    assert(!init_mark_was_synchronous, "but that's impossible!");
    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
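The level >= 0 guard disappears because, with a fixed two-generation layout, a young generation always exists, so a scavenge limited to it is always legal. The Generation::Young here and Generation::Old in the root-scanning hunks suggest a small tag enum on Generation replacing the level numbers; a sketch, where only the two enumerator names appear in this webrev and the enum's own name is an assumption:

    // Sketch only: the tag that replaces int generation levels.
    class Generation {
     public:
      enum Type {
        Young,  // the DefNew/ParNew generation, formerly level 0
        Old     // the CMS/tenured generation, formerly level 1
      };
    };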
*** 5157,5167 **** --- 5152,5162 ----
  _timer.reset();
  _timer.start();

  CLDToOopClosure cld_closure(&par_mri_cl, true);
! gch->gen_process_roots(_collector->_cmsGen->level(),
! gch->gen_process_roots(Generation::Old,
                         false,  // yg was scanned above
                         false,  // this is parallel code
                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                         _collector->should_unload_classes(),
                         &par_mri_cl,
*** 5293,5303 **** --- 5288,5298 ----
  }

  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
! gch->gen_process_roots(_collector->_cmsGen->level(),
! gch->gen_process_roots(Generation::Old,
                         false,  // yg was scanned above
                         false,  // this is parallel code
                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                         _collector->should_unload_classes(),
                         &par_mrias_cl,
*** 5885,5895 **** --- 5880,5890 ----
  verify_work_stacks_empty();

  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  GenCollectedHeap::StrongRootsScope srs(gch);
! gch->gen_process_roots(_cmsGen->level(),
! gch->gen_process_roots(Generation::Old,
                         true,   // younger gens as roots
                         false,  // use the local StrongRootsScope
                         SharedHeap::ScanningOption(roots_scanning_options()),
                         should_unload_classes(),
                         &mrias_cl,
*** 6362,6376 **** --- 6357,6372 ----
  FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
    return _cmsSpace->find_chunk_at_end();
  }

! void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
! void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
                                                      bool full) {
!   // The next lower level has been collected. Gather any statistics
!   // If the young generation has been collected, gather any statistics
    // that are of interest at this point.
!   if (!full && (current_level + 1) == level()) {
+   bool current_is_young = (current_generation == GenCollectedHeap::heap()->young_gen());
+   if (!full && current_is_young) {
      // Gather statistics on the young generation collection.
      collector()->stats().record_gc0_end(used());
    }
  }
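update_gc_stats() now receives the generation that was just collected instead of its level. A sketch of the resulting control flow; the driver function below is hypothetical and merely stands in for the loop that lives in GenCollectedHeap:

    // Sketch only: each generation is told which generation was collected
    // and decides for itself whether the event is interesting.
    class Generation {
     public:
      virtual void update_gc_stats(Generation* current_generation, bool full) {}
    };

    // Hypothetical driver: after any collection, notify both generations.
    void update_all_gc_stats(Generation* young, Generation* old_gen,
                             Generation* collected, bool full) {
      young->update_gc_stats(collected, full);
      old_gen->update_gc_stats(collected, full);  // CMS records its gc0
                                                  // stats after a young GC
    }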
