src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7215 : imported patch remove_levels


 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size, int level,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, level, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false)
 207 {
 208   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 209   HeapWord* end    = (HeapWord*) _virtual_space.high();
 210 
 211   _direct_allocated_words = 0;
 212   NOT_PRODUCT(
 213     _numObjectsPromoted = 0;
 214     _numWordsPromoted = 0;
 215     _numObjectsAllocated = 0;
 216     _numWordsAllocated = 0;
 217   )
 218 
 219   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 220                                            use_adaptive_freelists,
 221                                            dictionaryChoice);
 222   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 223   if (_cmsSpace == NULL) {


 363   _cms_used_at_gc0_end = 0;
 364   _allow_duty_cycle_reduction = false;
 365   _valid_bits = 0;
 366   _icms_duty_cycle = CMSIncrementalDutyCycle;
 367 }
 368 
 369 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 370   // TBD: CR 6909490
 371   return 1.0;
 372 }
 373 
 374 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 375 }
 376 
 377 // If promotion failure handling is on, use
 378 // the padded average size of the promotion for each
 379 // young generation collection.
 380 double CMSStats::time_until_cms_gen_full() const {
 381   size_t cms_free = _cms_gen->cmsSpace()->free();
 382   GenCollectedHeap* gch = GenCollectedHeap::heap();
 383   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 384                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 385   if (cms_free > expected_promotion) {
 386     // Start a cms collection if there isn't enough space to promote
 387     // for the next minor collection.  Use the padded average as
 388     // a safety factor.
 389     cms_free -= expected_promotion;
 390 
 391     // Adjust by the safety factor.
 392     double cms_free_dbl = (double)cms_free;
 393     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 394     // Apply a further correction factor which tries to adjust
 395     // for recent occurrence of concurrent mode failures.
 396     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 397     cms_free_dbl = cms_free_dbl * cms_adjustment;
 398 
 399     if (PrintGCDetails && Verbose) {
 400       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 401         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 402         cms_free, expected_promotion);
 403       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",


 691 
 692   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 693 
 694   // Clip CMSBootstrapOccupancy between 0 and 100.
 695   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 696 
 697   _full_gcs_since_conc_gc = 0;
 698 
 699   // Now tell CMS generations the identity of their collector
 700   ConcurrentMarkSweepGeneration::set_collector(this);
 701 
 702   // Create & start a CMS thread for this CMS collector
 703   _cmsThread = ConcurrentMarkSweepThread::start(this);
 704   assert(cmsThread() != NULL, "CMS Thread should have been created");
 705   assert(cmsThread()->collector() == this,
 706          "CMS Thread should refer to this gen");
 707   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 708 
 709   // Support for parallelizing young gen rescan
 710   GenCollectedHeap* gch = GenCollectedHeap::heap();
 711   _young_gen = gch->prev_gen(_cmsGen);
 712   if (gch->supports_inline_contig_alloc()) {
 713     _top_addr = gch->top_addr();
 714     _end_addr = gch->end_addr();
 715     assert(_young_gen != NULL, "no _young_gen");
 716     _eden_chunk_index = 0;
 717     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 718     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 719     if (_eden_chunk_array == NULL) {
 720       _eden_chunk_capacity = 0;
 721       warning("GC/CMS: _eden_chunk_array allocation failure");
 722     }
 723   }
 724   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 725 
 726   // Support for parallelizing survivor space rescan
 727   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 728     const size_t max_plab_samples =
 729       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 730 
 731     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);


 802     _space_counters->update_used(used);
 803     _space_counters->update_capacity();
 804     _gen_counters->update_all();
 805   }
 806 }
 807 
 808 void ConcurrentMarkSweepGeneration::print() const {
 809   Generation::print();
 810   cmsSpace()->print();
 811 }
 812 
 813 #ifndef PRODUCT
 814 void ConcurrentMarkSweepGeneration::print_statistics() {
 815   cmsSpace()->printFLCensus(0);
 816 }
 817 #endif
 818 
 819 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 820   GenCollectedHeap* gch = GenCollectedHeap::heap();
 821   if (PrintGCDetails) {





 822     if (Verbose) {
 823       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 824         level(), short_name(), s, used(), capacity());
 825     } else {
 826       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 827         level(), short_name(), s, used() / K, capacity() / K);
 828     }
 829   }
 830   if (Verbose) {
 831     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 832               gch->used(), gch->capacity());
 833   } else {
 834     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 835               gch->used() / K, gch->capacity() / K);
 836   }
 837 }
 838 
 839 size_t
 840 ConcurrentMarkSweepGeneration::contiguous_available() const {
 841   // dld proposes an improvement in precision here. If the committed
 842   // part of the space ends in a free block we should add that to
 843   // uncommitted size in the calculation below. Will make this
 844   // change later, staying with the approximation below for the
 845   // time being. -- ysr.
 846   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 847 }


 931     return;
 932   }
 933 
 934   double free_percentage = ((double) free()) / capacity();
 935   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 936   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 937 
 938   // compute expansion delta needed for reaching desired free percentage
 939   if (free_percentage < desired_free_percentage) {
 940     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 941     assert(desired_capacity >= capacity(), "invalid expansion size");
 942     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 943     if (PrintGCDetails && Verbose) {
 944       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 945       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 946       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 947       gclog_or_tty->print_cr("  Desired free fraction %f",
 948         desired_free_percentage);
 949       gclog_or_tty->print_cr("  Maximum free fraction %f",
 950         maximum_free_percentage);
 951       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 952       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 953         desired_capacity/1000);
 954       int prev_level = level() - 1;
 955       if (prev_level >= 0) {
 956         size_t prev_size = 0;
 957         GenCollectedHeap* gch = GenCollectedHeap::heap();
 958         Generation* prev_gen = gch->_gens[prev_level];
 959         prev_size = prev_gen->capacity();
 960           gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 961                                  prev_size/1000);
 962       }
 963       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 964         unsafe_max_alloc_nogc()/1000);
 965       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 966         contiguous_available()/1000);
 967       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 968         expand_bytes);
 969     }
 970     // safe if expansion fails
 971     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 972     if (PrintGCDetails && Verbose) {
 973       gclog_or_tty->print_cr("  Expanded free fraction %f",
 974         ((double) free()) / capacity());
 975     }
 976   } else {
 977     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 978     assert(desired_capacity <= capacity(), "invalid expansion size");
 979     size_t shrink_bytes = capacity() - desired_capacity;
 980     // Don't shrink unless the delta is greater than the minimum shrink we want
 981     if (shrink_bytes >= MinHeapDeltaBytes) {
 982       shrink_free_list_by(shrink_bytes);
 983     }
 984   }
 985 }
 986 


1109         }
1110       } else {  // not an obj array; we can just mark the head
1111         if (par) {
1112           _modUnionTable.par_mark(start);
1113         } else {
1114           _modUnionTable.mark(start);
1115         }
1116       }
1117     }
1118   }
1119 }
1120 
1121 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1122 {
1123   size_t delta = pointer_delta(addr, space->bottom());
1124   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1125 }
1126 
1127 void CMSCollector::icms_update_allocation_limits()
1128 {
1129   Generation* young = GenCollectedHeap::heap()->get_gen(0);
1130   EdenSpace* eden = young->as_DefNewGeneration()->eden();
1131 
1132   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1133   if (CMSTraceIncrementalPacing) {
1134     stats().print();
1135   }
1136 
1137   assert(duty_cycle <= 100, "invalid duty cycle");
1138   if (duty_cycle != 0) {
1139     // The duty_cycle is a percentage between 0 and 100; convert to words and
1140     // then compute the offset from the endpoints of the space.
1141     size_t free_words = eden->free() / HeapWordSize;
1142     double free_words_dbl = (double)free_words;
1143     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1144     size_t offset_words = (free_words - duty_cycle_words) / 2;
1145 
1146     _icms_start_limit = eden->top() + offset_words;
1147     _icms_stop_limit = eden->end() - offset_words;
1148 
1149     // The limits may be adjusted (shifted to the right) by


1250 }
1251 
1252 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1253   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1254   // allocate, copy and if necessary update promoinfo --
1255   // delegate to underlying space.
1256   assert_lock_strong(freelistLock());
1257 
1258 #ifndef PRODUCT
1259   if (Universe::heap()->promotion_should_fail()) {
1260     return NULL;
1261   }
1262 #endif  // #ifndef PRODUCT
1263 
1264   oop res = _cmsSpace->promote(obj, obj_size);
1265   if (res == NULL) {
1266     // expand and retry
1267     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1268     expand(s*HeapWordSize, MinHeapDeltaBytes,
1269       CMSExpansionCause::_satisfy_promotion);
1270     // Since there's currently no next generation, we don't try to promote
1271     // into a more senior generation.
1272     assert(next_gen() == NULL, "assumption, based upon which no attempt "
1273                                "is made to pass on a possibly failing "
1274                                "promotion to next generation");
1275     res = _cmsSpace->promote(obj, obj_size);
1276   }
1277   if (res != NULL) {
1278     // See comment in allocate() about when objects should
1279     // be allocated live.
1280     assert(obj->is_oop(), "Will dereference klass pointer below");
1281     collector()->promoted(false,           // Not parallel
1282                           (HeapWord*)res, obj->is_objArray(), obj_size);
1283     // promotion counters
1284     NOT_PRODUCT(
1285       _numObjectsPromoted++;
1286       _numWordsPromoted +=
1287         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1288     )
1289   }
1290   return res;
1291 }
1292 
1293 
1294 HeapWord*


2040   // collection, clear the _modUnionTable.
2041   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2042     "_modUnionTable should be clear if the baton was not passed");
2043   _modUnionTable.clear_all();
2044   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2045     "mod union for klasses should be clear if the baton was passed");
2046   _ct->klass_rem_set()->clear_mod_union();
2047 
2048   // We must adjust the allocation statistics being maintained
2049   // in the free list space. We do so by reading and clearing
2050   // the sweep timer and updating the block flux rate estimates below.
2051   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2052   if (_inter_sweep_timer.is_active()) {
2053     _inter_sweep_timer.stop();
2054     // Note that we do not use this sample to update the _inter_sweep_estimate.
2055     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2056                                             _inter_sweep_estimate.padded_average(),
2057                                             _intra_sweep_estimate.padded_average());
2058   }
2059 
2060   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2061     ref_processor(), clear_all_soft_refs);
2062   #ifdef ASSERT
2063     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2064     size_t free_size = cms_space->free();
2065     assert(free_size ==
2066            pointer_delta(cms_space->end(), cms_space->compaction_top())
2067            * HeapWordSize,
2068       "All the free space should be compacted into one chunk at top");
2069     assert(cms_space->dictionary()->total_chunk_size(
2070                                       debug_only(cms_space->freelistLock())) == 0 ||
2071            cms_space->totalSizeInIndexedFreeLists() == 0,
2072       "All the free space should be in a single chunk");
2073     size_t num = cms_space->totalCount();
2074     assert((free_size == 0 && num == 0) ||
2075            (free_size > 0  && (num == 1 || num == 2)),
2076          "There should be at most 2 free chunks after compaction");
2077   #endif // ASSERT
2078   _collectorState = Resetting;
2079   assert(_restart_addr == NULL,
2080          "Should have been NULL'd before baton was passed");
2081   reset(false /* == !asynch */);


2989   } else {
2990     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2991             CMSRemarkVerifyVariant);
2992   }
2993   if (!silent) gclog_or_tty->print(" done] ");
2994   return true;
2995 }
2996 
2997 void CMSCollector::verify_after_remark_work_1() {
2998   ResourceMark rm;
2999   HandleMark  hm;
3000   GenCollectedHeap* gch = GenCollectedHeap::heap();
3001 
3002   // Get a clear set of claim bits for the roots processing to work with.
3003   ClassLoaderDataGraph::clear_claimed_marks();
3004 
3005   // Mark from roots one level into CMS
3006   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3007   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3008 
3009   gch->gen_process_roots(_cmsGen->level(),
3010                          true,   // younger gens are roots
3011                          true,   // activate StrongRootsScope
3012                          SharedHeap::ScanningOption(roots_scanning_options()),
3013                          should_unload_classes(),
3014                          &notOlder,
3015                          NULL,
3016                          NULL);  // SSS: Provide correct closure
3017 
3018   // Now mark from the roots
3019   MarkFromRootsClosure markFromRootsClosure(this, _span,
3020     verification_mark_bm(), verification_mark_stack(),
3021     false /* don't yield */, true /* verifying */);
3022   assert(_restart_addr == NULL, "Expected pre-condition");
3023   verification_mark_bm()->iterate(&markFromRootsClosure);
3024   while (_restart_addr != NULL) {
3025     // Deal with stack overflow: by restarting at the indicated
3026     // address.
3027     HeapWord* ra = _restart_addr;
3028     markFromRootsClosure.reset(ra);
3029     _restart_addr = NULL;


3057   void do_klass(Klass* k) {
3058     k->oops_do(&_oop_closure);
3059   }
3060 };
3061 
3062 void CMSCollector::verify_after_remark_work_2() {
3063   ResourceMark rm;
3064   HandleMark  hm;
3065   GenCollectedHeap* gch = GenCollectedHeap::heap();
3066 
3067   // Get a clear set of claim bits for the roots processing to work with.
3068   ClassLoaderDataGraph::clear_claimed_marks();
3069 
3070   // Mark from roots one level into CMS
3071   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3072                                      markBitMap());
3073   CLDToOopClosure cld_closure(&notOlder, true);
3074 
3075   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3076 
3077   gch->gen_process_roots(_cmsGen->level(),
3078                          true,   // younger gens are roots
3079                          true,   // activate StrongRootsScope
3080                          SharedHeap::ScanningOption(roots_scanning_options()),
3081                          should_unload_classes(),
3082                          &notOlder,
3083                          NULL,
3084                          &cld_closure);
3085 
3086   // Now mark from the roots
3087   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3088     verification_mark_bm(), markBitMap(), verification_mark_stack());
3089   assert(_restart_addr == NULL, "Expected pre-condition");
3090   verification_mark_bm()->iterate(&markFromRootsClosure);
3091   while (_restart_addr != NULL) {
3092     // Deal with stack overflow: by restarting at the indicated
3093     // address.
3094     HeapWord* ra = _restart_addr;
3095     markFromRootsClosure.reset(ra);
3096     _restart_addr = NULL;
3097     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());


3671     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3672       // The parallel version.
3673       FlexibleWorkGang* workers = gch->workers();
3674       assert(workers != NULL, "Need parallel worker threads.");
3675       int n_workers = workers->active_workers();
3676       CMSParInitialMarkTask tsk(this, n_workers);
3677       gch->set_par_threads(n_workers);
3678       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3679       if (n_workers > 1) {
3680         GenCollectedHeap::StrongRootsScope srs(gch);
3681         workers->run_task(&tsk);
3682       } else {
3683         GenCollectedHeap::StrongRootsScope srs(gch);
3684         tsk.work(0);
3685       }
3686       gch->set_par_threads(0);
3687     } else {
3688       // The serial version.
3689       CLDToOopClosure cld_closure(&notOlder, true);
3690       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3691       gch->gen_process_roots(_cmsGen->level(),
3692                              true,   // younger gens are roots
3693                              true,   // activate StrongRootsScope
3694                              SharedHeap::ScanningOption(roots_scanning_options()),
3695                              should_unload_classes(),
3696                              &notOlder,
3697                              NULL,
3698                              &cld_closure);
3699     }
3700   }
3701 
3702   // Clear mod-union table; it will be dirtied in the prologue of
3703   // the CMS generation on each younger generation collection.
3704 
3705   assert(_modUnionTable.isAllClear(),
3706        "Was cleared in most recent final checkpoint phase"
3707        " or no bits are set in the gc_prologue before the start of the next "
3708        "subsequent marking phase.");
3709 
3710   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3711 


4947          "world should be stopped");
4948   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4949 
4950   verify_work_stacks_empty();
4951   verify_overflow_empty();
4952 
4953   SpecializationStats::clear();
4954   if (PrintGCDetails) {
4955     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4956                         _young_gen->used() / K,
4957                         _young_gen->capacity() / K);
4958   }
4959   if (asynch) {
4960     if (CMSScavengeBeforeRemark) {
4961       GenCollectedHeap* gch = GenCollectedHeap::heap();
4962       // Temporarily set the flag to false; GCH->do_collection expects
4963       // it to be false and will set it to true
4964       FlagSetting fl(gch->_is_gc_active, false);
4965       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4966         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4967       int level = _cmsGen->level() - 1;
4968       if (level >= 0) {
4969         gch->do_collection(true,        // full (i.e. force, see below)
4970                            false,       // !clear_all_soft_refs
4971                            0,           // size
4972                            false,       // is_tlab
4973                            level        // max_level
4974                           );
4975       }
4976     }
4977     FreelistLocker x(this);
4978     MutexLockerEx y(bitMapLock(),
4979                     Mutex::_no_safepoint_check_flag);
4980     assert(!init_mark_was_synchronous, "but that's impossible!");
4981     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4982   } else {
4983     // already have all the locks
4984     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4985                              init_mark_was_synchronous);
4986   }
4987   verify_work_stacks_empty();
4988   verify_overflow_empty();
4989   SpecializationStats::print();
4990 }
4991 
4992 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4993   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4994 
4995   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4996 


5142   GenCollectedHeap* gch = GenCollectedHeap::heap();
5143   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5144 
5145   // ---------- young gen roots --------------
5146   {
5147     work_on_young_gen_roots(worker_id, &par_mri_cl);
5148     _timer.stop();
5149     if (PrintCMSStatistics != 0) {
5150       gclog_or_tty->print_cr(
5151         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5152         worker_id, _timer.seconds());
5153     }
5154   }
5155 
5156   // ---------- remaining roots --------------
5157   _timer.reset();
5158   _timer.start();
5159 
5160   CLDToOopClosure cld_closure(&par_mri_cl, true);
5161 
5162   gch->gen_process_roots(_collector->_cmsGen->level(),
5163                          false,     // yg was scanned above
5164                          false,     // this is parallel code
5165                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5166                          _collector->should_unload_classes(),
5167                          &par_mri_cl,
5168                          NULL,
5169                          &cld_closure);
5170   assert(_collector->should_unload_classes()
5171          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5172          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5173   _timer.stop();
5174   if (PrintCMSStatistics != 0) {
5175     gclog_or_tty->print_cr(
5176       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5177       worker_id, _timer.seconds());
5178   }
5179 }
5180 
5181 // Parallel remark task
5182 class CMSParRemarkTask: public CMSParMarkTask {


5278     work_queue(worker_id));
5279 
5280   // Rescan young gen roots first since these are likely
5281   // coarsely partitioned and may, on that account, constitute
5282   // the critical path; thus, it's best to start off that
5283   // work first.
5284   // ---------- young gen roots --------------
5285   {
5286     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5287     _timer.stop();
5288     if (PrintCMSStatistics != 0) {
5289       gclog_or_tty->print_cr(
5290         "Finished young gen rescan work in %dth thread: %3.3f sec",
5291         worker_id, _timer.seconds());
5292     }
5293   }
5294 
5295   // ---------- remaining roots --------------
5296   _timer.reset();
5297   _timer.start();
5298   gch->gen_process_roots(_collector->_cmsGen->level(),
5299                          false,     // yg was scanned above
5300                          false,     // this is parallel code
5301                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5302                          _collector->should_unload_classes(),
5303                          &par_mrias_cl,
5304                          NULL,
5305                          NULL);     // The dirty klasses will be handled below
5306 
5307   assert(_collector->should_unload_classes()
5308          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5309          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5310   _timer.stop();
5311   if (PrintCMSStatistics != 0) {
5312     gclog_or_tty->print_cr(
5313       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5314       worker_id, _timer.seconds());
5315   }
5316 
5317   // ---------- unhandled CLD scanning ----------
5318   if (worker_id == 0) { // Single threaded at the moment.


5870       verify_work_stacks_empty();
5871       if (PrintCMSStatistics != 0) {
5872         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5873           markFromDirtyCardsClosure.num_dirty_cards());
5874       }
5875     }
5876   }
5877   if (VerifyDuringGC &&
5878       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5879     HandleMark hm;  // Discard invalid handles created during verification
5880     Universe::verify();
5881   }
5882   {
5883     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5884 
5885     verify_work_stacks_empty();
5886 
5887     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5888     GenCollectedHeap::StrongRootsScope srs(gch);
5889 
5890     gch->gen_process_roots(_cmsGen->level(),
5891                            true,  // younger gens as roots
5892                            false, // use the local StrongRootsScope
5893                            SharedHeap::ScanningOption(roots_scanning_options()),
5894                            should_unload_classes(),
5895                            &mrias_cl,
5896                            NULL,
5897                            NULL); // The dirty klasses will be handled below
5898 
5899     assert(should_unload_classes()
5900            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5901            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5902   }
5903 
5904   {
5905     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5906 
5907     verify_work_stacks_empty();
5908 
5909     // Scan all class loader data objects that might have been introduced
5910     // during concurrent marking.


6347   size_t nearLargestOffset =
6348     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6349   if (PrintFLSStatistics != 0) {
6350     gclog_or_tty->print_cr(
6351       "CMS: Large Block: " PTR_FORMAT ";"
6352       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6353       largestAddr,
6354       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6355   }
6356   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6357 }
6358 
6359 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6360   return addr >= _cmsSpace->nearLargestChunk();
6361 }
6362 
6363 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6364   return _cmsSpace->find_chunk_at_end();
6365 }
6366 
6367 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6368                                                     bool full) {
6369   // The next lower level has been collected.  Gather any statistics
6370   // that are of interest at this point.
6371   if (!full && (current_level + 1) == level()) {

6372     // Gather statistics on the young generation collection.
6373     collector()->stats().record_gc0_end(used());
6374   }
6375 }
6376 
6377 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6378   if (PrintGCDetails && Verbose) {
6379     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6380   }
6381   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6382   _debug_collection_type =
6383     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6384   if (PrintGCDetails && Verbose) {
6385     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6386   }
6387 }
6388 
6389 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6390   bool asynch) {
6391   // We iterate over the space(s) underlying this generation,




 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
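
Not part of the diff itself, but as a hedged sketch of how this per-thread state is typically used: the collector keeps one CMSParGCThreadState per parallel GC worker, so each worker gets a private free-list allocation buffer and private promotion bookkeeping over the shared CMS space. The array shape and the cfls variable below are assumptions for illustration.

  // Sketch only: one state object per parallel worker, all sharing the
  // same CompactibleFreeListSpace (cfls). Each worker allocates from its
  // own CFLS_LAB and records promotions in its own PromotionInfo,
  // avoiding contention on the shared free lists.
  CMSParGCThreadState** states =
    NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    states[i] = new CMSParGCThreadState(cfls);
  }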
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false)
 207 {
 208   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 209   HeapWord* end    = (HeapWord*) _virtual_space.high();
 210 
 211   _direct_allocated_words = 0;
 212   NOT_PRODUCT(
 213     _numObjectsPromoted = 0;
 214     _numWordsPromoted = 0;
 215     _numObjectsAllocated = 0;
 216     _numWordsAllocated = 0;
 217   )
 218 
 219   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 220                                            use_adaptive_freelists,
 221                                            dictionaryChoice);
 222   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 223   if (_cmsSpace == NULL) {


 363   _cms_used_at_gc0_end = 0;
 364   _allow_duty_cycle_reduction = false;
 365   _valid_bits = 0;
 366   _icms_duty_cycle = CMSIncrementalDutyCycle;
 367 }
 368 
 369 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 370   // TBD: CR 6909490
 371   return 1.0;
 372 }
 373 
 374 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 375 }
 376 
 377 // If promotion failure handling is on, use
 378 // the padded average size of the promotion for each
 379 // young generation collection.
 380 double CMSStats::time_until_cms_gen_full() const {
 381   size_t cms_free = _cms_gen->cmsSpace()->free();
 382   GenCollectedHeap* gch = GenCollectedHeap::heap();
 383   size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
 384                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 385   if (cms_free > expected_promotion) {
 386     // Start a cms collection if there isn't enough space to promote
 387     // for the next minor collection.  Use the padded average as
 388     // a safety factor.
 389     cms_free -= expected_promotion;
 390 
 391     // Adjust by the safety factor.
 392     double cms_free_dbl = (double)cms_free;
 393     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 394     // Apply a further correction factor which tries to adjust
 395     // for recent occurrence of concurrent mode failures.
 396     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 397     cms_free_dbl = cms_free_dbl * cms_adjustment;
 398 
 399     if (PrintGCDetails && Verbose) {
 400       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 401         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 402         cms_free, expected_promotion);
 403       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
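
The hunk is truncated here, but the headroom arithmetic above can be made concrete with invented numbers; the elided tail of the function divides the adjusted headroom by a consumption-rate estimate to produce the returned time.

  // Illustration only; all numbers hypothetical.
  size_t cms_free           = 100 * M;  // free space in the CMS generation
  size_t expected_promotion =  20 * M;  // padded-average promotion per young GC
  cms_free -= expected_promotion;       // 80 MB of usable headroom
  // With CMSIncrementalSafetyFactor = 10, keep 90% of that headroom:
  double cms_free_dbl = (double)cms_free * ((100.0 - 10.0) / 100.0);  // ~72 MB
  // time until full ~= cms_free_dbl / cms_consumption_rate (code elided above)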


 691 
 692   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 693 
 694   // Clip CMSBootstrapOccupancy between 0 and 100.
 695   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 696 
 697   _full_gcs_since_conc_gc = 0;
 698 
 699   // Now tell CMS generations the identity of their collector
 700   ConcurrentMarkSweepGeneration::set_collector(this);
 701 
 702   // Create & start a CMS thread for this CMS collector
 703   _cmsThread = ConcurrentMarkSweepThread::start(this);
 704   assert(cmsThread() != NULL, "CMS Thread should have been created");
 705   assert(cmsThread()->collector() == this,
 706          "CMS Thread should refer to this gen");
 707   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 708 
 709   // Support for parallelizing young gen rescan
 710   GenCollectedHeap* gch = GenCollectedHeap::heap();
 711   _young_gen = gch->young_gen();
 712   if (gch->supports_inline_contig_alloc()) {
 713     _top_addr = gch->top_addr();
 714     _end_addr = gch->end_addr();
 715     assert(_young_gen != NULL, "no _young_gen");
 716     _eden_chunk_index = 0;
 717     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 718     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 719     if (_eden_chunk_array == NULL) {
 720       _eden_chunk_capacity = 0;
 721       warning("GC/CMS: _eden_chunk_array allocation failure");
 722     }
 723   }
 724   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 725 
 726   // Support for parallelizing survivor space rescan
 727   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 728     const size_t max_plab_samples =
 729       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 730 
 731     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
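
A note on the eden sampling arithmetic above: dividing (max_capacity() + CMSSamplingGrain) by CMSSamplingGrain sizes the chunk array at one slot per sampling grain, always leaving at least one spare slot. With invented numbers:

  // Hypothetical figures, in the same units the code above uses.
  size_t max_capacity = 64 * M;  // young gen max capacity
  size_t grain        = 1 * M;   // CMSSamplingGrain
  size_t slots        = (max_capacity + grain) / grain;  // 65 slots, not 64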


 802     _space_counters->update_used(used);
 803     _space_counters->update_capacity();
 804     _gen_counters->update_all();
 805   }
 806 }
 807 
 808 void ConcurrentMarkSweepGeneration::print() const {
 809   Generation::print();
 810   cmsSpace()->print();
 811 }
 812 
 813 #ifndef PRODUCT
 814 void ConcurrentMarkSweepGeneration::print_statistics() {
 815   cmsSpace()->printFLCensus(0);
 816 }
 817 #endif
 818 
 819 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 820   GenCollectedHeap* gch = GenCollectedHeap::heap();
 821   if (PrintGCDetails) {
 822     // I didn't want to change the logging when removing the level concept,
 823     // but I guess this logging could say "old" or something instead of "1".
 824     assert(this == gch->old_gen(),
 825            "The CMS generation should be the old generation");
 826     int level = 1;
 827     if (Verbose) {
 828       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 829         level, short_name(), s, used(), capacity());
 830     } else {
 831       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 832         level, short_name(), s, used() / K, capacity() / K);
 833     }
 834   }
 835   if (Verbose) {
 836     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 837               gch->used(), gch->capacity());
 838   } else {
 839     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 840               gch->used() / K, gch->capacity() / K);
 841   }
 842 }
 843 
 844 size_t
 845 ConcurrentMarkSweepGeneration::contiguous_available() const {
 846   // dld proposes an improvement in precision here. If the committed
 847   // part of the space ends in a free block we should add that to
 848   // uncommitted size in the calculation below. Will make this
 849   // change later, staying with the approximation below for the
 850   // time being. -- ysr.
 851   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 852 }
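
To illustrate the approximation with invented numbers: with 10 MB of the reservation still uncommitted and a 3 MB largest no-GC allocation, the method reports 10 MB; per the note above, a committed free block at the end of the space is not yet counted.

  // Hypothetical values, illustration only.
  size_t uncommitted = 10 * M;  // _virtual_space.uncommitted_size()
  size_t max_alloc   =  3 * M;  // unsafe_max_alloc_nogc()
  size_t avail       = MAX2(uncommitted, max_alloc);  // 10 MB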


 936     return;
 937   }
 938 
 939   double free_percentage = ((double) free()) / capacity();
 940   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 941   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 942 
 943   // compute expansion delta needed for reaching desired free percentage
 944   if (free_percentage < desired_free_percentage) {
 945     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 946     assert(desired_capacity >= capacity(), "invalid expansion size");
 947     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 948     if (PrintGCDetails && Verbose) {
 949       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 950       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 951       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 952       gclog_or_tty->print_cr("  Desired free fraction %f",
 953                              desired_free_percentage);
 954       gclog_or_tty->print_cr("  Maximum free fraction %f",
 955                              maximum_free_percentage);
 956       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity() / 1000);
 957       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 958                              desired_capacity / 1000);



 959       GenCollectedHeap* gch = GenCollectedHeap::heap();
 960       assert(this == gch->_old_gen,
 961              "The CMS generation should always be the old generation");
 962       size_t young_size = gch->_young_gen->capacity();
 963       gclog_or_tty->print_cr("  Young gen size "SIZE_FORMAT,
 964                              young_size / 1000);
 965       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 966                              unsafe_max_alloc_nogc() / 1000);
 967       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 968                              contiguous_available() / 1000);
 969       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 970                              expand_bytes);
 971     }
 972     // safe if expansion fails
 973     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 974     if (PrintGCDetails && Verbose) {
 975       gclog_or_tty->print_cr("  Expanded free fraction %f",
 976         ((double) free()) / capacity());
 977     }
 978   } else {
 979     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 980     assert(desired_capacity <= capacity(), "invalid expansion size");
 981     size_t shrink_bytes = capacity() - desired_capacity;
 982     // Don't shrink unless the delta is greater than the minimum shrink we want
 983     if (shrink_bytes >= MinHeapDeltaBytes) {
 984       shrink_free_list_by(shrink_bytes);
 985     }
 986   }
 987 }
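
A worked example of the sizing rule above, with invented numbers: used() = 60 MB, capacity() = 80 MB, MinHeapFreeRatio = 40. The free fraction is 20/80 = 0.25 < 0.40, so the generation expands toward a capacity at which 40% would be free:

  // Hypothetical numbers, illustration only.
  size_t used_bytes       = 60 * M;
  size_t capacity_bytes   = 80 * M;
  double desired_free     = 40 / 100.0;  // MinHeapFreeRatio as a fraction
  size_t desired_capacity =
    (size_t)(used_bytes / (1.0 - desired_free));                   // 100 MB
  size_t expand_bytes =
    MAX2(desired_capacity - capacity_bytes, (size_t)MinHeapDeltaBytes);  // 20 MB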
 988 


1111         }
1112       } else {  // not an obj array; we can just mark the head
1113         if (par) {
1114           _modUnionTable.par_mark(start);
1115         } else {
1116           _modUnionTable.mark(start);
1117         }
1118       }
1119     }
1120   }
1121 }
1122 
1123 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1124 {
1125   size_t delta = pointer_delta(addr, space->bottom());
1126   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1127 }
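
A small usage sketch (hypothetical values) tying the helper above to the duty-cycle window computed in icms_update_allocation_limits below: with 1000 free words in eden and a 40% duty cycle, the incremental-mode window is the middle 40% of eden's free region.

  // Hypothetical values, illustration only.
  size_t free_words       = 1000;  // eden->free() / HeapWordSize
  unsigned int duty_cycle = 40;    // percent
  size_t duty_cycle_words = (size_t)(free_words * duty_cycle / 100.0);  // 400
  size_t offset_words     = (free_words - duty_cycle_words) / 2;        // 300
  // start limit = eden->top() + 300 words; stop limit = eden->end() - 300.
  // percent_of_space(eden, _icms_start_limit) then reports how far into
  // eden's capacity the window begins, e.g. for the pacing trace output.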
1128 
1129 void CMSCollector::icms_update_allocation_limits()
1130 {
1131   Generation* young = GenCollectedHeap::heap()->young_gen();
1132   EdenSpace* eden = young->as_DefNewGeneration()->eden();
1133 
1134   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1135   if (CMSTraceIncrementalPacing) {
1136     stats().print();
1137   }
1138 
1139   assert(duty_cycle <= 100, "invalid duty cycle");
1140   if (duty_cycle != 0) {
1141     // The duty_cycle is a percentage between 0 and 100; convert to words and
1142     // then compute the offset from the endpoints of the space.
1143     size_t free_words = eden->free() / HeapWordSize;
1144     double free_words_dbl = (double)free_words;
1145     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1146     size_t offset_words = (free_words - duty_cycle_words) / 2;
1147 
1148     _icms_start_limit = eden->top() + offset_words;
1149     _icms_stop_limit = eden->end() - offset_words;
1150 
1151     // The limits may be adjusted (shifted to the right) by


1252 }
1253 
1254 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1255   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1256   // allocate, copy and if necessary update promoinfo --
1257   // delegate to underlying space.
1258   assert_lock_strong(freelistLock());
1259 
1260 #ifndef PRODUCT
1261   if (Universe::heap()->promotion_should_fail()) {
1262     return NULL;
1263   }
1264 #endif  // #ifndef PRODUCT
1265 
1266   oop res = _cmsSpace->promote(obj, obj_size);
1267   if (res == NULL) {
1268     // expand and retry
1269     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1270     expand(s*HeapWordSize, MinHeapDeltaBytes,
1271       CMSExpansionCause::_satisfy_promotion);
1272     // Since this is the old generation, we don't try to promote
1273     // into a more senior generation.



1274     res = _cmsSpace->promote(obj, obj_size);
1275   }
1276   if (res != NULL) {
1277     // See comment in allocate() about when objects should
1278     // be allocated live.
1279     assert(obj->is_oop(), "Will dereference klass pointer below");
1280     collector()->promoted(false,           // Not parallel
1281                           (HeapWord*)res, obj->is_objArray(), obj_size);
1282     // promotion counters
1283     NOT_PRODUCT(
1284       _numObjectsPromoted++;
1285       _numWordsPromoted +=
1286         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1287     )
1288   }
1289   return res;
1290 }
1291 
1292 
1293 HeapWord*


2039   // collection, clear the _modUnionTable.
2040   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2041     "_modUnionTable should be clear if the baton was not passed");
2042   _modUnionTable.clear_all();
2043   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2044     "mod union for klasses should be clear if the baton was passed");
2045   _ct->klass_rem_set()->clear_mod_union();
2046 
2047   // We must adjust the allocation statistics being maintained
2048   // in the free list space. We do so by reading and clearing
2049   // the sweep timer and updating the block flux rate estimates below.
2050   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2051   if (_inter_sweep_timer.is_active()) {
2052     _inter_sweep_timer.stop();
2053     // Note that we do not use this sample to update the _inter_sweep_estimate.
2054     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2055                                             _inter_sweep_estimate.padded_average(),
2056                                             _intra_sweep_estimate.padded_average());
2057   }
2058 
2059   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

2060   #ifdef ASSERT
2061     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2062     size_t free_size = cms_space->free();
2063     assert(free_size ==
2064            pointer_delta(cms_space->end(), cms_space->compaction_top())
2065            * HeapWordSize,
2066       "All the free space should be compacted into one chunk at top");
2067     assert(cms_space->dictionary()->total_chunk_size(
2068                                       debug_only(cms_space->freelistLock())) == 0 ||
2069            cms_space->totalSizeInIndexedFreeLists() == 0,
2070       "All the free space should be in a single chunk");
2071     size_t num = cms_space->totalCount();
2072     assert((free_size == 0 && num == 0) ||
2073            (free_size > 0  && (num == 1 || num == 2)),
2074          "There should be at most 2 free chunks after compaction");
2075   #endif // ASSERT
2076   _collectorState = Resetting;
2077   assert(_restart_addr == NULL,
2078          "Should have been NULL'd before baton was passed");
2079   reset(false /* == !asynch */);


2987   } else {
2988     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2989             CMSRemarkVerifyVariant);
2990   }
2991   if (!silent) gclog_or_tty->print(" done] ");
2992   return true;
2993 }
2994 
2995 void CMSCollector::verify_after_remark_work_1() {
2996   ResourceMark rm;
2997   HandleMark  hm;
2998   GenCollectedHeap* gch = GenCollectedHeap::heap();
2999 
3000   // Get a clear set of claim bits for the roots processing to work with.
3001   ClassLoaderDataGraph::clear_claimed_marks();
3002 
3003   // Mark from roots one level into CMS
3004   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3005   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3006 
3007   gch->gen_process_roots(Generation::Old,
3008                          true,   // younger gens are roots
3009                          true,   // activate StrongRootsScope
3010                          SharedHeap::ScanningOption(roots_scanning_options()),
3011                          should_unload_classes(),
3012                          &notOlder,
3013                          NULL,
3014                          NULL);  // SSS: Provide correct closure
3015 
3016   // Now mark from the roots
3017   MarkFromRootsClosure markFromRootsClosure(this, _span,
3018     verification_mark_bm(), verification_mark_stack(),
3019     false /* don't yield */, true /* verifying */);
3020   assert(_restart_addr == NULL, "Expected pre-condition");
3021   verification_mark_bm()->iterate(&markFromRootsClosure);
3022   while (_restart_addr != NULL) {
3023     // Deal with stack overflow: by restarting at the indicated
3024     // address.
3025     HeapWord* ra = _restart_addr;
3026     markFromRootsClosure.reset(ra);
3027     _restart_addr = NULL;


3055   void do_klass(Klass* k) {
3056     k->oops_do(&_oop_closure);
3057   }
3058 };
3059 
3060 void CMSCollector::verify_after_remark_work_2() {
3061   ResourceMark rm;
3062   HandleMark  hm;
3063   GenCollectedHeap* gch = GenCollectedHeap::heap();
3064 
3065   // Get a clear set of claim bits for the roots processing to work with.
3066   ClassLoaderDataGraph::clear_claimed_marks();
3067 
3068   // Mark from roots one level into CMS
3069   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3070                                      markBitMap());
3071   CLDToOopClosure cld_closure(&notOlder, true);
3072 
3073   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3074 
3075   gch->gen_process_roots(Generation::Old,
3076                          true,   // younger gens are roots
3077                          true,   // activate StrongRootsScope
3078                          SharedHeap::ScanningOption(roots_scanning_options()),
3079                          should_unload_classes(),
3080                          &notOlder,
3081                          NULL,
3082                          &cld_closure);
3083 
3084   // Now mark from the roots
3085   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3086     verification_mark_bm(), markBitMap(), verification_mark_stack());
3087   assert(_restart_addr == NULL, "Expected pre-condition");
3088   verification_mark_bm()->iterate(&markFromRootsClosure);
3089   while (_restart_addr != NULL) {
3090     // Deal with stack overflow: by restarting at the indicated
3091     // address.
3092     HeapWord* ra = _restart_addr;
3093     markFromRootsClosure.reset(ra);
3094     _restart_addr = NULL;
3095     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());


3669     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3670       // The parallel version.
3671       FlexibleWorkGang* workers = gch->workers();
3672       assert(workers != NULL, "Need parallel worker threads.");
3673       int n_workers = workers->active_workers();
3674       CMSParInitialMarkTask tsk(this, n_workers);
3675       gch->set_par_threads(n_workers);
3676       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3677       if (n_workers > 1) {
3678         GenCollectedHeap::StrongRootsScope srs(gch);
3679         workers->run_task(&tsk);
3680       } else {
3681         GenCollectedHeap::StrongRootsScope srs(gch);
3682         tsk.work(0);
3683       }
3684       gch->set_par_threads(0);
3685     } else {
3686       // The serial version.
3687       CLDToOopClosure cld_closure(&notOlder, true);
3688       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3689       gch->gen_process_roots(Generation::Old,
3690                              true,   // younger gens are roots
3691                              true,   // activate StrongRootsScope
3692                              SharedHeap::ScanningOption(roots_scanning_options()),
3693                              should_unload_classes(),
3694                              &notOlder,
3695                              NULL,
3696                              &cld_closure);
3697     }
3698   }
3699 
3700   // Clear mod-union table; it will be dirtied in the prologue of
3701   // the CMS generation on each younger generation collection.
3702 
3703   assert(_modUnionTable.isAllClear(),
3704        "Was cleared in most recent final checkpoint phase"
3705        " or no bits are set in the gc_prologue before the start of the next "
3706        "subsequent marking phase.");
3707 
3708   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3709 


4945          "world should be stopped");
4946   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4947 
4948   verify_work_stacks_empty();
4949   verify_overflow_empty();
4950 
4951   SpecializationStats::clear();
4952   if (PrintGCDetails) {
4953     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4954                         _young_gen->used() / K,
4955                         _young_gen->capacity() / K);
4956   }
4957   if (asynch) {
4958     if (CMSScavengeBeforeRemark) {
4959       GenCollectedHeap* gch = GenCollectedHeap::heap();
4960       // Temporarily set the flag to false; GCH->do_collection expects
4961       // it to be false and will set it to true
4962       FlagSetting fl(gch->_is_gc_active, false);
4963       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4964         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)


4965       gch->do_collection(true,             // full (i.e. force, see below)
4966                          false,            // !clear_all_soft_refs
4967                          0,                // size
4968                          false,            // is_tlab
4969                          Generation::Young // type
4970         );
4971     }

4972     FreelistLocker x(this);
4973     MutexLockerEx y(bitMapLock(),
4974                     Mutex::_no_safepoint_check_flag);
4975     assert(!init_mark_was_synchronous, "but that's impossible!");
4976     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4977   } else {
4978     // already have all the locks
4979     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4980                              init_mark_was_synchronous);
4981   }
4982   verify_work_stacks_empty();
4983   verify_overflow_empty();
4984   SpecializationStats::print();
4985 }
4986 
4987 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4988   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4989 
4990   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4991 


5137   GenCollectedHeap* gch = GenCollectedHeap::heap();
5138   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5139 
5140   // ---------- young gen roots --------------
5141   {
5142     work_on_young_gen_roots(worker_id, &par_mri_cl);
5143     _timer.stop();
5144     if (PrintCMSStatistics != 0) {
5145       gclog_or_tty->print_cr(
5146         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5147         worker_id, _timer.seconds());
5148     }
5149   }
5150 
5151   // ---------- remaining roots --------------
5152   _timer.reset();
5153   _timer.start();
5154 
5155   CLDToOopClosure cld_closure(&par_mri_cl, true);
5156 
5157   gch->gen_process_roots(Generation::Old,
5158                          false,     // yg was scanned above
5159                          false,     // this is parallel code
5160                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5161                          _collector->should_unload_classes(),
5162                          &par_mri_cl,
5163                          NULL,
5164                          &cld_closure);
5165   assert(_collector->should_unload_classes()
5166          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5167          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5168   _timer.stop();
5169   if (PrintCMSStatistics != 0) {
5170     gclog_or_tty->print_cr(
5171       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5172       worker_id, _timer.seconds());
5173   }
5174 }
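
The call above shows the pattern this patch applies at every gen_process_roots site in the file: the integer generation level (formerly _cmsGen->level()) is replaced by a symbolic generation type. A minimal sketch of the idea, with the enum shape inferred from the Generation::Old and Generation::Young uses elsewhere in this diff:

  // Before the patch: generations identified positionally.
  //   gch->gen_process_roots(_cmsGen->level(), ...);
  // After the patch: generations identified by type.
  //   gch->gen_process_roots(Generation::Old, ...);
  class Generation /* sketch, not the real declaration */ {
   public:
    enum Type { Young, Old };
  };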
5175 
5176 // Parallel remark task
5177 class CMSParRemarkTask: public CMSParMarkTask {


5273     work_queue(worker_id));
5274 
5275   // Rescan young gen roots first since these are likely
5276   // coarsely partitioned and may, on that account, constitute
5277   // the critical path; thus, it's best to start off that
5278   // work first.
5279   // ---------- young gen roots --------------
5280   {
5281     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5282     _timer.stop();
5283     if (PrintCMSStatistics != 0) {
5284       gclog_or_tty->print_cr(
5285         "Finished young gen rescan work in %dth thread: %3.3f sec",
5286         worker_id, _timer.seconds());
5287     }
5288   }
5289 
5290   // ---------- remaining roots --------------
5291   _timer.reset();
5292   _timer.start();
5293   gch->gen_process_roots(Generation::Old,
5294                          false,     // yg was scanned above
5295                          false,     // this is parallel code
5296                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5297                          _collector->should_unload_classes(),
5298                          &par_mrias_cl,
5299                          NULL,
5300                          NULL);     // The dirty klasses will be handled below
5301 
5302   assert(_collector->should_unload_classes()
5303          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5304          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5305   _timer.stop();
5306   if (PrintCMSStatistics != 0) {
5307     gclog_or_tty->print_cr(
5308       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5309       worker_id, _timer.seconds());
5310   }
5311 
5312   // ---------- unhandled CLD scanning ----------
5313   if (worker_id == 0) { // Single threaded at the moment.


5865       verify_work_stacks_empty();
5866       if (PrintCMSStatistics != 0) {
5867         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5868           markFromDirtyCardsClosure.num_dirty_cards());
5869       }
5870     }
5871   }
5872   if (VerifyDuringGC &&
5873       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5874     HandleMark hm;  // Discard invalid handles created during verification
5875     Universe::verify();
5876   }
5877   {
5878     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5879 
5880     verify_work_stacks_empty();
5881 
5882     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5883     GenCollectedHeap::StrongRootsScope srs(gch);
5884 
5885     gch->gen_process_roots(Generation::Old,
5886                            true,  // younger gens as roots
5887                            false, // use the local StrongRootsScope
5888                            SharedHeap::ScanningOption(roots_scanning_options()),
5889                            should_unload_classes(),
5890                            &mrias_cl,
5891                            NULL,
5892                            NULL); // The dirty klasses will be handled below
5893 
5894     assert(should_unload_classes()
5895            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5896            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5897   }
5898 
5899   {
5900     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5901 
5902     verify_work_stacks_empty();
5903 
5904     // Scan all class loader data objects that might have been introduced
5905     // during concurrent marking.


6342   size_t nearLargestOffset =
6343     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6344   if (PrintFLSStatistics != 0) {
6345     gclog_or_tty->print_cr(
6346       "CMS: Large Block: " PTR_FORMAT ";"
6347       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6348       largestAddr,
6349       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6350   }
6351   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6352 }
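
To illustrate the offset arithmetic above with invented numbers: if the largest block begins 100M words past minAddr and nearLargestPercent is 0.90, the sweep hint lands just below the 90% point, backed off by one minimum chunk so a chunk boundary can precede it.

  // Hypothetical values, illustration only (offsets in HeapWords).
  size_t largestOffset      = 100 * M;  // largestAddr - minAddr
  double nearLargestPercent = 0.90;
  size_t nearLargestOffset  =
    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  // The hint becomes minAddr + ~90M words; isNearLargestChunk(addr) is
  // then simply addr >= _cmsSpace->nearLargestChunk().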
6353 
6354 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6355   return addr >= _cmsSpace->nearLargestChunk();
6356 }
6357 
6358 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6359   return _cmsSpace->find_chunk_at_end();
6360 }
6361 
6362 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
6363                                                     bool full) {
6364   // If the young generation has just been collected, gather any
6365   // statistics that are of interest at this point.
6366   bool current_is_young = (current_generation == GenCollectedHeap::heap()->young_gen());
6367   if (!full && current_is_young) {
6368     // Gather statistics on the young generation collection.
6369     collector()->stats().record_gc0_end(used());
6370   }
6371 }
6372 
6373 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6374   if (PrintGCDetails && Verbose) {
6375     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6376   }
6377   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6378   _debug_collection_type =
6379     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6380   if (PrintGCDetails && Verbose) {
6381     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6382   }
6383 }
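
The rotation above is a plain modular increment over the CollectionTypes enum, with Unknown_collection_type acting as the sentinel count; a self-contained sketch of the same idiom, enumerator layout assumed:

  // Sketch: consecutive enumerators starting at 0, sentinel as the count.
  enum CollectionTypes { Concurrent_collection_type = 0,
                         MS_collection_type,
                         MSC_collection_type,
                         Unknown_collection_type };
  CollectionTypes t = MSC_collection_type;
  t = (CollectionTypes)((t + 1) % Unknown_collection_type);
  // t wraps back to Concurrent_collection_type.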
6384 
6385 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6386   bool asynch) {
6387   // We iterate over the space(s) underlying this generation,

