src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "gc/shared/gcTimer.hpp"
  48 #include "gc/shared/gcTrace.hpp"
  49 #include "gc/shared/gcTraceTime.hpp"
  50 #include "gc/shared/genCollectedHeap.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/strongRootsScope.hpp"
  55 #include "gc/shared/taskqueue.inline.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/iterator.inline.hpp"
  58 #include "memory/padded.hpp"
  59 #include "memory/resourceArea.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "runtime/atomic.inline.hpp"
  63 #include "runtime/globals_extension.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/orderAccess.inline.hpp"
  67 #include "runtime/vmThread.hpp"
  68 #include "services/memoryService.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/stack.inline.hpp"
  71 
  72 // statics
  73 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  74 bool CMSCollector::_full_gc_requested = false;
  75 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;


 349 // young generation collection.
 350 double CMSStats::time_until_cms_gen_full() const {
 351   size_t cms_free = _cms_gen->cmsSpace()->free();
 352   GenCollectedHeap* gch = GenCollectedHeap::heap();
 353   size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
 354                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 355   if (cms_free > expected_promotion) {
 356     // Start a cms collection if there isn't enough space to promote
 357     // for the next young collection.  Use the padded average as
 358     // a safety factor.
 359     cms_free -= expected_promotion;
 360 
 361     // Adjust by the safety factor.
 362     double cms_free_dbl = (double)cms_free;
 363     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
 364     // Apply a further correction factor which tries to adjust
  365   // for recent occurrence of concurrent mode failures.
 366     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 367     cms_free_dbl = cms_free_dbl * cms_adjustment;
 368 
 369     if (PrintGCDetails && Verbose) {
 370       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 371         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 372         cms_free, expected_promotion);
 373       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
 374         cms_free_dbl, cms_consumption_rate() + 1.0);
 375     }
 376     // Add 1 in case the consumption rate goes to zero.
 377     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 378   }
 379   return 0.0;
 380 }
 381 
 382 // Compare the duration of the cms collection to the
 383 // time remaining before the cms generation is empty.
 384 // Note that the time from the start of the cms collection
 385 // to the start of the cms sweep (less than the total
 386 // duration of the cms collection) can be used.  This
 387 // has been tried and some applications experienced
 388 // promotion failures early in execution.  This was
 389 // possibly because the averages were not accurate
 390 // enough at the beginning.
 391 double CMSStats::time_until_cms_start() const {
 392   // We add "gc0_period" to the "work" calculation
 393   // below because this query is done (mostly) at the
 394   // end of a scavenge, so we need to conservatively
 395   // account for that much possible delay
 396   // in the query so as to avoid concurrent mode failures
 397   // due to starting the collection just a wee bit too
 398   // late.
 399   double work = cms_duration() + gc0_period();
 400   double deadline = time_until_cms_gen_full();
 401   // If a concurrent mode failure occurred recently, we want to be
 402   // more conservative and halve our expected time_until_cms_gen_full()
 403   if (work > deadline) {
 404     if (Verbose && PrintGCDetails) {
 405       gclog_or_tty->print(
 406         " CMSCollector: collect because of anticipated promotion "
 407         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 408         gc0_period(), time_until_cms_gen_full());
 409     }
 410     return 0.0;
 411   }
 412   return work - deadline;
 413 }
 414 
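A rough worked example of the two estimates above, with illustrative numbers only (none of these figures come from the source), assuming cms_free_adjustment_factor() returns 1.0 because no recent concurrent mode failures have been recorded:

    // Illustrative arithmetic only -- not part of the source file.
    //   cms_free                   = 200 MB, expected_promotion = 40 MB
    //   CMSIncrementalSafetyFactor = 10
    //   adjusted free              = (200 - 40) * (100 - 10)/100 * 1.0 = 144 MB
    //   cms_consumption_rate()     = 9 MB/s
    //   time_until_cms_gen_full()  = 144 / (9 + 1)                     = 14.4 s
    //   work = cms_duration() + gc0_period() = 5.0 + 1.0               =  6.0 s
    // Since work (6.0 s) does not exceed the 14.4 s deadline, time_until_cms_start()
    // does not return 0.0 and shouldConcurrentCollect() will not start a cycle on this
    // basis yet; once work exceeds the deadline the function returns 0.0 and a cycle starts.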
 415 #ifndef PRODUCT
 416 void CMSStats::print_on(outputStream *st) const {
 417   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 418   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 419                gc0_duration(), gc0_period(), gc0_promoted());
 420   st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 421             cms_duration(), cms_period(), cms_allocated());
 422   st->print(",cms_since_beg=%g,cms_since_end=%g",
 423             cms_time_since_begin(), cms_time_since_end());
 424   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 425             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 426 
 427   if (valid()) {
 428     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 429               promotion_rate(), cms_allocation_rate());


 651 //
 652 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 653   if (UsePerfData) {
 654     _space_counters->update_used(used);
 655     _space_counters->update_capacity();
 656     _gen_counters->update_all();
 657   }
 658 }
 659 
 660 void ConcurrentMarkSweepGeneration::print() const {
 661   Generation::print();
 662   cmsSpace()->print();
 663 }
 664 
 665 #ifndef PRODUCT
 666 void ConcurrentMarkSweepGeneration::print_statistics() {
 667   cmsSpace()->printFLCensus(0);
 668 }
 669 #endif
 670 
 671 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 672   GenCollectedHeap* gch = GenCollectedHeap::heap();
 673   if (PrintGCDetails) {
 674     // I didn't want to change the logging when removing the level concept,
 675     // but I guess this logging could say "old" or something instead of "1".
 676     assert(gch->is_old_gen(this),
 677            "The CMS generation should be the old generation");
 678     uint level = 1;
 679     if (Verbose) {
 680       gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
 681         level, short_name(), s, used(), capacity());
 682     } else {
 683       gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
 684         level, short_name(), s, used() / K, capacity() / K);
 685     }
 686   }
 687   if (Verbose) {
 688     gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
 689               gch->used(), gch->capacity());
 690   } else {
 691     gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
 692               gch->used() / K, gch->capacity() / K);
 693   }
 694 }
 695 
 696 size_t
 697 ConcurrentMarkSweepGeneration::contiguous_available() const {
 698   // dld proposes an improvement in precision here. If the committed
 699   // part of the space ends in a free block we should add that to
 700   // uncommitted size in the calculation below. Will make this
 701   // change later, staying with the approximation below for the
 702   // time being. -- ysr.
 703   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 704 }
 705 
 706 size_t
 707 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 708   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 709 }
 710 
 711 size_t ConcurrentMarkSweepGeneration::max_available() const {
 712   return free() + _virtual_space.uncommitted_size();
 713 }
 714 
 715 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 716   size_t available = max_available();
 717   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 718   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 719   if (Verbose && PrintGCDetails) {
 720     gclog_or_tty->print_cr(
 721       "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
 722       "max_promo(" SIZE_FORMAT ")",
 723       res? "":" not", available, res? ">=":"<",
 724       av_promo, max_promotion_in_bytes);
 725   }
 726   return res;
 727 }
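To make the check above concrete (illustrative numbers only, not from the source): the attempt is considered safe if the available space covers either the padded average promotion or the worst-case promotion passed in by the caller.

    // Illustrative arithmetic only -- not part of the source file.
    //   free()                = 300 MB, uncommitted = 100 MB  -> available = 400 MB
    //   padded avg promotion  =  60 MB, max_promotion_in_bytes = 500 MB
    //   available >= av_promo -> res = true, even though available < max_promotion_in_bytes.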
 728 
 729 // At a promotion failure, dump information on block layout in the heap
 730 // (cms old generation).
 731 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 732   if (CMSDumpAtPromotionFailure) {
 733     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 734   }
 735 }
 736 
 737 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 738   // Clear the promotion information.  These pointers can be adjusted
 739   // along with all the other pointers into the heap but
 740   // compaction is expected to be a rare event with
 741   // a heap using cms so don't do it without seeing the need.
 742   for (uint i = 0; i < ParallelGCThreads; i++) {
 743     _par_gc_thread_states[i]->promo.reset();
 744   }
 745 }
 746 
 747 void ConcurrentMarkSweepGeneration::compute_new_size() {
 748   assert_locked_or_safepoint(Heap_lock);
 749 
 750   // If incremental collection failed, we just want to expand
 751   // to the limit.
 752   if (incremental_collection_failed()) {
 753     clear_incremental_collection_failed();


 769 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 770   assert_locked_or_safepoint(Heap_lock);
 771 
 772   // If incremental collection failed, we just want to expand
 773   // to the limit.
 774   if (incremental_collection_failed()) {
 775     clear_incremental_collection_failed();
 776     grow_to_reserved();
 777     return;
 778   }
 779 
 780   double free_percentage = ((double) free()) / capacity();
 781   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 782   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 783 
 784   // compute expansion delta needed for reaching desired free percentage
 785   if (free_percentage < desired_free_percentage) {
 786     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 787     assert(desired_capacity >= capacity(), "invalid expansion size");
 788     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 789     if (PrintGCDetails && Verbose) {
 790       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 791       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 792       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 793       gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
 794       gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
 795       gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
 796       gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
 797       GenCollectedHeap* gch = GenCollectedHeap::heap();
 798       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
 799       size_t young_size = gch->young_gen()->capacity();
 800       gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
 801       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
 802       gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
 803       gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
 804     }
 805     // safe if expansion fails
 806     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 807     if (PrintGCDetails && Verbose) {
 808       gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
 809     }
 810   } else {
 811     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 812     assert(desired_capacity <= capacity(), "invalid expansion size");
 813     size_t shrink_bytes = capacity() - desired_capacity;
 814     // Don't shrink unless the delta is greater than the minimum shrink we want
 815     if (shrink_bytes >= MinHeapDeltaBytes) {
 816       shrink_free_list_by(shrink_bytes);
 817     }
 818   }
 819 }
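A short worked example of the expansion arithmetic above (illustrative numbers only, not from the source):

    // Illustrative arithmetic only -- not part of the source file.
    //   used() = 600 MB, capacity() = 700 MB, MinHeapFreeRatio = 40
    //   free_percentage  = 100 / 700             ~= 0.14  (< 0.40, so expand)
    //   desired_capacity = 600 / (1 - 0.40)       = 1000 MB
    //   expand_bytes     = MAX2(1000 - 700, MinHeapDeltaBytes) = 300 MB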
 820 
 821 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
 822   return cmsSpace()->freelistLock();
 823 }
 824 
 825 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
 826   CMSSynchronousYieldRequest yr;
 827   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
 828   return have_lock_and_allocate(size, tlab);
 829 }


1127 ConcurrentMarkSweepGeneration::
1128 par_oop_since_save_marks_iterate_done(int thread_num) {
1129   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1130   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1131   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1132 }
1133 
1134 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1135                                                    size_t size,
1136                                                    bool   tlab)
1137 {
1138   // We allow a STW collection only if a full
1139   // collection was requested.
1140   return full || should_allocate(size, tlab); // FIX ME !!!
1141   // This and promotion failure handling are connected at the
1142   // hip and should be fixed by untying them.
1143 }
1144 
1145 bool CMSCollector::shouldConcurrentCollect() {
1146   if (_full_gc_requested) {
1147     if (Verbose && PrintGCDetails) {
1148       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1149                              " gc request (or gc_locker)");
1150     }
1151     return true;
1152   }
1153 
1154   FreelistLocker x(this);
1155   // ------------------------------------------------------------------
1156   // Print out lots of information which affects the initiation of
1157   // a collection.
1158   if (PrintCMSInitiationStatistics && stats().valid()) {
1159     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1160     gclog_or_tty->stamp();
1161     gclog_or_tty->cr();
1162     stats().print_on(gclog_or_tty);
1163     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1164       stats().time_until_cms_gen_full());
1165     gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
1166     gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
1167                            _cmsGen->contiguous_available());
1168     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1169     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1170     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1171     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1172     gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1173     gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1174     gclog_or_tty->print_cr("metadata initialized %d",
1175       MetaspaceGC::should_concurrent_collect());
1176   }
1177   // ------------------------------------------------------------------
1178 
1179   // If the estimated time to complete a cms collection (cms_duration())
1180   // is less than the estimated time remaining until the cms generation
1181   // is full, start a collection.
1182   if (!UseCMSInitiatingOccupancyOnly) {
1183     if (stats().valid()) {
1184       if (stats().time_until_cms_start() == 0.0) {
1185         return true;
1186       }
1187     } else {
1188       // We want to conservatively collect somewhat early in order
1189       // to try and "bootstrap" our CMS/promotion statistics;
1190       // this branch will not fire after the first successful CMS
1191       // collection because the stats should then be valid.
1192       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1193         if (Verbose && PrintGCDetails) {
1194           gclog_or_tty->print_cr(
1195             " CMSCollector: collect for bootstrapping statistics:"
1196             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1197             _bootstrap_occupancy);
1198         }
1199         return true;
1200       }
1201     }
1202   }
1203 
1204   // Otherwise, we start a collection cycle if
1205   // old gen wants a collection cycle started. Each may use
1206   // an appropriate criterion for making this decision.
1207   // XXX We need to make sure that the gen expansion
1208   // criterion dovetails well with this. XXX NEED TO FIX THIS
1209   if (_cmsGen->should_concurrent_collect()) {
1210     if (Verbose && PrintGCDetails) {
1211       gclog_or_tty->print_cr("CMS old gen initiated");
1212     }
1213     return true;
1214   }
1215 
1216   // We start a collection if we believe an incremental collection may fail;
1217   // this is not likely to be productive in practice because it's probably too
1218   // late anyway.
1219   GenCollectedHeap* gch = GenCollectedHeap::heap();
1220   assert(gch->collector_policy()->is_generation_policy(),
1221          "You may want to check the correctness of the following");
1222   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1223     if (Verbose && PrintGCDetails) {
1224       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1225     }
1226     return true;
1227   }
1228 
1229   if (MetaspaceGC::should_concurrent_collect()) {
1230     if (Verbose && PrintGCDetails) {
1231       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1232     }
1233     return true;
1234   }
1235 
1236   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1237   if (CMSTriggerInterval >= 0) {
1238     if (CMSTriggerInterval == 0) {
1239       // Trigger always
1240       return true;
1241     }
1242 
1243     // Check the CMS time since begin (we do not check the stats validity
1244     // as we want to be able to trigger the first CMS cycle as well)
1245     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1246       if (Verbose && PrintGCDetails) {
1247         if (stats().valid()) {
1248           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1249                                  stats().cms_time_since_begin());
1250         } else {
1251           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1252         }
1253       }
1254       return true;
1255     }
1256   }
1257 
1258   return false;
1259 }
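The branches above are steered by a handful of product flags. As one illustration (the values are arbitrary examples, not recommendations), pinning initiation to occupancy alone and adding a time-based backstop might look like:

    -XX:+UseConcMarkSweepGC
    -XX:CMSInitiatingOccupancyFraction=70
    -XX:+UseCMSInitiatingOccupancyOnly
    -XX:CMSTriggerInterval=600000

With +UseCMSInitiatingOccupancyOnly the statistics-driven branch is skipped, so a cycle starts only on an explicit request, on old gen occupancy, on the trigger interval, on metadata pressure, or when an incremental collection is expected to fail.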
1260 
1261 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1262 
1263 // Clear _expansion_cause fields of constituent generations
1264 void CMSCollector::clear_expansion_cause() {
1265   _cmsGen->clear_expansion_cause();
1266 }
1267 
1268 // We should be conservative in starting a collection cycle.  To
1269 // start too eagerly runs the risk of collecting too often in the
1270 // extreme.  To collect too rarely falls back on full collections,
1271 // which works, even if not optimal in terms of concurrent work.
1272 // As a workaround for collecting too eagerly, use the flag


1275 // collections.
1276 // We want to start a new collection cycle if any of the following
1277 // conditions hold:
1278 // . our current occupancy exceeds the configured initiating occupancy
1279 //   for this generation, or
1280 // . we recently needed to expand this space and have not, since that
1281 //   expansion, done a collection of this generation, or
1282 // . the underlying space believes that it may be a good idea to initiate
1283 //   a concurrent collection (this may be based on criteria such as the
1284 //   following: the space uses linear allocation and linear allocation is
1285 //   going to fail, or there is believed to be excessive fragmentation in
1286 //   the generation, etc... or ...
1287 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1288 //   the case of the old generation; see CR 6543076):
1289 //   we may be approaching a point at which allocation requests may fail because
1290 //   we will be out of sufficient free space given allocation rate estimates.]
1291 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1292 
1293   assert_lock_strong(freelistLock());
1294   if (occupancy() > initiating_occupancy()) {
1295     if (PrintGCDetails && Verbose) {
1296       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1297         short_name(), occupancy(), initiating_occupancy());
1298     }
1299     return true;
1300   }
1301   if (UseCMSInitiatingOccupancyOnly) {
1302     return false;
1303   }
1304   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1305     if (PrintGCDetails && Verbose) {
1306       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1307         short_name());
1308     }
1309     return true;
1310   }
1311   return false;
1312 }
1313 
1314 void ConcurrentMarkSweepGeneration::collect(bool   full,
1315                                             bool   clear_all_soft_refs,
1316                                             size_t size,
1317                                             bool   tlab)
1318 {
1319   collector()->collect(full, clear_all_soft_refs, size, tlab);
1320 }
1321 
1322 void CMSCollector::collect(bool   full,
1323                            bool   clear_all_soft_refs,
1324                            size_t size,
1325                            bool   tlab)
1326 {
1327   // The following "if" branch is present for defensive reasons.
1328   // In the current uses of this interface, it can be replaced with:


1345   GenCollectedHeap* gch = GenCollectedHeap::heap();
1346   unsigned int gc_count = gch->total_full_collections();
1347   if (gc_count == full_gc_count) {
1348     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1349     _full_gc_requested = true;
1350     _full_gc_cause = cause;
1351     CGC_lock->notify();   // nudge CMS thread
1352   } else {
1353     assert(gc_count > full_gc_count, "Error: causal loop");
1354   }
1355 }
1356 
1357 bool CMSCollector::is_external_interruption() {
1358   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1359   return GCCause::is_user_requested_gc(cause) ||
1360          GCCause::is_serviceability_requested_gc(cause);
1361 }
1362 
1363 void CMSCollector::report_concurrent_mode_interruption() {
1364   if (is_external_interruption()) {
1365     if (PrintGCDetails) {
1366       gclog_or_tty->print(" (concurrent mode interrupted)");
1367     }
1368   } else {
1369     if (PrintGCDetails) {
1370       gclog_or_tty->print(" (concurrent mode failure)");
1371     }
1372     _gc_tracer_cm->report_concurrent_mode_failure();
1373   }
1374 }
1375 
1376 
1377 // The foreground and background collectors need to coordinate in order
1378 // to make sure that they do not mutually interfere with CMS collections.
1379 // When a background collection is active,
1380 // the foreground collector may need to take over (preempt) and
1381 // synchronously complete an ongoing collection. Depending on the
1382 // frequency of the background collections and the heap usage
1383 // of the application, this preemption can be rare or frequent.
1384 // There are only certain
1385 // points in the background collection that the "collection-baton"
1386 // can be passed to the foreground collector.
1387 //
1388 // The foreground collector will wait for the baton before
1389 // starting any part of the collection.  The foreground collector
1390 // will only wait at one location.
1391 //
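As a simplified analogy of the baton passing described above (this is not the HotSpot implementation, which uses CGC_lock, the CMS token and the _foregroundGCIsActive/_foregroundGCShouldWait flags shown later in this file; std::mutex and std::condition_variable merely stand in for them):

    #include <condition_variable>
    #include <mutex>

    std::mutex              baton_lock;                 // stands in for CGC_lock
    std::condition_variable baton_cv;
    bool foreground_wants_baton = false;                // ~ _foregroundGCIsActive
    bool foreground_should_wait = true;                 // ~ _foregroundGCShouldWait

    // Background collector, at one of its designated yield points.
    void background_yield_point() {
      std::unique_lock<std::mutex> l(baton_lock);
      if (foreground_wants_baton) {
        foreground_should_wait = false;                 // hand over the baton
        baton_cv.notify_all();
        baton_cv.wait(l, [] { return !foreground_wants_baton; });
        foreground_should_wait = true;                  // baton returned; resume
      }
    }

    // Foreground collector, before doing any part of the collection.
    void foreground_acquire_baton() {
      std::unique_lock<std::mutex> l(baton_lock);
      foreground_wants_baton = true;
      baton_cv.notify_all();
      baton_cv.wait(l, [] { return !foreground_should_wait; });
    }

    // Foreground collector, when its (preempting) collection is done.
    void foreground_release_baton() {
      std::lock_guard<std::mutex> l(baton_lock);
      foreground_wants_baton = false;
      baton_cv.notify_all();
    }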


1485       CGC_lock->notify();
1486       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1487              "Possible deadlock");
1488       while (_foregroundGCShouldWait) {
1489         // wait for notification
1490         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1491         // Possibility of delay/starvation here, since CMS token does
1492         // not know to give priority to the VM thread? Actually, I think
1493         // there wouldn't be any delay/starvation, but the proof of
1494         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1495       }
1496       ConcurrentMarkSweepThread::set_CMS_flag(
1497         ConcurrentMarkSweepThread::CMS_vm_has_token);
1498     }
1499   }
1500   // The CMS_token is already held.  Get back the other locks.
1501   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1502          "VM thread should have CMS token");
1503   getFreelistLocks();
1504   bitMapLock()->lock_without_safepoint_check();
1505   if (TraceCMSState) {
1506     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1507       INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1508     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1509   }
1510 
1511   // Inform cms gen if this was due to partial collection failing.
1512   // The CMS gen may use this fact to determine its expansion policy.
1513   GenCollectedHeap* gch = GenCollectedHeap::heap();
1514   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1515     assert(!_cmsGen->incremental_collection_failed(),
1516            "Should have been noticed, reacted to and cleared");
1517     _cmsGen->set_incremental_collection_failed();
1518   }
1519 
1520   if (first_state > Idling) {
1521     report_concurrent_mode_interruption();
1522   }
1523 
1524   set_did_compact(true);
1525 
1526   // If the collection is being acquired from the background
1527   // collector, there may be references on the discovered
1528   // references lists.  Abandon those references, since some
1529   // of them may have become unreachable after concurrent


1563 // after obtaining the free list locks for the
1564 // two generations.
1565 void CMSCollector::compute_new_size() {
1566   assert_locked_or_safepoint(Heap_lock);
1567   FreelistLocker z(this);
1568   MetaspaceGC::compute_new_size();
1569   _cmsGen->compute_new_size_free_list();
1570 }
1571 
1572 // A work method used by the foreground collector to do
1573 // a mark-sweep-compact.
1574 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1575   GenCollectedHeap* gch = GenCollectedHeap::heap();
1576 
1577   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1578   gc_timer->register_gc_start();
1579 
1580   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1581   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1582 
1583   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
1584 
1585   // Temporarily widen the span of the weak reference processing to
1586   // the entire heap.
1587   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1588   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1589   // Temporarily, clear the "is_alive_non_header" field of the
1590   // reference processor.
1591   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1592   // Temporarily make reference _processing_ single threaded (non-MT).
1593   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1594   // Temporarily make refs discovery atomic
1595   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1596   // Temporarily make reference _discovery_ single threaded (non-MT)
1597   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1598 
1599   ref_processor()->set_enqueuing_is_done(false);
1600   ref_processor()->enable_discovery();
1601   ref_processor()->setup_policy(clear_all_soft_refs);
1602   // If an asynchronous collection finishes, the _modUnionTable is
1603   // all clear.  If we are assuming the collection from an asynchronous


1648   // Clear any data recorded in the PLAB chunk arrays.
1649   if (_survivor_plab_array != NULL) {
1650     reset_survivor_plab_arrays();
1651   }
1652 
1653   // Adjust the per-size allocation stats for the next epoch.
1654   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1655   // Restart the "inter sweep timer" for the next epoch.
1656   _inter_sweep_timer.reset();
1657   _inter_sweep_timer.start();
1658 
1659   gc_timer->register_gc_end();
1660 
1661   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1662 
1663   // For a mark-sweep-compact, compute_new_size() will be called
1664   // in the heap's do_collection() method.
1665 }
1666 
1667 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1668   ContiguousSpace* eden_space = _young_gen->eden();
1669   ContiguousSpace* from_space = _young_gen->from();
1670   ContiguousSpace* to_space   = _young_gen->to();
1671   // Eden
1672   if (_eden_chunk_array != NULL) {
1673     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1674                            p2i(eden_space->bottom()), p2i(eden_space->top()),
1675                            p2i(eden_space->end()), eden_space->capacity());
1676     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1677                            "_eden_chunk_capacity=" SIZE_FORMAT,
1678                            _eden_chunk_index, _eden_chunk_capacity);
1679     for (size_t i = 0; i < _eden_chunk_index; i++) {
1680       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1681                              i, p2i(_eden_chunk_array[i]));
1682     }
1683   }
1684   // Survivor
1685   if (_survivor_chunk_array != NULL) {
1686     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1687                            p2i(from_space->bottom()), p2i(from_space->top()),
1688                            p2i(from_space->end()), from_space->capacity());
1689     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1690                            "_survivor_chunk_capacity=" SIZE_FORMAT,
1691                            _survivor_chunk_index, _survivor_chunk_capacity);
1692     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1693       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1694                              i, p2i(_survivor_chunk_array[i]));
1695     }
1696   }
1697 }
1698 
1699 void CMSCollector::getFreelistLocks() const {
1700   // Get locks for all free lists in all generations that this
1701   // collector is responsible for
1702   _cmsGen->freelistLock()->lock_without_safepoint_check();
1703 }
1704 
1705 void CMSCollector::releaseFreelistLocks() const {
1706   // Release locks for all free lists in all generations that this
1707   // collector is responsible for
1708   _cmsGen->freelistLock()->unlock();
1709 }
1710 
1711 bool CMSCollector::haveFreelistLocks() const {
1712   // Check locks for all free lists in all generations that this
1713   // collector is responsible for
1714   assert_lock_strong(_cmsGen->freelistLock());


1778     // Signal that we are about to start a collection
1779     gch->increment_total_full_collections();  // ... starting a collection cycle
1780     _collection_count_start = gch->total_full_collections();
1781   }
1782 
1783   // Used for PrintGC
1784   size_t prev_used = 0;
1785   if (PrintGC && Verbose) {
1786     prev_used = _cmsGen->used();
1787   }
1788 
1789   // The change of the collection state is normally done at this level;
1790   // the exceptions are phases that are executed while the world is
1791   // stopped.  For those phases the change of state is done while the
1792   // world is stopped.  For baton passing purposes this allows the
1793   // background collector to finish the phase and change state atomically.
1794   // The foreground collector cannot wait on a phase that is done
1795   // while the world is stopped because the foreground collector already
1796   // has the world stopped and would deadlock.
1797   while (_collectorState != Idling) {
1798     if (TraceCMSState) {
1799       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1800         p2i(Thread::current()), _collectorState);
1801     }
1802     // The foreground collector
1803     //   holds the Heap_lock throughout its collection.
1804     //   holds the CMS token (but not the lock)
1805     //     except while it is waiting for the background collector to yield.
1806     //
1807     // The foreground collector should be blocked (not for long)
1808     //   if the background collector is about to start a phase
1809     //   executed with world stopped.  If the background
1810     //   collector has already started such a phase, the
1811     //   foreground collector is blocked waiting for the
1812     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1813     //   are executed in the VM thread.
1814     //
1815     // The locking order is
1816     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1817     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1818     //   CMS token  (claimed in
1819     //                stop_world_and_do() -->
1820     //                  safepoint_synchronize() -->
1821     //                    CMSThread::synchronize())
1822 
1823     {
1824       // Check if the FG collector wants us to yield.
1825       CMSTokenSync x(true); // is cms thread
1826       if (waitForForegroundGC()) {
1827         // We yielded to a foreground GC, nothing more to be
1828         // done this round.
1829         assert(_foregroundGCShouldWait == false, "We set it to false in "
1830                "waitForForegroundGC()");
1831         if (TraceCMSState) {
1832           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1833             " exiting collection CMS state %d",
1834             p2i(Thread::current()), _collectorState);
1835         }
1836         return;
1837       } else {
1838         // The background collector can run but check to see if the
1839         // foreground collector has done a collection while the
1840         // background collector was waiting to get the CGC_lock
1841         // above.  If yes, break so that _foregroundGCShouldWait
1842         // is cleared before returning.
1843         if (_collectorState == Idling) {
1844           break;
1845         }
1846       }
1847     }
1848 
1849     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1850       "should be waiting");
1851 
1852     switch (_collectorState) {
1853       case InitialMarking:
1854         {
1855           ReleaseForegroundGC x(this);


1919         break;
1920       }
1921       case Resetting:
1922         // CMS heap resizing has been completed
1923         reset_concurrent();
1924         assert(_collectorState == Idling, "Collector state should "
1925           "have changed");
1926 
1927         MetaspaceGC::set_should_concurrent_collect(false);
1928 
1929         stats().record_cms_end();
1930         // Don't move the concurrent_phases_end() and compute_new_size()
1931         // calls to here because a preempted background collection
1932     // has its state set to "Resetting".
1933         break;
1934       case Idling:
1935       default:
1936         ShouldNotReachHere();
1937         break;
1938     }
1939     if (TraceCMSState) {
1940       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1941         p2i(Thread::current()), _collectorState);
1942     }
1943     assert(_foregroundGCShouldWait, "block post-condition");
1944   }
1945 
1946   // Should this be in gc_epilogue?
1947   collector_policy()->counters()->update_counters();
1948 
1949   {
1950     // Clear _foregroundGCShouldWait and, in the event that the
1951     // foreground collector is waiting, notify it, before
1952     // returning.
1953     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1954     _foregroundGCShouldWait = false;
1955     if (_foregroundGCIsActive) {
1956       CGC_lock->notify();
1957     }
1958     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1959            "Possible deadlock");
1960   }
1961   if (TraceCMSState) {
1962     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1963       " exiting collection CMS state %d",
1964       p2i(Thread::current()), _collectorState);
1965   }
1966   if (PrintGC && Verbose) {
1967     _cmsGen->print_heap_change(prev_used);
1968   }
1969 }
1970 
1971 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1972   _cms_start_registered = true;
1973   _gc_timer_cm->register_gc_start();
1974   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1975 }
1976 
1977 void CMSCollector::register_gc_end() {
1978   if (_cms_start_registered) {
1979     report_heap_summary(GCWhen::AfterGC);
1980 
1981     _gc_timer_cm->register_gc_end();
1982     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1983     _cms_start_registered = false;
1984   }
1985 }
1986 
1987 void CMSCollector::save_heap_summary() {
1988   GenCollectedHeap* gch = GenCollectedHeap::heap();


2000   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2001          "CMS thread should have CMS token");
2002   // Block the foreground collector until the
2003   // background collector decides whether to
2004   // yield.
2005   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2006   _foregroundGCShouldWait = true;
2007   if (_foregroundGCIsActive) {
2008     // The background collector yields to the
2009     // foreground collector and returns a value
2010     // indicating that it has yielded.  The foreground
2011     // collector can proceed.
2012     res = true;
2013     _foregroundGCShouldWait = false;
2014     ConcurrentMarkSweepThread::clear_CMS_flag(
2015       ConcurrentMarkSweepThread::CMS_cms_has_token);
2016     ConcurrentMarkSweepThread::set_CMS_flag(
2017       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2018     // Get a possibly blocked foreground thread going
2019     CGC_lock->notify();
2020     if (TraceCMSState) {
2021       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2022         p2i(Thread::current()), _collectorState);
2023     }
2024     while (_foregroundGCIsActive) {
2025       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2026     }
2027     ConcurrentMarkSweepThread::set_CMS_flag(
2028       ConcurrentMarkSweepThread::CMS_cms_has_token);
2029     ConcurrentMarkSweepThread::clear_CMS_flag(
2030       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2031   }
2032   if (TraceCMSState) {
2033     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2034       p2i(Thread::current()), _collectorState);
2035   }
2036   return res;
2037 }
2038 
2039 // Because of the need to lock the free lists and other structures in
2040 // the collector, common to all the generations that the collector is
2041 // collecting, we need the gc_prologues of individual CMS generations
2043 // to delegate to their collector. It may have been simpler had the
2043 // current infrastructure allowed one to call a prologue on a
2044 // collector. In the absence of that we have the generation's
2045 // prologue delegate to the collector, which delegates back
2046 // some "local" work to a worker method in the individual generations
2047 // that it's responsible for collecting, while itself doing any
2048 // work common to all generations it's responsible for. A similar
2049 // comment applies to the gc_epilogue()s.
2050 // The role of the variable _between_prologue_and_epilogue is to
2051 // enforce the invocation protocol.
2052 void CMSCollector::gc_prologue(bool full) {
2053   // Call gc_prologue_work() for the CMSGen
2054   // we are responsible for.
2055 


2112   // collecting.
2113   collector()->gc_prologue(full);
2114 }
2115 
2116 // This is a "private" interface for use by this generation's CMSCollector.
2117 // Not to be called directly by any other entity (for instance,
2118 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2119 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2120   bool registerClosure, ModUnionClosure* modUnionClosure) {
2121   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2122   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2123     "Should be NULL");
2124   if (registerClosure) {
2125     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2126   }
2127   cmsSpace()->gc_prologue();
2128   // Clear stat counters
2129   NOT_PRODUCT(
2130     assert(_numObjectsPromoted == 0, "check");
2131     assert(_numWordsPromoted   == 0, "check");
2132     if (Verbose && PrintGC) {
2133       gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2134                           SIZE_FORMAT " bytes concurrently",
2135       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2136     }
2137     _numObjectsAllocated = 0;
2138     _numWordsAllocated   = 0;
2139   )
2140 }
2141 
2142 void CMSCollector::gc_epilogue(bool full) {
2143   // The following locking discipline assumes that we are only called
2144   // when the world is stopped.
2145   assert(SafepointSynchronize::is_at_safepoint(),
2146          "world is stopped assumption");
2147 
2148   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2149   // if linear allocation blocks need to be appropriately marked to allow the
2150 // blocks to be parsable. We also check here whether we need to nudge the
2151   // CMS collector thread to start a new cycle (if it's not already active).
2152   assert(   Thread::current()->is_VM_thread()
2153          || (   CMSScavengeBeforeRemark
2154              && Thread::current()->is_ConcurrentGC_thread()),
2155          "Incorrect thread type for epilogue execution");
2156 


2193   _between_prologue_and_epilogue = false;  // ready for next cycle
2194 }
2195 
2196 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2197   collector()->gc_epilogue(full);
2198 
2199   // Also reset promotion tracking in par gc thread states.
2200   for (uint i = 0; i < ParallelGCThreads; i++) {
2201     _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2202   }
2203 }
2204 
2205 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2206   assert(!incremental_collection_failed(), "Should have been cleared");
2207   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2208   cmsSpace()->gc_epilogue();
2209   // Print stat counters
2210   NOT_PRODUCT(
2211     assert(_numObjectsAllocated == 0, "check");
2212     assert(_numWordsAllocated == 0, "check");
2213     if (Verbose && PrintGC) {
2214       gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2215                           SIZE_FORMAT " bytes",
2216                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2217     }
2218     _numObjectsPromoted = 0;
2219     _numWordsPromoted   = 0;
2220   )
2221 
2222   if (PrintGC && Verbose) {
2223     // The call down the chain in contiguous_available() needs the freelistLock,
2224     // so print this out before releasing the freelistLock.
2225     gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2226                         contiguous_available());
2227   }
2228 }
2229 
2230 #ifndef PRODUCT
2231 bool CMSCollector::have_cms_token() {
2232   Thread* thr = Thread::current();
2233   if (thr->is_VM_thread()) {
2234     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2235   } else if (thr->is_ConcurrentGC_thread()) {
2236     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2237   } else if (thr->is_GC_task_thread()) {
2238     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2239            ParGCRareEvent_lock->owned_by_self();
2240   }
2241   return false;
2242 }
2243 #endif
2244 
2245 // Check reachability of the given heap address in CMS generation,
2246 // treating all other generations as roots.
2247 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2248   // We could "guarantee" below, rather than assert, but I'll
2249   // leave these as "asserts" so that an adventurous debugger
2250   // could try this in the product build provided some subset of
2251   // the conditions were met, provided they were interested in the
2252   // results and knew that the computation below wouldn't interfere
2253   // with other concurrent computations mutating the structures
2254   // being read or written.
2255   assert(SafepointSynchronize::is_at_safepoint(),
2256          "Else mutations in object graph will make answer suspect");
2257   assert(have_cms_token(), "Should hold cms token");
2258   assert(haveFreelistLocks(), "must hold free list locks");
2259   assert_lock_strong(bitMapLock());
2260 
2261   // Clear the marking bit map array before starting, but, just
2262   // for kicks, first report if the given address is already marked
2263   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2264                 _markBitMap.isMarked(addr) ? "" : " not");
2265 
2266   if (verify_after_remark()) {
2267     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2268     bool result = verification_mark_bm()->isMarked(addr);
2269     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2270                            result ? "IS" : "is NOT");
2271     return result;
2272   } else {
2273     gclog_or_tty->print_cr("Could not compute result");
2274     return false;
2275   }
2276 }
2277 
2278 
2279 void
2280 CMSCollector::print_on_error(outputStream* st) {
2281   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2282   if (collector != NULL) {
2283     CMSBitMap* bitmap = &collector->_markBitMap;
2284     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2285     bitmap->print_on_error(st, " Bits: ");
2286 
2287     st->cr();
2288 
2289     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2290     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2291     mut_bitmap->print_on_error(st, " Bits: ");
2292   }
2293 }
2294 
2295 ////////////////////////////////////////////////////////
2296 // CMS Verification Support
2297 ////////////////////////////////////////////////////////
2298 // Following the remark phase, the following invariant
2299 // should hold -- each object in the CMS heap which is
2300 // marked in markBitMap() should be marked in the verification_mark_bm().
2301 
2302 class VerifyMarkedClosure: public BitMapClosure {
2303   CMSBitMap* _marks;
2304   bool       _failed;
2305 
2306  public:
2307   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2308 
2309   bool do_bit(size_t offset) {
2310     HeapWord* addr = _marks->offsetToHeapWord(offset);
2311     if (!_marks->isMarked(addr)) {
2312       oop(addr)->print_on(gclog_or_tty);
2313       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2314       _failed = true;
2315     }
2316     return true;
2317   }
2318 
2319   bool failed() { return _failed; }
2320 };
2321 
2322 bool CMSCollector::verify_after_remark(bool silent) {
2323   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2324   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2325   static bool init = false;
2326 
2327   assert(SafepointSynchronize::is_at_safepoint(),
2328          "Else mutations in object graph will make answer suspect");
2329   assert(have_cms_token(),
2330          "Else there may be mutual interference in use of "
2331          " verification data structures");
2332   assert(_collectorState > Marking && _collectorState <= Sweeping,
2333          "Else marking info checked here may be obsolete");
2334   assert(haveFreelistLocks(), "must hold free list locks");
2335   assert_lock_strong(bitMapLock());
2336 
2337 
2338   // Allocate marking bit map if not already allocated
2339   if (!init) { // first time
2340     if (!verification_mark_bm()->allocate(_span)) {
2341       return false;
2342     }
2343     init = true;


2366   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2367   // Update the saved marks which may affect the root scans.
2368   gch->save_marks();
2369 
2370   if (CMSRemarkVerifyVariant == 1) {
2371     // In this first variant of verification, we complete
2372     // all marking, then check if the new marks-vector is
2373     // a subset of the CMS marks-vector.
2374     verify_after_remark_work_1();
2375   } else if (CMSRemarkVerifyVariant == 2) {
2376     // In this second variant of verification, we flag an error
2377     // (i.e. an object reachable in the new marks-vector not reachable
2378     // in the CMS marks-vector) immediately, also indicating the
2379     // identity of an object (A) that references the unmarked object (B) --
2380     // presumably, a mutation to A failed to be picked up by preclean/remark?
2381     verify_after_remark_work_2();
2382   } else {
2383     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2384             CMSRemarkVerifyVariant);
2385   }
2386   if (!silent) gclog_or_tty->print(" done] ");
2387   return true;
2388 }
2389 
2390 void CMSCollector::verify_after_remark_work_1() {
2391   ResourceMark rm;
2392   HandleMark  hm;
2393   GenCollectedHeap* gch = GenCollectedHeap::heap();
2394 
2395   // Get a clear set of claim bits for the roots processing to work with.
2396   ClassLoaderDataGraph::clear_claimed_marks();
2397 
2398   // Mark from roots one level into CMS
2399   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2400   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2401 
2402   {
2403     StrongRootsScope srs(1);
2404 
2405     gch->gen_process_roots(&srs,
2406                            GenCollectedHeap::OldGen,


2418     false /* don't yield */, true /* verifying */);
2419   assert(_restart_addr == NULL, "Expected pre-condition");
2420   verification_mark_bm()->iterate(&markFromRootsClosure);
2421   while (_restart_addr != NULL) {
2422     // Deal with stack overflow: by restarting at the indicated
2423     // address.
2424     HeapWord* ra = _restart_addr;
2425     markFromRootsClosure.reset(ra);
2426     _restart_addr = NULL;
2427     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2428   }
2429   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2430   verify_work_stacks_empty();
2431 
2432   // Marking completed -- now verify that each bit marked in
2433   // verification_mark_bm() is also marked in markBitMap(); flag all
2434   // errors by printing corresponding objects.
2435   VerifyMarkedClosure vcl(markBitMap());
2436   verification_mark_bm()->iterate(&vcl);
2437   if (vcl.failed()) {
2438     gclog_or_tty->print("Verification failed");
2439     gch->print_on(gclog_or_tty);
2440     fatal("CMS: failed marking verification after remark");
2441   }
2442 }
2443 
2444 class VerifyKlassOopsKlassClosure : public KlassClosure {
2445   class VerifyKlassOopsClosure : public OopClosure {
2446     CMSBitMap* _bitmap;
2447    public:
2448     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2449     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2450     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2451   } _oop_closure;
2452  public:
2453   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2454   void do_klass(Klass* k) {
2455     k->oops_do(&_oop_closure);
2456   }
2457 };
2458 
2459 void CMSCollector::verify_after_remark_work_2() {


2712   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2713   if (GCExpandToAllocateDelayMillis > 0) {
2714     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2715   }
2716   return have_lock_and_allocate(word_size, tlab);
2717 }
2718 
2719 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2720     size_t bytes,
2721     size_t expand_bytes,
2722     CMSExpansionCause::Cause cause)
2723 {
2724 
2725   bool success = expand(bytes, expand_bytes);
2726 
2727   // remember why we expanded; this information is used
2728   // by shouldConcurrentCollect() when making decisions on whether to start
2729   // a new CMS cycle.
2730   if (success) {
2731     set_expansion_cause(cause);
2732     if (PrintGCDetails && Verbose) {
2733       gclog_or_tty->print_cr("Expanded CMS gen for %s",
2734         CMSExpansionCause::to_string(cause));
2735     }
2736   }
2737 }
2738 
2739 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2740   HeapWord* res = NULL;
2741   MutexLocker x(ParGCRareEvent_lock);
2742   while (true) {
2743     // Expansion by some other thread might make alloc OK now:
2744     res = ps->lab.alloc(word_sz);
2745     if (res != NULL) return res;
2746     // If there's not enough expansion space available, give up.
2747     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2748       return NULL;
2749     }
2750     // Otherwise, we try expansion.
2751     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2752     // Now go around the loop and try alloc again;
2753     // A competing par_promote might beat us to the expansion space,
2754     // so we may go around the loop again if promotion fails again.
2755     if (GCExpandToAllocateDelayMillis > 0) {


2802   assert_lock_strong(freelistLock());
2803   if (PrintGCDetails && Verbose) {
2804     warning("Shrinking of CMS not yet implemented");
2805   }
2806   return;
2807 }
2808 
2809 
2810 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2811 // phases.
2812 class CMSPhaseAccounting: public StackObj {
2813  public:
2814   CMSPhaseAccounting(CMSCollector *collector,
2815                      const char *phase,
2816                      bool print_cr = true);
2817   ~CMSPhaseAccounting();
2818 
2819  private:
2820   CMSCollector *_collector;
2821   const char *_phase;
2822   elapsedTimer _wallclock;
2823   bool _print_cr;
2824 
2825  public:
2826   // Not MT-safe; so do not pass around these StackObj's
2827   // where they may be accessed by other threads.
2828   jlong wallclock_millis() {
2829     assert(_wallclock.is_active(), "Wall clock should not stop");
2830     _wallclock.stop();  // to record time
2831     jlong ret = _wallclock.milliseconds();
2832     _wallclock.start(); // restart
2833     return ret;
2834   }
2835 };
2836 
2837 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2838                                        const char *phase,
2839                                        bool print_cr) :
2840   _collector(collector), _phase(phase), _print_cr(print_cr) {
2841 
2842   if (PrintCMSStatistics != 0) {
2843     _collector->resetYields();
2844   }
2845   if (PrintGCDetails) {
2846     gclog_or_tty->gclog_stamp();
2847     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2848       _collector->cmsGen()->short_name(), _phase);
2849   }
2850   _collector->resetTimer();
2851   _wallclock.start();
2852   _collector->startTimer();
2853 }
2854 
2855 CMSPhaseAccounting::~CMSPhaseAccounting() {
2856   assert(_wallclock.is_active(), "Wall clock should not have stopped");
2857   _collector->stopTimer();
2858   _wallclock.stop();
2859   if (PrintGCDetails) {
2860     gclog_or_tty->gclog_stamp();
2861     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2862                  _collector->cmsGen()->short_name(),
2863                  _phase, _collector->timerValue(), _wallclock.seconds());
2864     if (_print_cr) {
2865       gclog_or_tty->cr();
2866     }
2867     if (PrintCMSStatistics != 0) {
2868       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2869                     _collector->yields());
2870     }
2871   }
2872 }
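// Typical use (see markFromRoots(), preclean() and sweep() below): a
// CMSPhaseAccounting object is declared on the CMS thread's stack for the
// duration of a concurrent phase, so that the constructor emits the
// "[<gen>-concurrent-<phase>-start]" line and starts the timers, and the
// destructor emits the CPU/wall-clock summary when the scope exits.
// A minimal sketch of that pattern:
//
//   {
//     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//     ... do the concurrent work of the phase ...
//   }  // accounting for the phase is printed here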
2873 
2874 // CMS work
2875 
2876 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2877 class CMSParMarkTask : public AbstractGangTask {
2878  protected:
2879   CMSCollector*     _collector;
2880   uint              _n_workers;
2881   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2882       AbstractGangTask(name),
2883       _collector(collector),
2884       _n_workers(n_workers) {}
2885   // Work method in support of parallel rescan ... of young gen spaces
2886   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2887                              ContiguousSpace* space,
2888                              HeapWord** chunk_array, size_t chunk_top);
2889   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2890 };
2891 


2918                     Mutex::_no_safepoint_check_flag);
2919     checkpointRootsInitialWork();
2920     // enable ("weak") refs discovery
2921     rp->enable_discovery();
2922     _collectorState = Marking;
2923   }
2924 }
2925 
2926 void CMSCollector::checkpointRootsInitialWork() {
2927   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2928   assert(_collectorState == InitialMarking, "just checking");
2929 
2930   // Already have locks.
2931   assert_lock_strong(bitMapLock());
2932   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2933 
2934   // Setup the verification and class unloading state for this
2935   // CMS collection cycle.
2936   setup_cms_unloading_and_verification_state();
2937 
2938   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2939     PrintGCDetails && Verbose, true, _gc_timer_cm);)
2940 
2941   // Reset all the PLAB chunk arrays if necessary.
2942   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2943     reset_survivor_plab_arrays();
2944   }
2945 
2946   ResourceMark rm;
2947   HandleMark  hm;
2948 
2949   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2950   GenCollectedHeap* gch = GenCollectedHeap::heap();
2951 
2952   verify_work_stacks_empty();
2953   verify_overflow_empty();
2954 
2955   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2956   // Update the saved marks which may affect the root scans.
2957   gch->save_marks();
2958 
2959   // weak reference processing has not started yet.
2960   ref_processor()->set_enqueuing_is_done(false);
2961 
2962   // Need to remember all newly created CLDs,
2963   // so that we can guarantee that the remark finds them.
2964   ClassLoaderDataGraph::remember_new_clds(true);
2965 
2966   // Whenever a CLD is found, it will be claimed before proceeding to mark
2967   // the klasses. The claimed marks need to be cleared before marking starts.
2968   ClassLoaderDataGraph::clear_claimed_marks();
2969 
2970   if (CMSPrintEdenSurvivorChunks) {
2971     print_eden_and_survivor_chunk_arrays();
2972   }
2973 
2974   {
2975 #if defined(COMPILER2) || INCLUDE_JVMCI
2976     DerivedPointerTableDeactivate dpt_deact;
2977 #endif
2978     if (CMSParallelInitialMarkEnabled) {
2979       // The parallel version.
2980       WorkGang* workers = gch->workers();
2981       assert(workers != NULL, "Need parallel worker threads.");
2982       uint n_workers = workers->active_workers();
2983 
2984       StrongRootsScope srs(n_workers);
2985 
2986       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2987       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2988       if (n_workers > 1) {
2989         workers->run_task(&tsk);
2990       } else {
2991         tsk.work(0);
2992       }


3023   save_sweep_limits();
3024   verify_overflow_empty();
3025 }
3026 
3027 bool CMSCollector::markFromRoots() {
3028   // we might be tempted to assert that:
3029   // assert(!SafepointSynchronize::is_at_safepoint(),
3030   //        "inconsistent argument?");
3031   // However that wouldn't be right, because it's possible that
3032   // a safepoint is indeed in progress as a young generation
3033   // stop-the-world GC happens even as we mark in this generation.
3034   assert(_collectorState == Marking, "inconsistent state?");
3035   check_correct_thread_executing();
3036   verify_overflow_empty();
3037 
3038   // Weak ref discovery note: We may be discovering weak
3040   // refs in this generation concurrently (but interleaved) with
3040   // weak ref discovery by the young generation collector.
3041 
3042   CMSTokenSyncWithLocks ts(true, bitMapLock());
3043   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3044   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3045   bool res = markFromRootsWork();
3046   if (res) {
3047     _collectorState = Precleaning;
3048   } else { // We failed and a foreground collection wants to take over
3049     assert(_foregroundGCIsActive, "internal state inconsistency");
3050     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3051     if (PrintGCDetails) {
3052       gclog_or_tty->print_cr("bailing out to foreground collection");
3053     }
3054   }
3055   verify_overflow_empty();
3056   return res;
3057 }
3058 
3059 bool CMSCollector::markFromRootsWork() {
3060   // iterate over marked bits in bit map, doing a full scan and mark
3061   // from these roots using the following algorithm:
3062   // . if oop is to the right of the current scan pointer,
3063   //   mark corresponding bit (we'll process it later)
3064   // . else (oop is to left of current scan pointer)
3065   //   push oop on marking stack
3066   // . drain the marking stack
3067 
3068   // Note that when we do a marking step we need to hold the
3069   // bit map lock -- recall that direct allocation (by mutators)
3070   // and promotion (by the young generation collector) is also
3071   // marking the bit map. [the so-called allocate live policy.]
3072   // Because the implementation of bit map marking is not
3073   // robust wrt simultaneous marking of bits in the same word,


3238 //    and local work queue empty,
3239 //    then in a loop do:
3240 //    . check global overflow stack; steal a batch of oops and trace
3241 //    . try to steal from other threads if GOS is empty
3242 //    . if neither is available, offer termination
3243 // -- Terminate and return result
3244 //
3245 void CMSConcMarkingTask::work(uint worker_id) {
3246   elapsedTimer _timer;
3247   ResourceMark rm;
3248   HandleMark hm;
3249 
3250   DEBUG_ONLY(_collector->verify_overflow_empty();)
3251 
3252   // Before we begin work, our work queue should be empty
3253   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3254   // Scan the bitmap covering _cms_space, tracing through grey objects.
3255   _timer.start();
3256   do_scan_and_mark(worker_id, _cms_space);
3257   _timer.stop();
3258   if (PrintCMSStatistics != 0) {
3259     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3260       worker_id, _timer.seconds());
3261       // XXX: need xxx/xxx type of notation, two timers
3262   }
3263 
3264   // ... do work stealing
3265   _timer.reset();
3266   _timer.start();
3267   do_work_steal(worker_id);
3268   _timer.stop();
3269   if (PrintCMSStatistics != 0) {
3270     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3271       worker_id, _timer.seconds());
3272       // XXX: need xxx/xxx type of notation, two timers
3273   }
3274   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3275   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3276   // Note that under the current task protocol, the
3277   // following assertion is true even if the spaces
3278   // expanded since the completion of the concurrent
3279   // marking. XXX This will likely change under a strict
3280   // ABORT semantics.
3281   // After perm removal the comparison was changed to
3282   // greater than or equal to from strictly greater than.
3283   // Before perm removal the highest address sweep would
3284   // have been at the end of perm gen but now is at the
3285   // end of the tenured gen.
3286   assert(_global_finger >=  _cms_space->end(),
3287          "All tasks have been completed");
3288   DEBUG_ONLY(_collector->verify_overflow_empty();)
3289 }
3290 
3291 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3292   HeapWord* read = _global_finger;
3293   HeapWord* cur  = read;


3468   // Check if oop points into the CMS generation
3469   // and is not marked
3470   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3471     // a white object ...
3472     // If we manage to "claim" the object, by being the
3473     // first thread to mark it, then we push it on our
3474     // marking stack
3475     if (_bit_map->par_mark(addr)) {     // ... now grey
3476       // push on work queue (grey set)
3477       bool simulate_overflow = false;
3478       NOT_PRODUCT(
3479         if (CMSMarkStackOverflowALot &&
3480             _collector->simulate_overflow()) {
3481           // simulate a stack overflow
3482           simulate_overflow = true;
3483         }
3484       )
3485       if (simulate_overflow ||
3486           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3487         // stack overflow
3488         if (PrintCMSStatistics != 0) {
3489           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3490                                  SIZE_FORMAT, _overflow_stack->capacity());
3491         }
3492         // We cannot assert that the overflow stack is full because
3493         // it may have been emptied since.
3494         assert(simulate_overflow ||
3495                _work_queue->size() == _work_queue->max_elems(),
3496               "Else push should have succeeded");
3497         handle_stack_overflow(addr);
3498       }
3499     } // Else, some other thread got there first
3500     do_yield_check();
3501   }
3502 }
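// Note on the push order above: a newly greyed object goes to the calling
// thread's local work queue first, and only falls back to the shared
// overflow stack if the local push fails; handle_stack_overflow() is the
// last resort when both fail (or when an overflow is being simulated),
// which is why the overflow reported above is described as benign.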
3503 
3504 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3505 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3506 
3507 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3508   while (_work_queue->size() > max) {
3509     oop new_oop;
3510     if (_work_queue->pop_local(new_oop)) {
3511       assert(new_oop->is_oop(), "Should be an oop");


3556       assert(work_q->size() == 0, "Impossible!");
3557       break;
3558     } else if (yielding() || should_yield()) {
3559       yield();
3560     }
3561   }
3562 }
3563 
3564 // This is run by the CMS (coordinator) thread.
3565 void CMSConcMarkingTask::coordinator_yield() {
3566   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3567          "CMS thread should hold CMS token");
3568   // First give up the locks, then yield, then re-lock
3569   // We should probably use a constructor/destructor idiom to
3570   // do this unlock/lock or modify the MutexUnlocker class to
3571   // serve our purpose. XXX
3572   assert_lock_strong(_bit_map_lock);
3573   _bit_map_lock->unlock();
3574   ConcurrentMarkSweepThread::desynchronize(true);
3575   _collector->stopTimer();
3576   if (PrintCMSStatistics != 0) {
3577     _collector->incrementYields();
3578   }
3579 
3580   // It is possible for whichever thread initiated the yield request
3581   // not to get a chance to wake up and take the bitmap lock between
3582   // this thread releasing it and reacquiring it. So, while the
3583   // should_yield() flag is on, let's sleep for a bit to give the
3584   // other thread a chance to wake up. The limit imposed on the number
3585   // of iterations is defensive, to avoid any unforeseen circumstances
3586   // putting us into an infinite loop. Since it's always been this
3587   // (coordinator_yield()) method that was observed to cause the
3588   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3589   // which is by default non-zero. For the other seven methods that
3590   // also perform the yield operation, we are using a different
3591   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3592   // can enable the sleeping for those methods too, if necessary.
3593   // See 6442774.
3594   //
3595   // We really need to reconsider the synchronization between the GC
3596   // thread and the yield-requesting threads in the future and we
3597   // should really use wait/notify, which is the recommended
3598   // way of doing this type of interaction. Additionally, we should


3720 void CMSCollector::preclean() {
3721   check_correct_thread_executing();
3722   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3723   verify_work_stacks_empty();
3724   verify_overflow_empty();
3725   _abort_preclean = false;
3726   if (CMSPrecleaningEnabled) {
3727     if (!CMSEdenChunksRecordAlways) {
3728       _eden_chunk_index = 0;
3729     }
3730     size_t used = get_eden_used();
3731     size_t capacity = get_eden_capacity();
3732     // Don't start sampling unless we will get sufficiently
3733     // many samples.
3734     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3735                 * CMSScheduleRemarkEdenPenetration)) {
3736       _start_sampling = true;
3737     } else {
3738       _start_sampling = false;
3739     }
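    // Illustrative arithmetic for the test above (the values are only for
    // illustration): with capacity = 512M, CMSScheduleRemarkSamplingRatio = 5
    // and CMSScheduleRemarkEdenPenetration = 50, the threshold is
    // (512M / 500) * 50, i.e. roughly 51M, about one fifth of the 256M eden
    // occupancy at which the remark is to be scheduled. Starting to sample
    // only below that point leaves room for on the order of
    // CMSScheduleRemarkSamplingRatio samples before the target is reached.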
3740     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3741     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
3742     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3743   }
3744   CMSTokenSync x(true); // is cms thread
3745   if (CMSPrecleaningEnabled) {
3746     sample_eden();
3747     _collectorState = AbortablePreclean;
3748   } else {
3749     _collectorState = FinalMarking;
3750   }
3751   verify_work_stacks_empty();
3752   verify_overflow_empty();
3753 }
3754 
3755 // Try and schedule the remark such that young gen
3756 // occupancy is CMSScheduleRemarkEdenPenetration %.
3757 void CMSCollector::abortable_preclean() {
3758   check_correct_thread_executing();
3759   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3760   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3761 
3762   // If Eden's current occupancy is below this threshold,
3763   // immediately schedule the remark; else preclean
3764   // past the next scavenge in an effort to
3765   // schedule the pause as described above. By choosing
3766   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3767   // we will never do an actual abortable preclean cycle.
3768   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3769     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3770     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
3771     // We need more smarts in the abortable preclean
3772     // loop below to deal with cases where allocation
3773     // in young gen is very very slow, and our precleaning
3774     // is running a losing race against a horde of
3775     // mutators intent on flooding us with CMS updates
3776     // (dirty cards).
3777     // One, admittedly dumb, strategy is to give up
3778     // after a certain number of abortable precleaning loops
3779     // or after a certain maximum time. We want to make
3780     // this smarter in the next iteration.
3781     // XXX FIX ME!!! YSR
3782     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3783     while (!(should_abort_preclean() ||
3784              ConcurrentMarkSweepThread::should_terminate())) {
3785       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3786       cumworkdone += workdone;
3787       loops++;
3788       // Voluntarily terminate abortable preclean phase if we have
3789       // been at it for too long.
3790       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3791           loops >= CMSMaxAbortablePrecleanLoops) {
3792         if (PrintGCDetails) {
3793           gclog_or_tty->print(" CMS: abort preclean due to loops ");
3794         }
3795         break;
3796       }
3797       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3798         if (PrintGCDetails) {
3799           gclog_or_tty->print(" CMS: abort preclean due to time ");
3800         }
3801         break;
3802       }
3803       // If we are doing little work each iteration, we should
3804       // take a short break.
3805       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3806         // Sleep for some time, waiting for work to accumulate
3807         stopTimer();
3808         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3809         startTimer();
3810         waited++;
3811       }
3812     }
3813     if (PrintCMSStatistics > 0) {
3814       gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3815                           loops, waited, cumworkdone);
3816     }
3817   }
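  // To summarize the loop above: abortable precleaning keeps iterating until
  // either an abort is requested (or the CMS thread is asked to terminate),
  // CMSMaxAbortablePrecleanLoops iterations have run, or
  // CMSMaxAbortablePrecleanTime milliseconds of wall-clock time have elapsed;
  // an iteration that accomplishes less than
  // CMSAbortablePrecleanMinWorkPerIteration of work sleeps for
  // CMSAbortablePrecleanWaitMillis to let mutator work accumulate.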
3818   CMSTokenSync x(true); // is cms thread
3819   if (_collectorState != Idling) {
3820     assert(_collectorState == AbortablePreclean,
3821            "Spontaneous state transition?");
3822     _collectorState = FinalMarking;
3823   } // Else, a foreground collection completed this CMS cycle.
3824   return;
3825 }
3826 
3827 // Respond to an Eden sampling opportunity
3828 void CMSCollector::sample_eden() {
3829   // Make sure a young gc cannot sneak in between our
3830   // reading and recording of a sample.
3831   assert(Thread::current()->is_ConcurrentGC_thread(),
3832          "Only the cms thread may collect Eden samples");
3833   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3834          "Should collect samples while holding CMS token");
3835   if (!_start_sampling) {
3836     return;
3837   }


3940   // processes.
3941   ScanMarkedObjectsAgainCarefullyClosure
3942     smoac_cl(this, _span,
3943       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3944 
3945   // Preclean dirty cards in ModUnionTable and CardTable using
3946   // appropriate convergence criterion;
3947   // repeat CMSPrecleanIter times unless we find that
3948   // we are losing.
3949   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3950   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3951          "Bad convergence multiplier");
3952   assert(CMSPrecleanThreshold >= 100,
3953          "Unreasonably low CMSPrecleanThreshold");
3954 
3955   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3956   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3957        numIter < CMSPrecleanIter;
3958        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3959     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3960     if (Verbose && PrintGCDetails) {
3961       gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3962     }
3963     // Either there are very few dirty cards, so re-mark
3964     // pause will be small anyway, or our pre-cleaning isn't
3965     // that much faster than the rate at which cards are being
3966     // dirtied, so we might as well stop and re-mark since
3967     // precleaning won't improve our re-mark time by much.
3968     if (curNumCards <= CMSPrecleanThreshold ||
3969         (numIter > 0 &&
3970          (curNumCards * CMSPrecleanDenominator >
3971          lastNumCards * CMSPrecleanNumerator))) {
3972       numIter++;
3973       cumNumCards += curNumCards;
3974       break;
3975     }
3976   }
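  // Illustrative arithmetic for the convergence test above (parameter values
  // are only for illustration): with CMSPrecleanThreshold = 1000,
  // CMSPrecleanNumerator = 2 and CMSPrecleanDenominator = 3, an iteration
  // that finds curNumCards = 4500 dirty cards after lastNumCards = 6000
  // stops the loop (4500 * 3 > 6000 * 2, i.e. the count did not drop to at
  // most 2/3 of the previous pass), whereas curNumCards = 3500 would keep
  // going (3500 * 3 <= 6000 * 2); any count at or below the threshold of
  // 1000 ends the loop immediately, since the remark pause would be small
  // anyway.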
3977 
3978   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3979 
3980   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3981   cumNumCards += curNumCards;
3982   if (PrintGCDetails && PrintCMSStatistics != 0) {
3983     gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3984                   curNumCards, cumNumCards, numIter);
3985   }
3986   return cumNumCards;   // as a measure of useful work done
3987 }
3988 
3989 // PRECLEANING NOTES:
3990 // Precleaning involves:
3991 // . reading the bits of the modUnionTable and clearing the set bits.
3992 // . For the cards corresponding to the set bits, we scan the
3993 //   objects on those cards. This means we need the free_list_lock
3994 //   so that we can safely iterate over the CMS space when scanning
3995 //   for oops.
3996 // . When we scan the objects, we'll be both reading and setting
3997 //   marks in the marking bit map, so we'll need the marking bit map.
3998 // . For protecting _collector_state transitions, we take the CGC_lock.
4000 //   Note that any races in the reading of card table entries by the
4000 //   CMS thread on the one hand and the clearing of those entries by the
4001 //   VM thread or the setting of those entries by the mutator threads on the
4002 //   other are quite benign. However, for efficiency it makes sense to keep
4003 //   the VM thread from racing with the CMS thread while the latter is
4004 //   dirty card info to the modUnionTable. We therefore also use the
4005 //   CGC_lock to protect the reading of the card table and the mod union


4219   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4220   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4221   PrecleanKlassClosure preclean_klass_closure(cl);
4222   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4223 
4224   verify_work_stacks_empty();
4225   verify_overflow_empty();
4226 }
4227 
4228 void CMSCollector::checkpointRootsFinal() {
4229   assert(_collectorState == FinalMarking, "incorrect state transition?");
4230   check_correct_thread_executing();
4231   // world is stopped at this checkpoint
4232   assert(SafepointSynchronize::is_at_safepoint(),
4233          "world should be stopped");
4234   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4235 
4236   verify_work_stacks_empty();
4237   verify_overflow_empty();
4238 
4239   if (PrintGCDetails) {
4240     gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4241                         _young_gen->used() / K,
4242                         _young_gen->capacity() / K);
4243   }
4244   {
4245     if (CMSScavengeBeforeRemark) {
4246       GenCollectedHeap* gch = GenCollectedHeap::heap();
4247       // Temporarily set flag to false, GCH->do_collection will
4248       // expect it to be false and will set it to true
4249       FlagSetting fl(gch->_is_gc_active, false);
4250       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4251         PrintGCDetails && Verbose, true, _gc_timer_cm);)

4252       gch->do_collection(true,                      // full (i.e. force, see below)
4253                          false,                     // !clear_all_soft_refs
4254                          0,                         // size
4255                          false,                     // is_tlab
4256                          GenCollectedHeap::YoungGen // type
4257         );
4258     }
4259     FreelistLocker x(this);
4260     MutexLockerEx y(bitMapLock(),
4261                     Mutex::_no_safepoint_check_flag);
4262     checkpointRootsFinalWork();
4263   }
4264   verify_work_stacks_empty();
4265   verify_overflow_empty();
4266 }
4267 
4268 void CMSCollector::checkpointRootsFinalWork() {
4269   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4270 
4271   assert(haveFreelistLocks(), "must have free list locks");
4272   assert_lock_strong(bitMapLock());
4273 
4274   ResourceMark rm;
4275   HandleMark   hm;
4276 
4277   GenCollectedHeap* gch = GenCollectedHeap::heap();
4278 
4279   if (should_unload_classes()) {
4280     CodeCache::gc_prologue();
4281   }
4282   assert(haveFreelistLocks(), "must have free list locks");
4283   assert_lock_strong(bitMapLock());
4284 
4285   // We might assume that we need not fill TLAB's when
4286   // CMSScavengeBeforeRemark is set, because we may have just done
4287   // a scavenge which would have filled all TLAB's -- and besides
4288   // Eden would be empty. This however may not always be the case --
4289   // for instance although we asked for a scavenge, it may not have
4290   // happened because of a JNI critical section. We probably need
4291   // a policy for deciding whether we can in that case wait until
4292   // the critical section releases and then do the remark following
4293   // the scavenge, and skip it here. In the absence of that policy,
4294   // or of an indication of whether the scavenge did indeed occur,
4295   // we cannot rely on TLAB's having been filled and must do
4296   // so here just in case a scavenge did not happen.
4297   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4298   // Update the saved marks which may affect the root scans.
4299   gch->save_marks();
4300 
4301   if (CMSPrintEdenSurvivorChunks) {
4302     print_eden_and_survivor_chunk_arrays();
4303   }
4304 
4305   {
4306 #if defined(COMPILER2) || INCLUDE_JVMCI
4307     DerivedPointerTableDeactivate dpt_deact;
4308 #endif
4309 
4310     // Note on the role of the mod union table:
4311     // Since the marker in "markFromRoots" marks concurrently with
4312     // mutators, it is possible for some reachable objects not to have been
4313     // scanned. For instance, the only reference to an object A was
4314     // placed in object B after the marker scanned B. Unless B is rescanned,
4315     // A would be collected. Such updates to references in marked objects
4316     // are detected via the mod union table which is the set of all cards
4317     // dirtied since the first checkpoint in this GC cycle and prior to
4318     // the most recent young generation GC, minus those cleaned up by the
4319     // concurrent precleaning.
4320     if (CMSParallelRemarkEnabled) {
4321       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
4322       do_remark_parallel();
4323     } else {
4324       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
4325       do_remark_non_parallel();
4326     }
4327   }
4328   verify_work_stacks_empty();
4329   verify_overflow_empty();
4330 
4331   {
4332     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
4333     refProcessingWork();
4334   }
4335   verify_work_stacks_empty();
4336   verify_overflow_empty();
4337 
4338   if (should_unload_classes()) {
4339     CodeCache::gc_epilogue();
4340   }
4341   JvmtiExport::gc_epilogue();
4342 
4343   // If we encountered any (marking stack / work queue) overflow
4344   // events during the current CMS cycle, take appropriate
4345   // remedial measures, where possible, so as to try and avoid
4346   // recurrence of that condition.
4347   assert(_markStack.isEmpty(), "No grey objects");
4348   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4349                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4350   if (ser_ovflw > 0) {
4351     if (PrintCMSStatistics != 0) {
4352       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4353         "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
4354         ", kac_preclean=" SIZE_FORMAT ")",
4355         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4356         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4357     }
4358     _markStack.expand();
4359     _ser_pmc_remark_ovflw = 0;
4360     _ser_pmc_preclean_ovflw = 0;
4361     _ser_kac_preclean_ovflw = 0;
4362     _ser_kac_ovflw = 0;
4363   }
4364   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4365     if (PrintCMSStatistics != 0) {
4366       gclog_or_tty->print_cr("Work queue overflow (benign) "
4367         "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4368         _par_pmc_remark_ovflw, _par_kac_ovflw);
4369     }
4370     _par_pmc_remark_ovflw = 0;
4371     _par_kac_ovflw = 0;
4372   }
4373   if (PrintCMSStatistics != 0) {
4374      if (_markStack._hit_limit > 0) {
4375        gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4376                               _markStack._hit_limit);
4377      }
4378      if (_markStack._failed_double > 0) {
4379        gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
4380                               " current capacity " SIZE_FORMAT,
4381                               _markStack._failed_double,
4382                               _markStack.capacity());
4383      }
4384   }
4385   _markStack._hit_limit = 0;
4386   _markStack._failed_double = 0;
4387 
4388   if ((VerifyAfterGC || VerifyDuringGC) &&
4389       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4390     verify_after_remark();
4391   }
4392 
4393   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4394 
4395   // Change under the freelistLocks.
4396   _collectorState = Sweeping;
4397   // Call isAllClear() under bitMapLock
4398   assert(_modUnionTable.isAllClear(),
4399       "Should be clear by end of the final marking");
4400   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4401       "Should be clear by end of the final marking");
4402 }
4403 
4404 void CMSParInitialMarkTask::work(uint worker_id) {
4405   elapsedTimer _timer;
4406   ResourceMark rm;
4407   HandleMark   hm;
4408 
4409   // ---------- scan from roots --------------
4410   _timer.start();
4411   GenCollectedHeap* gch = GenCollectedHeap::heap();
4412   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4413 
4414   // ---------- young gen roots --------------
4415   {
4416     work_on_young_gen_roots(worker_id, &par_mri_cl);
4417     _timer.stop();
4418     if (PrintCMSStatistics != 0) {
4419       gclog_or_tty->print_cr(
4420         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4421         worker_id, _timer.seconds());
4422     }
4423   }
4424 
4425   // ---------- remaining roots --------------
4426   _timer.reset();
4427   _timer.start();
4428 
4429   CLDToOopClosure cld_closure(&par_mri_cl, true);
4430 
4431   gch->gen_process_roots(_strong_roots_scope,
4432                          GenCollectedHeap::OldGen,
4433                          false,     // yg was scanned above
4434                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4435                          _collector->should_unload_classes(),
4436                          &par_mri_cl,
4437                          NULL,
4438                          &cld_closure);
4439   assert(_collector->should_unload_classes()
4440          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4441          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4442   _timer.stop();
4443   if (PrintCMSStatistics != 0) {
4444     gclog_or_tty->print_cr(
4445       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4446       worker_id, _timer.seconds());
4447   }
4448 }
4449 
4450 // Parallel remark task
4451 class CMSParRemarkTask: public CMSParMarkTask {
4452   CompactibleFreeListSpace* _cms_space;
4453 
4454   // The per-thread work queues, available here for stealing.
4455   OopTaskQueueSet*       _task_queues;
4456   ParallelTaskTerminator _term;
4457   StrongRootsScope*      _strong_roots_scope;
4458 
4459  public:
4460   // A value of 0 passed to n_workers will cause the number of
4461   // workers to be taken from the active workers in the work gang.
4462   CMSParRemarkTask(CMSCollector* collector,
4463                    CompactibleFreeListSpace* cms_space,
4464                    uint n_workers, WorkGang* workers,
4465                    OopTaskQueueSet* task_queues,
4466                    StrongRootsScope* strong_roots_scope):
4467     CMSParMarkTask("Rescan roots and grey objects in parallel",


4540   elapsedTimer _timer;
4541   ResourceMark rm;
4542   HandleMark   hm;
4543 
4544   // ---------- rescan from roots --------------
4545   _timer.start();
4546   GenCollectedHeap* gch = GenCollectedHeap::heap();
4547   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4548     _collector->_span, _collector->ref_processor(),
4549     &(_collector->_markBitMap),
4550     work_queue(worker_id));
4551 
4552   // Rescan young gen roots first since these are likely
4553   // coarsely partitioned and may, on that account, constitute
4554   // the critical path; thus, it's best to start off that
4555   // work first.
4556   // ---------- young gen roots --------------
4557   {
4558     work_on_young_gen_roots(worker_id, &par_mrias_cl);
4559     _timer.stop();
4560     if (PrintCMSStatistics != 0) {
4561       gclog_or_tty->print_cr(
4562         "Finished young gen rescan work in %dth thread: %3.3f sec",
4563         worker_id, _timer.seconds());
4564     }
4565   }
4566 
4567   // ---------- remaining roots --------------
4568   _timer.reset();
4569   _timer.start();
4570   gch->gen_process_roots(_strong_roots_scope,
4571                          GenCollectedHeap::OldGen,
4572                          false,     // yg was scanned above
4573                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4574                          _collector->should_unload_classes(),
4575                          &par_mrias_cl,
4576                          NULL,
4577                          NULL);     // The dirty klasses will be handled below
4578 
4579   assert(_collector->should_unload_classes()
4580          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4581          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4582   _timer.stop();
4583   if (PrintCMSStatistics != 0) {
4584     gclog_or_tty->print_cr(
4585       "Finished remaining root rescan work in %dth thread: %3.3f sec",
4586       worker_id, _timer.seconds());
4587   }
4588 
4589   // ---------- unhandled CLD scanning ----------
4590   if (worker_id == 0) { // Single threaded at the moment.
4591     _timer.reset();
4592     _timer.start();
4593 
4594     // Scan all new class loader data objects and new dependencies that were
4595     // introduced during concurrent marking.
4596     ResourceMark rm;
4597     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4598     for (int i = 0; i < array->length(); i++) {
4599       par_mrias_cl.do_cld_nv(array->at(i));
4600     }
4601 
4602     // We don't need to keep track of new CLDs anymore.
4603     ClassLoaderDataGraph::remember_new_clds(false);
4604 
4605     _timer.stop();
4606     if (PrintCMSStatistics != 0) {
4607       gclog_or_tty->print_cr(
4608           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4609           worker_id, _timer.seconds());
4610     }
4611   }
4612 
4613   // ---------- dirty klass scanning ----------
4614   if (worker_id == 0) { // Single threaded at the moment.
4615     _timer.reset();
4616     _timer.start();
4617 
4618     // Scan all classes that were dirtied during the concurrent marking phase.
4619     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4620     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4621 
4622     _timer.stop();
4623     if (PrintCMSStatistics != 0) {
4624       gclog_or_tty->print_cr(
4625           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4626           worker_id, _timer.seconds());
4627     }
4628   }
4629 
4630   // We might have added oops to ClassLoaderData::_handles during the
4631   // concurrent marking phase. These oops point to newly allocated objects
4632   // that are guaranteed to be kept alive. Either by the direct allocation
4633   // code, or when the young collector processes the roots. Hence,
4634   // we don't have to revisit the _handles block during the remark phase.
4635 
4636   // ---------- rescan dirty cards ------------
4637   _timer.reset();
4638   _timer.start();
4639 
4640   // Do the rescan tasks for each of the two spaces
4641   // (cms_space) in turn.
4642   // "worker_id" is passed to select the task_queue for "worker_id"
4643   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4644   _timer.stop();
4645   if (PrintCMSStatistics != 0) {
4646     gclog_or_tty->print_cr(
4647       "Finished dirty card rescan work in %dth thread: %3.3f sec",
4648       worker_id, _timer.seconds());
4649   }
4650 
4651   // ---------- steal work from other threads ...
4652   // ---------- ... and drain overflow list.
4653   _timer.reset();
4654   _timer.start();
4655   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4656   _timer.stop();
4657   if (PrintCMSStatistics != 0) {
4658     gclog_or_tty->print_cr(
4659       "Finished work stealing in %dth thread: %3.3f sec",
4660       worker_id, _timer.seconds());
4661   }
4662 }
4663 
4664 // Note that the worker_id parameter is not used.
4665 void
4666 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4667   OopsInGenClosure* cl, ContiguousSpace* space,
4668   HeapWord** chunk_array, size_t chunk_top) {
4669   // Until all tasks completed:
4670   // . claim an unclaimed task
4671   // . compute region boundaries corresponding to task claimed
4672   //   using chunk_array
4673   // . par_oop_iterate(cl) over that region
4674 
4675   ResourceMark rm;
4676   HandleMark   hm;
4677 
4678   SequentialSubTasksDone* pst = space->par_seq_tasks();
4679 
4680   uint nth_task = 0;
4681   uint n_tasks  = pst->n_tasks();


4835       // because we just took work from the overflow list,
4836       // but of course we can't since all of that could have
4837       // been already stolen from us.
4838       // "He giveth and He taketh away."
4839       continue;
4840     }
4841     // Verify that we have no work before we resort to stealing
4842     assert(work_q->size() == 0, "Have work, shouldn't steal");
4843     // Try to steal from other queues that have work
4844     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4845       NOT_PRODUCT(num_steals++;)
4846       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4847       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4848       // Do scanning work
4849       obj_to_scan->oop_iterate(cl);
4850       // Loop around, finish this work, and try to steal some more
4851     } else if (terminator()->offer_termination()) {
4852         break;  // nirvana from the infinite cycle
4853     }
4854   }
4855   NOT_PRODUCT(
4856     if (PrintCMSStatistics != 0) {
4857       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4858     }
4859   )
4860   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4861          "Else our work is not yet done");
4862 }
4863 
4864 // Record object boundaries in _eden_chunk_array by sampling the eden
4865 // top in the slow-path eden object allocation code path and record
4866 // the boundaries, if CMSEdenChunksRecordAlways is true. If
4867 // CMSEdenChunksRecordAlways is false, we use the other asynchronous
4868 // sampling in sample_eden() that activates during the part of the
4869 // preclean phase.
4870 void CMSCollector::sample_eden_chunk() {
4871   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4872     if (_eden_chunk_lock->try_lock()) {
4873       // Record a sample. This is the critical section. The contents
4874       // of the _eden_chunk_array have to be non-decreasing in the
4875       // address order.
4876       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4877       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4878              "Unexpected state of Eden");
4879       if (_eden_chunk_index == 0 ||


4936       if (cur_val < min_val) {
4937         min_tid = j;
4938         min_val = cur_val;
4939       } else {
4940         assert(cur_val < top, "All recorded addresses should be less");
4941       }
4942     }
4943     // At this point min_val and min_tid are respectively
4944     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4945     // and the thread (j) that witnesses that address.
4946     // We record this address in the _survivor_chunk_array[i]
4947     // and increment _cursor[min_tid] prior to the next round i.
4948     if (min_val == top) {
4949       break;
4950     }
4951     _survivor_chunk_array[i] = min_val;
4952     _cursor[min_tid]++;
4953   }
4954   // We are all done; record the size of the _survivor_chunk_array
4955   _survivor_chunk_index = i; // exclusive: [0, i)
4956   if (PrintCMSStatistics > 0) {
4957     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4958   }
4959   // Verify that we used up all the recorded entries
4960   #ifdef ASSERT
4961     size_t total = 0;
4962     for (int j = 0; j < no_of_gc_threads; j++) {
4963       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4964       total += _cursor[j];
4965     }
4966     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4967     // Check that the merged array is in sorted order
4968     if (total > 0) {
4969       for (size_t i = 0; i < total - 1; i++) {
4970         if (PrintCMSStatistics > 0) {
4971           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4972                               i, p2i(_survivor_chunk_array[i]));
4973         }
4974         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4975                "Not sorted");
4976       }
4977     }
4978   #endif // ASSERT
4979 }
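// A small worked example of the merge above (the addresses are hypothetical):
// given per-thread survivor PLAB arrays {0x1000, 0x4000}, {0x2000} and
// {0x3000, 0x5000}, successive rounds select 0x1000, 0x2000, 0x3000, 0x4000
// and then 0x5000, each time advancing only the cursor of the thread that
// supplied the minimum, so _survivor_chunk_array ends up globally sorted and
// _survivor_chunk_index (here 5) records the number of merged entries, which
// is exactly the invariant the ASSERT block checks.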
4980 
4981 // Set up the space's par_seq_tasks structure for work claiming
4982 // for parallel initial scan and rescan of young gen.
4983 // See ParRescanTask where this is currently used.
4984 void
4985 CMSCollector::
4986 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4987   assert(n_threads > 0, "Unexpected n_threads argument");
4988 
4989   // Eden space
4990   if (!_young_gen->eden()->is_empty()) {
4991     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4992     assert(!pst->valid(), "Clobbering existing data?");
4993     // Each valid entry in [0, _eden_chunk_index) represents a task.


5087   // as a result of work_q overflow
5088   restore_preserved_marks_if_any();
5089 }
5090 
5091 // Non-parallel version of remark
5092 void CMSCollector::do_remark_non_parallel() {
5093   ResourceMark rm;
5094   HandleMark   hm;
5095   GenCollectedHeap* gch = GenCollectedHeap::heap();
5096   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5097 
5098   MarkRefsIntoAndScanClosure
5099     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5100              &_markStack, this,
5101              false /* should_yield */, false /* not precleaning */);
5102   MarkFromDirtyCardsClosure
5103     markFromDirtyCardsClosure(this, _span,
5104                               NULL,  // space is set further below
5105                               &_markBitMap, &_markStack, &mrias_cl);
5106   {
5107     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5108     // Iterate over the dirty cards, setting the corresponding bits in the
5109     // mod union table.
5110     {
5111       ModUnionClosure modUnionClosure(&_modUnionTable);
5112       _ct->ct_bs()->dirty_card_iterate(
5113                       _cmsGen->used_region(),
5114                       &modUnionClosure);
5115     }
5116     // Having transferred these marks into the modUnionTable, we just need
5117     // to rescan the marked objects on the dirty cards in the modUnionTable.
5118     // The initial marking may have been done during an asynchronous
5119     // collection so there may be dirty bits in the mod-union table.
5120     const int alignment =
5121       CardTableModRefBS::card_size * BitsPerWord;
5122     {
5123       // ... First handle dirty cards in CMS gen
5124       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5125       MemRegion ur = _cmsGen->used_region();
5126       HeapWord* lb = ur.start();
5127       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5128       MemRegion cms_span(lb, ub);
5129       _modUnionTable.dirty_range_iterate_clear(cms_span,
5130                                                &markFromDirtyCardsClosure);
5131       verify_work_stacks_empty();
5132       if (PrintCMSStatistics != 0) {
5133         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5134           markFromDirtyCardsClosure.num_dirty_cards());
5135       }
5136     }
5137   }
5138   if (VerifyDuringGC &&
5139       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5140     HandleMark hm;  // Discard invalid handles created during verification
5141     Universe::verify();
5142   }
5143   {
5144     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5145 
5146     verify_work_stacks_empty();
5147 
5148     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5149     StrongRootsScope srs(1);
5150 
5151     gch->gen_process_roots(&srs,
5152                            GenCollectedHeap::OldGen,
5153                            true,  // young gen as roots
5154                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5155                            should_unload_classes(),
5156                            &mrias_cl,
5157                            NULL,
5158                            NULL); // The dirty klasses will be handled below
5159 
5160     assert(should_unload_classes()
5161            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5162            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5163   }
5164 
5165   {
5166     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5167 
5168     verify_work_stacks_empty();
5169 
5170     // Scan all class loader data objects that might have been introduced
5171     // during concurrent marking.
5172     ResourceMark rm;
5173     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5174     for (int i = 0; i < array->length(); i++) {
5175       mrias_cl.do_cld_nv(array->at(i));
5176     }
5177 
5178     // We don't need to keep track of new CLDs anymore.
5179     ClassLoaderDataGraph::remember_new_clds(false);
5180 
5181     verify_work_stacks_empty();
5182   }
5183 
5184   {
5185     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5186 
5187     verify_work_stacks_empty();
5188 
5189     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5190     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5191 
5192     verify_work_stacks_empty();
5193   }
5194 
5195   // We might have added oops to ClassLoaderData::_handles during the
5196   // concurrent marking phase. These oops point to newly allocated objects
5197   // that are guaranteed to be kept alive. Either by the direct allocation
5198   // code, or when the young collector processes the roots. Hence,
5199   // we don't have to revisit the _handles block during the remark phase.
5200 
5201   verify_work_stacks_empty();
5202   // Restore evacuated mark words, if any, used for overflow list links
5203   restore_preserved_marks_if_any();
5204 
5205   verify_overflow_empty();


5327       // We'd like to assert(work_q->size() != 0, ...)
5328       // because we just took work from the overflow list,
5329       // but of course we can't, since all of that might have
5330       // been already stolen from us.
5331       continue;
5332     }
5333     // Verify that we have no work before we resort to stealing
5334     assert(work_q->size() == 0, "Have work, shouldn't steal");
5335     // Try to steal from other queues that have work
5336     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5337       NOT_PRODUCT(num_steals++;)
5338       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5339       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5340       // Do scanning work
5341       obj_to_scan->oop_iterate(keep_alive);
5342       // Loop around, finish this work, and try to steal some more
5343     } else if (terminator()->offer_termination()) {
5344       break;  // nirvana from the infinite cycle
5345     }
5346   }
5347   NOT_PRODUCT(
5348     if (PrintCMSStatistics != 0) {
5349       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5350     }
5351   )
5352 }
5353 
5354 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5355 {
5356   GenCollectedHeap* gch = GenCollectedHeap::heap();
5357   WorkGang* workers = gch->workers();
5358   assert(workers != NULL, "Need parallel worker threads.");
5359   CMSRefProcTaskProxy rp_task(task, &_collector,
5360                               _collector.ref_processor()->span(),
5361                               _collector.markBitMap(),
5362                               workers, _collector.task_queues());
5363   workers->run_task(&rp_task);
5364 }
5365 
5366 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5367 {
5368 
5369   GenCollectedHeap* gch = GenCollectedHeap::heap();
5370   WorkGang* workers = gch->workers();
5371   assert(workers != NULL, "Need parallel worker threads.");


5373   workers->run_task(&enq_task);
5374 }
5375 
5376 void CMSCollector::refProcessingWork() {
5377   ResourceMark rm;
5378   HandleMark   hm;
5379 
5380   ReferenceProcessor* rp = ref_processor();
5381   assert(rp->span().equals(_span), "Spans should be equal");
5382   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5383   // Process weak references.
5384   rp->setup_policy(false);
5385   verify_work_stacks_empty();
5386 
5387   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5388                                           &_markStack, false /* !preclean */);
5389   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5390                                 _span, &_markBitMap, &_markStack,
5391                                 &cmsKeepAliveClosure, false /* !preclean */);
5392   {
5393     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5394 
5395     ReferenceProcessorStats stats;
5396     if (rp->processing_is_mt()) {
5397       // Set the degree of MT here.  If the discovery is done MT, there
5398       // may have been a different number of threads doing the discovery
5399       // and a different number of discovered lists may have Ref objects.
5400       // That is OK as long as the Reference lists are balanced (see
5401       // balance_all_queues() and balance_queues()).
5402       GenCollectedHeap* gch = GenCollectedHeap::heap();
5403       uint active_workers = ParallelGCThreads;
5404       WorkGang* workers = gch->workers();
5405       if (workers != NULL) {
5406         active_workers = workers->active_workers();
5407         // The expectation is that active_workers will have already
5408         // been set to a reasonable value.  If it has not been set,
5409         // investigate.
5410         assert(active_workers > 0, "Should have been set during scavenge");
5411       }
5412       rp->set_active_mt_degree(active_workers);
5413       CMSRefProcTaskExecutor task_executor(*this);


5415                                         &cmsKeepAliveClosure,
5416                                         &cmsDrainMarkingStackClosure,
5417                                         &task_executor,
5418                                         _gc_timer_cm);
5419     } else {
5420       stats = rp->process_discovered_references(&_is_alive_closure,
5421                                         &cmsKeepAliveClosure,
5422                                         &cmsDrainMarkingStackClosure,
5423                                         NULL,
5424                                         _gc_timer_cm);
5425     }
5426     _gc_tracer_cm->report_gc_reference_stats(stats);
5427 
5428   }
5429 
5430   // This is the point where the entire marking should have completed.
5431   verify_work_stacks_empty();
5432 
5433   if (should_unload_classes()) {
5434     {
5435       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5436 
5437       // Unload classes and purge the SystemDictionary.
5438       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5439 
5440       // Unload nmethods.
5441       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5442 
5443       // Prune dead klasses from subklass/sibling/implementor lists.
5444       Klass::clean_weak_klass_links(&_is_alive_closure);
5445     }
5446 
5447     {
5448       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
5449       // Clean up unreferenced symbols in symbol table.
5450       SymbolTable::unlink();
5451     }
5452 
5453     {
5454       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
5455       // Delete entries for dead interned strings.
5456       StringTable::unlink(&_is_alive_closure);
5457     }
5458   }
5459 
5460 
5461   // Restore any preserved marks as a result of mark stack or
5462   // work queue overflow
5463   restore_preserved_marks_if_any();  // done single-threaded for now
5464 
5465   rp->set_enqueuing_is_done(true);
5466   if (rp->processing_is_mt()) {
5467     rp->balance_all_queues();
5468     CMSRefProcTaskExecutor task_executor(*this);
5469     rp->enqueue_discovered_references(&task_executor);
5470   } else {
5471     rp->enqueue_discovered_references(NULL);
5472   }
5473   rp->verify_no_references_recorded();
5474   assert(!rp->discovery_enabled(), "should have been disabled");


5501     }
5502   }
5503 }
5504 #endif
5505 
5506 void CMSCollector::sweep() {
5507   assert(_collectorState == Sweeping, "just checking");
5508   check_correct_thread_executing();
5509   verify_work_stacks_empty();
5510   verify_overflow_empty();
5511   increment_sweep_count();
5512   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5513 
5514   _inter_sweep_timer.stop();
5515   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5516 
5517   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5518   _intra_sweep_timer.reset();
5519   _intra_sweep_timer.start();
5520   {
5521     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5522     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5523     // First sweep the old gen
5524     {
5525       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5526                                bitMapLock());
5527       sweepWork(_cmsGen);
5528     }
5529 
5530     // Update Universe::_heap_*_at_gc figures.
5531     // We need all the free list locks to make the abstract state
5532     // transition from Sweeping to Resetting. See detailed note
5533     // further below.
5534     {
5535       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5536       // Update heap occupancy information which is used as
5537       // input to soft ref clearing policy at the next gc.
5538       Universe::update_heap_info_at_gc();
5539       _collectorState = Resizing;
5540     }
5541   }
5542   verify_work_stacks_empty();


5585   GenCollectedHeap* gch = GenCollectedHeap::heap();
5586   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5587   gch->update_full_collections_completed(_collection_count_start);
5588 }
5589 
5590 // FIX ME!!! Looks like this belongs in CFLSpace, with
5591 // CMSGen merely delegating to it.
5592 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5593   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5594   HeapWord*  minAddr        = _cmsSpace->bottom();
5595   HeapWord*  largestAddr    =
5596     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5597   if (largestAddr == NULL) {
5598     // The dictionary appears to be empty.  In this case
5599     // try to coalesce at the end of the heap.
5600     largestAddr = _cmsSpace->end();
5601   }
5602   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5603   size_t nearLargestOffset =
5604     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5605   if (PrintFLSStatistics != 0) {
5606     gclog_or_tty->print_cr(
5607       "CMS: Large Block: " PTR_FORMAT ";"
5608       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5609       p2i(largestAddr),
5610       p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5611   }
5612   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5613 }
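// For illustration (the values are hypothetical): with the largest free block
// sitting 100M above the bottom of the CMS space and
// FLSLargestBlockCoalesceProximity at 0.99, the boundary is set to 99M above
// bottom, less MinChunkSize, so isNearLargestChunk() answers true from just
// below the largest block to the end of the space.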
5614 
5615 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5616   return addr >= _cmsSpace->nearLargestChunk();
5617 }
5618 
5619 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5620   return _cmsSpace->find_chunk_at_end();
5621 }
5622 
5623 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5624                                                     bool full) {
5625   // If the young generation has been collected, gather any statistics
5626   // that are of interest at this point.
5627   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5628   if (!full && current_is_young) {
5629     // Gather statistics on the young generation collection.
5630     collector()->stats().record_gc0_end(used());
5631   }


5685   } else {                                      // did not unload classes,
5686     _concurrent_cycles_since_last_unload++;     // ... increment count
5687   }
5688 }
5689 
5690 // Reset CMS data structures (for now just the marking bit map)
5691 // preparatory for the next cycle.
5692 void CMSCollector::reset_concurrent() {
5693   CMSTokenSyncWithLocks ts(true, bitMapLock());
5694 
5695   // If the state is not "Resetting", the foreground thread
5696   // has already done the collection and the resetting.
5697   if (_collectorState != Resetting) {
5698     assert(_collectorState == Idling, "The state should only change"
5699       " because the foreground collector has finished the collection");
5700     return;
5701   }
5702 
5703   // Clear the mark bitmap (no grey objects to start with)
5704   // for the next cycle.
5705   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5706   CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5707 
5708   HeapWord* curAddr = _markBitMap.startWord();
5709   while (curAddr < _markBitMap.endWord()) {
5710     size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5711     MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5712     _markBitMap.clear_large_range(chunk);
5713     if (ConcurrentMarkSweepThread::should_yield() &&
5714         !foregroundGCIsActive() &&
5715         CMSYield) {
5716       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5717              "CMS thread should hold CMS token");
5718       assert_lock_strong(bitMapLock());
5719       bitMapLock()->unlock();
5720       ConcurrentMarkSweepThread::desynchronize(true);
5721       stopTimer();
5722       if (PrintCMSStatistics != 0) {
5723         incrementYields();
5724       }
5725 
5726       // See the comment in coordinator_yield()
5727       for (unsigned i = 0; i < CMSYieldSleepCount &&
5728                        ConcurrentMarkSweepThread::should_yield() &&
5729                        !CMSCollector::foregroundGCIsActive(); ++i) {
5730         os::sleep(Thread::current(), 1, false);
5731       }
5732 
5733       ConcurrentMarkSweepThread::synchronize(true);
5734       bitMapLock()->lock_without_safepoint_check();
5735       startTimer();
5736     }
5737     curAddr = chunk.end();
5738   }
5739   // A successful mostly concurrent collection has been done.
5740   // Because only the full (i.e., concurrent mode failure) collections
5741   // are being measured for gc overhead limits, clean the "near" flag
5742   // and count.
5743   size_policy()->reset_gc_overhead_limit_count();
5744   _collectorState = Idling;
5745 
5746   register_gc_end();
5747 }
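
The loop above clears the bit map in CMSBitMapYieldQuantum-sized chunks so the bitMapLock is never held across one long clear; between chunks the thread offers to yield. A minimal stand-alone sketch of that chunking pattern, with hypothetical helpers clear_one_chunk() and maybe_yield() standing in for the bit-map clear and the yield protocol (illustration only, not part of this file):

    // Sketch: process [start, end) in bounded chunks, offering to
    // yield after each chunk instead of holding locks throughout.
    static void clear_in_chunks(HeapWord* start, HeapWord* end, size_t quantum) {
      for (HeapWord* cur = start; cur < end; ) {
        size_t remaining = pointer_delta(end, cur);
        MemRegion chunk(cur, MIN2(quantum, remaining));  // bounded unit of work
        clear_one_chunk(chunk);                          // e.g. clear_large_range()
        maybe_yield();                                   // drop locks briefly if asked
        cur = chunk.end();
      }
    }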
5748 
5749 // Same as above but for STW paths
5750 void CMSCollector::reset_stw() {
5751   // already have the lock
5752   assert(_collectorState == Resetting, "just checking");
5753   assert_lock_strong(bitMapLock());
5754   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5755   _markBitMap.clear_all();
5756   _collectorState = Idling;
5757   register_gc_end();
5758 }
5759 
5760 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5761   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5762   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
5763   TraceCollectorStats tcs(counters());
5764 
5765   switch (op) {
5766     case CMS_op_checkpointRootsInitial: {

5767       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5768       checkpointRootsInitial();
5769       if (PrintGC) {
5770         _cmsGen->printOccupancy("initial-mark");
5771       }
5772       break;
5773     }
5774     case CMS_op_checkpointRootsFinal: {

5775       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5776       checkpointRootsFinal();
5777       if (PrintGC) {
5778         _cmsGen->printOccupancy("remark");
5779       }
5780       break;
5781     }
5782     default:
5783       fatal("No such CMS_op");
5784   }
5785 }
5786 
5787 #ifndef PRODUCT
5788 size_t const CMSCollector::skip_header_HeapWords() {
5789   return FreeChunk::header_size();
5790 }
5791 
5792 // Try to collect here the conditions that should hold when
5793 // the CMS thread is exiting. The idea is that the foreground GC
5794 // thread should not be blocked if it wants to terminate
5795 // the CMS thread and yet continue to run the VM for a while
5796 // after that.
5797 void CMSCollector::verify_ok_to_terminate() const {
5798   assert(Thread::current()->is_ConcurrentGC_thread(),
5799          "should be called by CMS thread");


5972   }
5973   assert(_virtual_space.committed_size() == rs.size(),
5974          "didn't reserve backing store for all of CMS stack?");
5975   _base = (oop*)(_virtual_space.low());
5976   _index = 0;
5977   _capacity = size;
5978   NOT_PRODUCT(_max_depth = 0);
5979   return true;
5980 }
5981 
5982 // XXX FIX ME !!! In the MT case we come in here holding a
5983 // leaf lock. For printing we need to take a further lock
5984 // which has lower rank. We need to recalibrate the two
5985 // lock-ranks involved in order to be able to print the
5986 // messages below. (Or defer the printing to the caller.
5987 // For now we take the expedient path of just disabling the
5988 // messages for the problematic case.)
5989 void CMSMarkStack::expand() {
5990   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5991   if (_capacity == MarkStackSizeMax) {
5992     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
5993       // We print a warning message only once per CMS cycle.
5994       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
5995     }
5996     return;
5997   }
5998   // Double capacity if possible
5999   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6000   // Do not give up existing stack until we have managed to
6001   // get the double capacity that we desired.
6002   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6003                    new_capacity * sizeof(oop)));
6004   if (rs.is_reserved()) {
6005     // Release the backing store associated with old stack
6006     _virtual_space.release();
6007     // Reinitialize virtual space for new stack
6008     if (!_virtual_space.initialize(rs, rs.size())) {
6009       fatal("Not enough swap for expanded marking stack");
6010     }
6011     _base = (oop*)(_virtual_space.low());
6012     _index = 0;
6013     _capacity = new_capacity;
6014   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6015     // Failed to double capacity, continue;
6016     // we print a detail message only once per CMS cycle.
6017     gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6018             SIZE_FORMAT "K",
6019             _capacity / K, new_capacity / K);
6020   }
6021 }
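
Note that expand() reserves the doubled backing store before releasing the old one, so a failed reservation leaves the current stack intact and merely records the failure. A plain-C++ sketch of the same grow-or-keep idiom, with std::malloc standing in for ReservedSpace/VirtualSpace and all names illustrative:

    // Sketch only: try to double a buffer up to a hard cap, keeping the
    // existing storage if the new allocation cannot be obtained.
    #include <cstdlib>
    static bool try_double(void** base, size_t* capacity, size_t max_capacity) {
      if (*capacity == max_capacity) {
        return false;                      // already at the permitted maximum
      }
      size_t new_capacity = *capacity * 2;
      if (new_capacity > max_capacity) {
        new_capacity = max_capacity;
      }
      void* new_base = std::malloc(new_capacity);
      if (new_base == NULL) {
        return false;                      // keep the old buffer untouched
      }
      std::free(*base);                    // contents are discarded, as in expand()
      *base = new_base;
      *capacity = new_capacity;
      return true;
    }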
6022 
6023 
6024 // Closures
6025 // XXX: there seems to be a lot of code  duplication here;
6026 // should refactor and consolidate common code.
6027 
6028 // This closure is used to mark refs into the CMS generation in
6029 // the CMS bit map. Called at the first checkpoint. This closure
6030 // assumes that we do not need to re-mark dirty cards; if the CMS
6031 // generation on which this is used is not the oldest
6032 // generation, then this will lose younger_gen cards!
6033 
6034 MarkRefsIntoClosure::MarkRefsIntoClosure(
6035   MemRegion span, CMSBitMap* bitMap):
6036     _span(span),
6037     _bitMap(bitMap)
6038 {


6076 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6077 
6078 // A variant of the above, used for CMS marking verification.
6079 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6080   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6081     _span(span),
6082     _verification_bm(verification_bm),
6083     _cms_bm(cms_bm)
6084 {
6085   assert(ref_processor() == NULL, "deliberately left NULL");
6086   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6087 }
6088 
6089 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6090   // if p points into _span, then mark corresponding bit in _markBitMap
6091   assert(obj->is_oop(), "expected an oop");
6092   HeapWord* addr = (HeapWord*)obj;
6093   if (_span.contains(addr)) {
6094     _verification_bm->mark(addr);
6095     if (!_cms_bm->isMarked(addr)) {
6096       oop(addr)->print();
6097       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));


6098       fatal("... aborting");
6099     }
6100   }
6101 }
6102 
6103 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6104 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6105 
6106 //////////////////////////////////////////////////
6107 // MarkRefsIntoAndScanClosure
6108 //////////////////////////////////////////////////
6109 
6110 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6111                                                        ReferenceProcessor* rp,
6112                                                        CMSBitMap* bit_map,
6113                                                        CMSBitMap* mod_union_table,
6114                                                        CMSMarkStack*  mark_stack,
6115                                                        CMSCollector* collector,
6116                                                        bool should_yield,
6117                                                        bool concurrent_precleaning):


6173            "overflow list was drained above");
6174 
6175     assert(_collector->no_preserved_marks(),
6176            "All preserved marks should have been restored above");
6177   }
6178 }
6179 
6180 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6181 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6182 
6183 void MarkRefsIntoAndScanClosure::do_yield_work() {
6184   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6185          "CMS thread should hold CMS token");
6186   assert_lock_strong(_freelistLock);
6187   assert_lock_strong(_bit_map->lock());
6188   // relinquish the free_list_lock and bitMapLock()
6189   _bit_map->lock()->unlock();
6190   _freelistLock->unlock();
6191   ConcurrentMarkSweepThread::desynchronize(true);
6192   _collector->stopTimer();
6193   if (PrintCMSStatistics != 0) {
6194     _collector->incrementYields();
6195   }
6196 
6197   // See the comment in coordinator_yield()
6198   for (unsigned i = 0;
6199        i < CMSYieldSleepCount &&
6200        ConcurrentMarkSweepThread::should_yield() &&
6201        !CMSCollector::foregroundGCIsActive();
6202        ++i) {
6203     os::sleep(Thread::current(), 1, false);
6204   }
6205 
6206   ConcurrentMarkSweepThread::synchronize(true);
6207   _freelistLock->lock_without_safepoint_check();
6208   _bit_map->lock()->lock_without_safepoint_check();
6209   _collector->startTimer();
6210 }
6211 
6212 ///////////////////////////////////////////////////////////
6213 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6214 //                                 MarkRefsIntoAndScanClosure
6215 ///////////////////////////////////////////////////////////


6331       // An object not (yet) reached by marking: we merely need to
6332       // compute its size so as to go look at the next block.
6333       assert(p->is_oop(true), "should be an oop");
6334       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6335     }
6336   }
6337   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6338   return size;
6339 }
6340 
6341 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6342   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6343          "CMS thread should hold CMS token");
6344   assert_lock_strong(_freelistLock);
6345   assert_lock_strong(_bitMap->lock());
6346   // relinquish the free_list_lock and bitMapLock()
6347   _bitMap->lock()->unlock();
6348   _freelistLock->unlock();
6349   ConcurrentMarkSweepThread::desynchronize(true);
6350   _collector->stopTimer();
6351   if (PrintCMSStatistics != 0) {
6352     _collector->incrementYields();
6353   }
6354 
6355   // See the comment in coordinator_yield()
6356   for (unsigned i = 0; i < CMSYieldSleepCount &&
6357                    ConcurrentMarkSweepThread::should_yield() &&
6358                    !CMSCollector::foregroundGCIsActive(); ++i) {
6359     os::sleep(Thread::current(), 1, false);
6360   }
6361 
6362   ConcurrentMarkSweepThread::synchronize(true);
6363   _freelistLock->lock_without_safepoint_check();
6364   _bitMap->lock()->lock_without_safepoint_check();
6365   _collector->startTimer();
6366 }
6367 
6368 
6369 //////////////////////////////////////////////////////////////////
6370 // SurvivorSpacePrecleanClosure
6371 //////////////////////////////////////////////////////////////////
6372 // This (single-threaded) closure is used to preclean the oops in
6373 // the survivor spaces.


6400     // the ones in CMS heap (i.e. in _span).
6401     new_oop->oop_iterate(_scanning_closure);
6402     // check if it's time to yield
6403     do_yield_check();
6404   }
6405   unsigned int after_count =
6406     GenCollectedHeap::heap()->total_collections();
6407   bool abort = (_before_count != after_count) ||
6408                _collector->should_abort_preclean();
6409   return abort ? 0 : size;
6410 }
6411 
6412 void SurvivorSpacePrecleanClosure::do_yield_work() {
6413   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6414          "CMS thread should hold CMS token");
6415   assert_lock_strong(_bit_map->lock());
6416   // Relinquish the bit map lock
6417   _bit_map->lock()->unlock();
6418   ConcurrentMarkSweepThread::desynchronize(true);
6419   _collector->stopTimer();
6420   if (PrintCMSStatistics != 0) {
6421     _collector->incrementYields();
6422   }
6423 
6424   // See the comment in coordinator_yield()
6425   for (unsigned i = 0; i < CMSYieldSleepCount &&
6426                        ConcurrentMarkSweepThread::should_yield() &&
6427                        !CMSCollector::foregroundGCIsActive(); ++i) {
6428     os::sleep(Thread::current(), 1, false);
6429   }
6430 
6431   ConcurrentMarkSweepThread::synchronize(true);
6432   _bit_map->lock()->lock_without_safepoint_check();
6433   _collector->startTimer();
6434 }
6435 
6436 // This closure is used to rescan the marked objects on the dirty cards
6437 // in the mod union table and the card table proper. In the parallel
6438 // case, although the bitMap is shared, we do a single read so the
6439 // isMarked() query is "safe".
6440 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6441   // Ignore mark word because we are running concurrent with mutators
6442   assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));


6555       return true;
6556     }
6557   }
6558   scanOopsInOop(addr);
6559   return true;
6560 }
6561 
6562 // We take a break if we've been at this for a while,
6563 // so as to avoid monopolizing the locks involved.
6564 void MarkFromRootsClosure::do_yield_work() {
6565   // First give up the locks, then yield, then re-lock
6566   // We should probably use a constructor/destructor idiom to
6567   // do this unlock/lock or modify the MutexUnlocker class to
6568   // serve our purpose. XXX
6569   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6570          "CMS thread should hold CMS token");
6571   assert_lock_strong(_bitMap->lock());
6572   _bitMap->lock()->unlock();
6573   ConcurrentMarkSweepThread::desynchronize(true);
6574   _collector->stopTimer();
6575   if (PrintCMSStatistics != 0) {
6576     _collector->incrementYields();
6577   }
6578 
6579   // See the comment in coordinator_yield()
6580   for (unsigned i = 0; i < CMSYieldSleepCount &&
6581                        ConcurrentMarkSweepThread::should_yield() &&
6582                        !CMSCollector::foregroundGCIsActive(); ++i) {
6583     os::sleep(Thread::current(), 1, false);
6584   }
6585 
6586   ConcurrentMarkSweepThread::synchronize(true);
6587   _bitMap->lock()->lock_without_safepoint_check();
6588   _collector->startTimer();
6589 }
6590 
6591 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6592   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6593   assert(_markStack->isEmpty(),
6594          "should drain stack to limit stack usage");
6595   // convert ptr to an oop preparatory to scanning
6596   oop obj = oop(ptr);
6597   // Ignore mark word in verification below, since we


6863 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6864 
6865 // Upon stack overflow, we discard (part of) the stack,
6866 // remembering the least address amongst those discarded
6867 // in CMSCollector's _restart_address.
6868 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6869   // Remember the least grey address discarded
6870   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6871   _collector->lower_restart_addr(ra);
6872   _mark_stack->reset();  // discard stack contents
6873   _mark_stack->expand(); // expand the stack if possible
6874 }
6875 
6876 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6877   assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6878   HeapWord* addr = (HeapWord*)obj;
6879   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6880     // Oop lies in _span and isn't yet grey or black
6881     _verification_bm->mark(addr);            // now grey
6882     if (!_cms_bm->isMarked(addr)) {
6883       oop(addr)->print();
6884       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6885                              p2i(addr));

6886       fatal("... aborting");
6887     }
6888 
6889     if (!_mark_stack->push(obj)) { // stack overflow
6890       if (PrintCMSStatistics != 0) {
6891         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6892                                SIZE_FORMAT, _mark_stack->capacity());
6893       }
6894       assert(_mark_stack->isFull(), "Else push should have succeeded");
6895       handle_stack_overflow(addr);
6896     }
6897     // anything including and to the right of _finger
6898     // will be scanned as we iterate over the remainder of the
6899     // bit map
6900   }
6901 }
6902 
6903 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6904                      MemRegion span,
6905                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6906                      HeapWord* finger, MarkFromRootsClosure* parent) :
6907   MetadataAwareOopClosure(collector->ref_processor()),
6908   _collector(collector),
6909   _span(span),
6910   _bitMap(bitMap),
6911   _markStack(markStack),
6912   _finger(finger),
6913   _parent(parent)


6973 void PushOrMarkClosure::do_oop(oop obj) {
6974   // Ignore mark word because we are running concurrent with mutators.
6975   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6976   HeapWord* addr = (HeapWord*)obj;
6977   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6978     // Oop lies in _span and isn't yet grey or black
6979     _bitMap->mark(addr);            // now grey
6980     if (addr < _finger) {
6981       // the bit map iteration has already either passed, or
6982       // sampled, this bit in the bit map; we'll need to
6983       // use the marking stack to scan this oop's oops.
6984       bool simulate_overflow = false;
6985       NOT_PRODUCT(
6986         if (CMSMarkStackOverflowALot &&
6987             _collector->simulate_overflow()) {
6988           // simulate a stack overflow
6989           simulate_overflow = true;
6990         }
6991       )
6992       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6993         if (PrintCMSStatistics != 0) {
6994           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6995                                  SIZE_FORMAT, _markStack->capacity());
6996         }
6997         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6998         handle_stack_overflow(addr);
6999       }
7000     }
7001     // anything including and to the right of _finger
7002     // will be scanned as we iterate over the remainder of the
7003     // bit map
7004     do_yield_check();
7005   }
7006 }
7007 
7008 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7009 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7010 
7011 void Par_PushOrMarkClosure::do_oop(oop obj) {
7012   // Ignore mark word because we are running concurrent with mutators.
7013   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
7014   HeapWord* addr = (HeapWord*)obj;
7015   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7016     // Oop lies in _span and isn't yet grey or black


7025     // -- else push on work queue
7026     if (   !res       // someone else marked it, they will deal with it
7027         || (addr >= *gfa)  // will be scanned in a later task
7028         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7029       return;
7030     }
7031     // the bit map iteration has already either passed, or
7032     // sampled, this bit in the bit map; we'll need to
7033     // use the marking stack to scan this oop's oops.
7034     bool simulate_overflow = false;
7035     NOT_PRODUCT(
7036       if (CMSMarkStackOverflowALot &&
7037           _collector->simulate_overflow()) {
7038         // simulate a stack overflow
7039         simulate_overflow = true;
7040       }
7041     )
7042     if (simulate_overflow ||
7043         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7044       // stack overflow
7045       if (PrintCMSStatistics != 0) {
7046         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7047                                SIZE_FORMAT, _overflow_stack->capacity());
7048       }
7049       // We cannot assert that the overflow stack is full because
7050       // it may have been emptied since.
7051       assert(simulate_overflow ||
7052              _work_queue->size() == _work_queue->max_elems(),
7053             "Else push should have succeeded");
7054       handle_stack_overflow(addr);
7055     }
7056     do_yield_check();
7057   }
7058 }
7059 
7060 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7061 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7062 
7063 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7064                                        MemRegion span,
7065                                        ReferenceProcessor* rp,
7066                                        CMSBitMap* bit_map,
7067                                        CMSBitMap* mod_union_table,
7068                                        CMSMarkStack*  mark_stack,


7190         _collector->par_push_on_overflow_list(obj);
7191         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7192       }
7193     } // Else, some other thread got there first
7194   }
7195 }
7196 
7197 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7198 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7199 
7200 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7201   Mutex* bml = _collector->bitMapLock();
7202   assert_lock_strong(bml);
7203   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7204          "CMS thread should hold CMS token");
7205 
7206   bml->unlock();
7207   ConcurrentMarkSweepThread::desynchronize(true);
7208 
7209   _collector->stopTimer();
7210   if (PrintCMSStatistics != 0) {
7211     _collector->incrementYields();
7212   }
7213 
7214   // See the comment in coordinator_yield()
7215   for (unsigned i = 0; i < CMSYieldSleepCount &&
7216                        ConcurrentMarkSweepThread::should_yield() &&
7217                        !CMSCollector::foregroundGCIsActive(); ++i) {
7218     os::sleep(Thread::current(), 1, false);
7219   }
7220 
7221   ConcurrentMarkSweepThread::synchronize(true);
7222   bml->lock();
7223 
7224   _collector->startTimer();
7225 }
7226 
7227 bool CMSPrecleanRefsYieldClosure::should_return() {
7228   if (ConcurrentMarkSweepThread::should_yield()) {
7229     do_yield_work();
7230   }
7231   return _collector->foregroundGCIsActive();
7232 }
7233 
7234 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7235   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7236          "mr should be aligned to start at a card boundary");
7237   // We'd like to assert:
7238   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7239   //        "mr should be a range of cards");
7240   // However, that would be too strong in one case -- the last
7241   // partition ends at _unallocated_block which, in general, can be
7242   // an arbitrary boundary, not necessarily card aligned.
7243   if (PrintCMSStatistics != 0) {
7244     _num_dirty_cards +=
7245          mr.word_size()/CardTableModRefBS::card_size_in_words;
7246   }
7247   _space->object_iterate_mem(mr, &_scan_cl);
7248 }
7249 
7250 SweepClosure::SweepClosure(CMSCollector* collector,
7251                            ConcurrentMarkSweepGeneration* g,
7252                            CMSBitMap* bitMap, bool should_yield) :
7253   _collector(collector),
7254   _g(g),
7255   _sp(g->cmsSpace()),
7256   _limit(_sp->sweep_limit()),
7257   _freelistLock(_sp->freelistLock()),
7258   _bitMap(bitMap),
7259   _yield(should_yield),
7260   _inFreeRange(false),           // No free range at beginning of sweep
7261   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7262   _lastFreeRangeCoalesced(false),
7263   _freeFinger(g->used_region().start())
7264 {
7265   NOT_PRODUCT(
7266     _numObjectsFreed = 0;
7267     _numWordsFreed   = 0;
7268     _numObjectsLive = 0;
7269     _numWordsLive = 0;
7270     _numObjectsAlreadyFree = 0;
7271     _numWordsAlreadyFree = 0;
7272     _last_fc = NULL;
7273 
7274     _sp->initializeIndexedFreeListArrayReturnedBytes();
7275     _sp->dictionary()->initialize_dict_returned_bytes();
7276   )
7277   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7278          "sweep _limit out of bounds");
7279   if (CMSTraceSweeper) {
7280     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7281                         p2i(_limit));
7282   }
7283 }
7284 
7285 void SweepClosure::print_on(outputStream* st) const {
7286   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7287                 p2i(_sp->bottom()), p2i(_sp->end()));
7288   tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7289   tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7290   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7291   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7292                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7293 }
7294 
7295 #ifndef PRODUCT
7296 // Assertion checking only:  no useful work in product mode --
7297 // however, if any of the flags below become product flags,
7298 // you may need to review this code to see if it needs to be
7299 // enabled in product mode.
7300 SweepClosure::~SweepClosure() {
7301   assert_lock_strong(_freelistLock);
7302   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7303          "sweep _limit out of bounds");
7304   if (inFreeRange()) {
7305     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7306     print();
7307     ShouldNotReachHere();
7308   }
7309   if (Verbose && PrintGC) {
7310     gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",

7311                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7312     gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
7313                            SIZE_FORMAT " bytes  "
7314       "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7315       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7316       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7317     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7318                         * sizeof(HeapWord);
7319     gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7320 
7321     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7322       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7323       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7324       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7325       gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7326       gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7327         indexListReturnedBytes);
7328       gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7329         dict_returned_bytes);
7330     }
7331   }
7332   if (CMSTraceSweeper) {
7333     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7334                            p2i(_limit));
7335   }


7336 }
7337 #endif  // PRODUCT
7338 
7339 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7340     bool freeRangeInFreeLists) {
7341   if (CMSTraceSweeper) {
7342     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7343                p2i(freeFinger), freeRangeInFreeLists);
7344   }
7345   assert(!inFreeRange(), "Trampling existing free range");
7346   set_inFreeRange(true);
7347   set_lastFreeRangeCoalesced(false);
7348 
7349   set_freeFinger(freeFinger);
7350   set_freeRangeInFreeLists(freeRangeInFreeLists);
7351 }
7352 
7353 // Note that the sweeper runs concurrently with mutators. Thus,
7354 // it is possible for direct allocation in this generation to happen
7355 // in the middle of the sweep. Note that the sweeper also coalesces
7356 // contiguous free blocks. Thus, unless the sweeper and the allocator
7357 // synchronize appropriately, freshly allocated blocks may get swept up.
7358 // This is accomplished by the sweeper locking the free lists while
7359 // it is sweeping. Thus blocks that are determined to be free are
7360 // indeed free. There is however one additional complication:
7361 // blocks that have been allocated since the final checkpoint and
7362 // mark, will not have been marked and so would be treated as
7363 // unreachable and swept up. To prevent this, the allocator marks
7364 // the bit map when allocating during the sweep phase. This leads,


7382   size_t res;
7383 
7384   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7385   // than "addr == _limit" because although _limit was a block boundary when
7386   // we started the sweep, it may no longer be one because heap expansion
7387   // may have caused us to coalesce the block ending at the address _limit
7388   // with a newly expanded chunk (this happens when _limit was set to the
7389   // previous _end of the space), so we may have stepped past _limit:
7390   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7391   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7392     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7393            "sweep _limit out of bounds");
7394     assert(addr < _sp->end(), "addr out of bounds");
7395     // Flush any free range we might be holding as a single
7396     // coalesced chunk to the appropriate free list.
7397     if (inFreeRange()) {
7398       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7399              "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7400       flush_cur_free_chunk(freeFinger(),
7401                            pointer_delta(addr, freeFinger()));
7402       if (CMSTraceSweeper) {
7403         gclog_or_tty->print("Sweep: last chunk: ");
7404         gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7405                    "[coalesced:%d]\n",
7406                    p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7407                    lastFreeRangeCoalesced() ? 1 : 0);
7408       }
7409     }
7410 
7411     // help the iterator loop finish
7412     return pointer_delta(_sp->end(), addr);
7413   }
7414 
7415   assert(addr < _limit, "sweep invariant");
7416   // check if we should yield
7417   do_yield_check(addr);
7418   if (fc->is_free()) {
7419     // Chunk that is already free
7420     res = fc->size();
7421     do_already_free_chunk(fc);
7422     debug_only(_sp->verifyFreeLists());
7423     // If we flush the chunk at hand in lookahead_and_flush()
7424     // and it's coalesced with a preceding chunk, then the
7425     // process of "mangling" the payload of the coalesced block
7426     // will cause erasure of the size information from the
7427     // (erstwhile) header of all the coalesced blocks but the
7428     // first, so the first disjunct in the assert will not hold
7429     // in that specific case (in which case the second disjunct


7591            "Should be an initialized object");
7592     // Ignore mark word because we are running concurrent with mutators
7593     assert(oop(addr)->is_oop(true), "live block should be an oop");
7594     // Verify that the bit map has no bits marked between
7595     // addr and purported end of this block.
7596     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7597     assert(size >= 3, "Necessary for Printezis marks to work");
7598     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7599     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7600   }
7601   return size;
7602 }
7603 
7604 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7605                                                  size_t chunkSize) {
7606   // do_post_free_or_garbage_chunk() should only be called in the case
7607   // of the adaptive free list allocator.
7608   const bool fcInFreeLists = fc->is_free();
7609   assert((HeapWord*)fc <= _limit, "sweep invariant");
7610 
7611   if (CMSTraceSweeper) {
7612     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7613   }
7614 
7615   HeapWord* const fc_addr = (HeapWord*) fc;
7616 
7617   bool coalesce = false;
7618   const size_t left  = pointer_delta(fc_addr, freeFinger());
7619   const size_t right = chunkSize;
7620   switch (FLSCoalescePolicy) {
7621     // numeric value forms a coalescing aggressiveness metric
7622     case 0:  { // never coalesce
7623       coalesce = false;
7624       break;
7625     }
7626     case 1: { // coalesce if left & right chunks on overpopulated lists
7627       coalesce = _sp->coalOverPopulated(left) &&
7628                  _sp->coalOverPopulated(right);
7629       break;
7630     }
7631     case 2: { // coalesce if left chunk on overpopulated list (default)
7632       coalesce = _sp->coalOverPopulated(left);
7633       break;


7690 // we'll look at because its end crosses past _limit, we'll preemptively
7691 // flush it along with any free range we may be holding on to. Note that
7692 // this can be the case only for an already free or freshly garbage
7693 // chunk. If this block is an object, it can never straddle
7694 // over _limit. The "straddling" occurs when _limit is set at
7695 // the previous end of the space when this cycle started, and
7696 // a subsequent heap expansion caused the previously co-terminal
7697 // free block to be coalesced with the newly expanded portion,
7698 // thus rendering _limit a non-block-boundary making it dangerous
7699 // for the sweeper to step over and examine.
7700 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7701   assert(inFreeRange(), "Should only be called if currently in a free range.");
7702   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7703   assert(_sp->used_region().contains(eob - 1),
7704          "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7705          " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7706          " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7707          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7708   if (eob >= _limit) {
7709     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7710     if (CMSTraceSweeper) {
7711       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7712                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7713                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7714                              p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7715     }
7716     // Return the storage we are tracking back into the free lists.
7717     if (CMSTraceSweeper) {
7718       gclog_or_tty->print_cr("Flushing ... ");
7719     }
7720     assert(freeFinger() < eob, "Error");
7721     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
7722   }
7723 }
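
A concrete instance of the straddling case described above (addresses purely illustrative): suppose the space originally ended at E, so _limit was set to E when this sweep started, and a free block [F, E) was co-terminal with the space. If the heap then expands to a new end E', that block may be coalesced with the expanded portion into a single chunk [F, E'). When the sweeper reaches F it computes eob = E' >= _limit = E, so lookahead_and_flush() returns the whole tracked free range to the free lists rather than stepping over the now non-block-boundary _limit.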
7724 
7725 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7726   assert(inFreeRange(), "Should only be called if currently in a free range.");
7727   assert(size > 0,
7728     "A zero sized chunk cannot be added to the free lists.");
7729   if (!freeRangeInFreeLists()) {
7730     if (CMSTraceSweeper) {
7731       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7732                     p2i(chunk), size);
7733     }
7734     // A new free range is going to be starting.  The current
7735     // free range has not been added to the free lists yet or
7736     // was removed so add it back.
7737     // If the current free range was coalesced, then the death
7738     // of the free range was recorded.  Record a birth now.
7739     if (lastFreeRangeCoalesced()) {
7740       _sp->coalBirth(size);
7741     }
7742     _sp->addChunkAndRepairOffsetTable(chunk, size,
7743             lastFreeRangeCoalesced());
7744   } else if (CMSTraceSweeper) {
7745     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7746   }
7747   set_inFreeRange(false);
7748   set_freeRangeInFreeLists(false);
7749 }
7750 
7751 // We take a break if we've been at this for a while,
7752 // so as to avoid monopolizing the locks involved.
7753 void SweepClosure::do_yield_work(HeapWord* addr) {
7754   // Return current free chunk being used for coalescing (if any)
7755   // to the appropriate freelist.  After yielding, the next
7756   // free block encountered will start a coalescing range of
7757   // free blocks.  If the next free block is adjacent to the
7758   // chunk just flushed, they will need to wait for the next
7759   // sweep to be coalesced.
7760   if (inFreeRange()) {
7761     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7762   }
7763 
7764   // First give up the locks, then yield, then re-lock.
7765   // We should probably use a constructor/destructor idiom to
7766   // do this unlock/lock or modify the MutexUnlocker class to
7767   // serve our purpose. XXX
7768   assert_lock_strong(_bitMap->lock());
7769   assert_lock_strong(_freelistLock);
7770   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7771          "CMS thread should hold CMS token");
7772   _bitMap->lock()->unlock();
7773   _freelistLock->unlock();
7774   ConcurrentMarkSweepThread::desynchronize(true);
7775   _collector->stopTimer();
7776   if (PrintCMSStatistics != 0) {
7777     _collector->incrementYields();
7778   }
7779 
7780   // See the comment in coordinator_yield()
7781   for (unsigned i = 0; i < CMSYieldSleepCount &&
7782                        ConcurrentMarkSweepThread::should_yield() &&
7783                        !CMSCollector::foregroundGCIsActive(); ++i) {
7784     os::sleep(Thread::current(), 1, false);
7785   }
7786 
7787   ConcurrentMarkSweepThread::synchronize(true);
7788   _freelistLock->lock();
7789   _bitMap->lock()->lock_without_safepoint_check();
7790   _collector->startTimer();
7791 }
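
The comment above (and its twins in the other do_yield_work() variants in this file) suggests a constructor/destructor idiom for the unlock/re-lock pair. A hedged sketch of what such a scoped helper could look like; it is hypothetical, covers only the lock handling, and leaves the token desynchronization and timer bookkeeping to the caller:

    // Sketch only: release a mutex for the lifetime of the scope and
    // re-take it without a safepoint check on exit, in the spirit of
    // MutexUnlocker but matching the CMS yield protocol above.
    class ScopedYieldUnlocker : public StackObj {
      Mutex* _m;
     public:
      explicit ScopedYieldUnlocker(Mutex* m) : _m(m) { _m->unlock(); }
      ~ScopedYieldUnlocker() { _m->lock_without_safepoint_check(); }
    };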
7792 
7793 #ifndef PRODUCT
7794 // This is actually very useful in a product build if it can
7795 // be called from the debugger.  Compile it into the product
7796 // as needed.
7797 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7798   return debug_cms_space->verify_chunk_in_free_list(fc);
7799 }
7800 #endif
7801 
7802 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7803   if (CMSTraceSweeper) {
7804     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7805                            p2i(fc), fc->size());
7806   }
7807 }
7808 
7809 // CMSIsAliveClosure
7810 bool CMSIsAliveClosure::do_object_b(oop obj) {
7811   HeapWord* addr = (HeapWord*)obj;
7812   return addr != NULL &&
7813          (!_span.contains(addr) || _bit_map->isMarked(addr));
7814 }
7815 
7816 
7817 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
7818                       MemRegion span,
7819                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7820                       bool cpc):
7821   _collector(collector),
7822   _span(span),
7823   _bit_map(bit_map),
7824   _mark_stack(mark_stack),
7825   _concurrent_precleaning(cpc) {
7826   assert(!_span.is_empty(), "Empty span could spell trouble");




  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "gc/shared/gcTimer.hpp"
  48 #include "gc/shared/gcTrace.hpp"
  49 #include "gc/shared/gcTraceTime.hpp"
  50 #include "gc/shared/genCollectedHeap.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/strongRootsScope.hpp"
  55 #include "gc/shared/taskqueue.inline.hpp"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/iterator.inline.hpp"
  59 #include "memory/padded.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "runtime/atomic.inline.hpp"
  64 #include "runtime/globals_extension.hpp"
  65 #include "runtime/handles.inline.hpp"
  66 #include "runtime/java.hpp"
  67 #include "runtime/orderAccess.inline.hpp"
  68 #include "runtime/vmThread.hpp"
  69 #include "services/memoryService.hpp"
  70 #include "services/runtimeService.hpp"
  71 #include "utilities/stack.inline.hpp"
  72 
  73 // statics
  74 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  75 bool CMSCollector::_full_gc_requested = false;
  76 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;


 350 // young generation collection.
 351 double CMSStats::time_until_cms_gen_full() const {
 352   size_t cms_free = _cms_gen->cmsSpace()->free();
 353   GenCollectedHeap* gch = GenCollectedHeap::heap();
 354   size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
 355                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 356   if (cms_free > expected_promotion) {
 357     // Start a cms collection if there isn't enough space to promote
 358     // for the next young collection.  Use the padded average as
 359     // a safety factor.
 360     cms_free -= expected_promotion;
 361 
 362     // Adjust by the safety factor.
 363     double cms_free_dbl = (double)cms_free;
 364     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
 365     // Apply a further correction factor which tries to adjust
 366     // for recent occurrence of concurrent mode failures.
 367     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 368     cms_free_dbl = cms_free_dbl * cms_adjustment;
 369 
 370     log_develop(gc, stats)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,


 371                            cms_free, expected_promotion);
 372     log_develop(gc, stats)("  cms_free_dbl %f cms_consumption_rate %f",
 373                            cms_free_dbl, cms_consumption_rate() + 1.0);

 374     // Add 1 in case the consumption rate goes to zero.
 375     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 376   }
 377   return 0.0;
 378 }
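
To make the estimate above concrete with purely illustrative numbers: with cms_free = 400 MB, expected_promotion = 100 MB, CMSIncrementalSafetyFactor = 10 and a free-adjustment factor of 1.0, the adjusted free space is (400 - 100) MB * 0.90 = 270 MB; at a consumption rate of 26 MB/s the method then predicts roughly 270 / (26 + 1) = 10 seconds until the CMS generation is full.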
 379 
 380 // Compare the duration of the cms collection to the
 381 // time remaining before the cms generation is empty.
 382 // Note that the time from the start of the cms collection
 383 // to the start of the cms sweep (less than the total
 384 // duration of the cms collection) can be used.  This
 385 // has been tried and some applications experienced
 386 // promotion failures early in execution.  This was
 387 // possibly because the averages were not accurate
 388 // enough at the beginning.
 389 double CMSStats::time_until_cms_start() const {
 390   // We add "gc0_period" to the "work" calculation
 391   // below because this query is done (mostly) at the
 392   // end of a scavenge, so we need to conservatively
 393   // account for that much possible delay
 394   // in the query so as to avoid concurrent mode failures
 395   // due to starting the collection just a wee bit too
 396   // late.
 397   double work = cms_duration() + gc0_period();
 398   double deadline = time_until_cms_gen_full();
 399   // If a concurrent mode failure occurred recently, we want to be
 400   // more conservative and halve our expected time_until_cms_gen_full()
 401   if (work > deadline) {
 402     log_develop(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
 403                     cms_duration(), gc0_period(), time_until_cms_gen_full());




 404     return 0.0;
 405   }
 406   return work - deadline;
 407 }
 408 
 409 #ifndef PRODUCT
 410 void CMSStats::print_on(outputStream *st) const {
 411   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 412   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 413                gc0_duration(), gc0_period(), gc0_promoted());
 414   st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 415             cms_duration(), cms_period(), cms_allocated());
 416   st->print(",cms_since_beg=%g,cms_since_end=%g",
 417             cms_time_since_begin(), cms_time_since_end());
 418   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 419             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 420 
 421   if (valid()) {
 422     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 423               promotion_rate(), cms_allocation_rate());


 645 //
 646 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 647   if (UsePerfData) {
 648     _space_counters->update_used(used);
 649     _space_counters->update_capacity();
 650     _gen_counters->update_all();
 651   }
 652 }
 653 
 654 void ConcurrentMarkSweepGeneration::print() const {
 655   Generation::print();
 656   cmsSpace()->print();
 657 }
 658 
 659 #ifndef PRODUCT
 660 void ConcurrentMarkSweepGeneration::print_statistics() {
 661   cmsSpace()->printFLCensus(0);
 662 }
 663 #endif
 664 

























 665 size_t
 666 ConcurrentMarkSweepGeneration::contiguous_available() const {
 667   // dld proposes an improvement in precision here. If the committed
 668   // part of the space ends in a free block we should add that to
 669   // uncommitted size in the calculation below. Will make this
 670   // change later, staying with the approximation below for the
 671   // time being. -- ysr.
 672   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 673 }
 674 
 675 size_t
 676 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 677   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 678 }
 679 
 680 size_t ConcurrentMarkSweepGeneration::max_available() const {
 681   return free() + _virtual_space.uncommitted_size();
 682 }
 683 
 684 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 685   size_t available = max_available();
 686   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 687   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 688   log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
 689                            res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);





 690   return res;
 691 }
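
With illustrative numbers only: if max_available() is 96 MB, the padded average promotion is 40 MB and the caller passes max_promotion_in_bytes = 80 MB, the attempt is judged safe because 96 >= 40 already satisfies the first disjunct; the 80 MB worst case is never consulted.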
 692 
 693 // At a promotion failure dump information on block layout in heap
 694 // (cms old generation).
 695 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 696   LogHandle(gc, promotion) log;
 697   if (log.is_trace()) {
 698     ResourceMark rm;
 699     cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
 700   }
 701 }
 702 
 703 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 704   // Clear the promotion information.  These pointers can be adjusted
 705   // along with all the other pointers into the heap but
 706   // compaction is expected to be a rare event with
 707   // a heap using cms so don't do it without seeing the need.
 708   for (uint i = 0; i < ParallelGCThreads; i++) {
 709     _par_gc_thread_states[i]->promo.reset();
 710   }
 711 }
 712 
 713 void ConcurrentMarkSweepGeneration::compute_new_size() {
 714   assert_locked_or_safepoint(Heap_lock);
 715 
 716   // If incremental collection failed, we just want to expand
 717   // to the limit.
 718   if (incremental_collection_failed()) {
 719     clear_incremental_collection_failed();


 735 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 736   assert_locked_or_safepoint(Heap_lock);
 737 
 738   // If incremental collection failed, we just want to expand
 739   // to the limit.
 740   if (incremental_collection_failed()) {
 741     clear_incremental_collection_failed();
 742     grow_to_reserved();
 743     return;
 744   }
 745 
 746   double free_percentage = ((double) free()) / capacity();
 747   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 748   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 749 
 750   // compute expansion delta needed for reaching desired free percentage
 751   if (free_percentage < desired_free_percentage) {
 752     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 753     assert(desired_capacity >= capacity(), "invalid expansion size");
 754     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 755     LogHandle(gc) log;
 756     if (log.is_trace()) {
 757       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 758       log.trace("From compute_new_size: ");
 759       log.trace("  Free fraction %f", free_percentage);
 760       log.trace("  Desired free fraction %f", desired_free_percentage);
 761       log.trace("  Maximum free fraction %f", maximum_free_percentage);
 762       log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
 763       log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
 764       GenCollectedHeap* gch = GenCollectedHeap::heap();
 765       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
 766       size_t young_size = gch->young_gen()->capacity();
 767       log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
 768       log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
 769       log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
 770       log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
 771     }
 772     // safe if expansion fails
 773     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 774     log.trace("  Expanded free fraction %f", ((double) free()) / capacity());


 775   } else {
 776     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 777     assert(desired_capacity <= capacity(), "invalid expansion size");
 778     size_t shrink_bytes = capacity() - desired_capacity;
 779     // Don't shrink unless the delta is greater than the minimum shrink we want
 780     if (shrink_bytes >= MinHeapDeltaBytes) {
 781       shrink_free_list_by(shrink_bytes);
 782     }
 783   }
 784 }
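
As a worked example with made-up numbers: if used() is 600 MB, capacity() is 800 MB and MinHeapFreeRatio is 40, the free fraction is 200/800 = 0.25 < 0.40, so desired_capacity = 600 MB / (1 - 0.40) = 1000 MB and the code requests an expansion of MAX2(1000 MB - 800 MB, MinHeapDeltaBytes) = 200 MB (assuming MinHeapDeltaBytes is smaller). Conversely, with used() = 300 MB at the same capacity the free fraction is already 0.625, desired_capacity = 500 MB, and the resulting 300 MB delta is offered to shrink_free_list_by() provided it exceeds MinHeapDeltaBytes.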
 785 
 786 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
 787   return cmsSpace()->freelistLock();
 788 }
 789 
 790 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
 791   CMSSynchronousYieldRequest yr;
 792   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
 793   return have_lock_and_allocate(size, tlab);
 794 }


1092 ConcurrentMarkSweepGeneration::
1093 par_oop_since_save_marks_iterate_done(int thread_num) {
1094   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1095   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1096   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1097 }
1098 
1099 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1100                                                    size_t size,
1101                                                    bool   tlab)
1102 {
1103   // We allow a STW collection only if a full
1104   // collection was requested.
1105   return full || should_allocate(size, tlab); // FIX ME !!!
1106   // This and promotion failure handling are connected at the
1107   // hip and should be fixed by untying them.
1108 }
1109 
1110 bool CMSCollector::shouldConcurrentCollect() {
1111   if (_full_gc_requested) {
1112     log_trace(gc, conc)("CMSCollector: collect because of explicit  gc request (or gc_locker)");



1113     return true;
1114   }
1115 
1116   FreelistLocker x(this);
1117   // ------------------------------------------------------------------
1118   // Print out lots of information which affects the initiation of
1119   // a collection.
1120   LogHandle(gc, conc, stats) log;
1121   if (log.is_debug() && stats().valid()) {
1122     log.debug("CMSCollector shouldConcurrentCollect: ");
1123     ResourceMark rm;
1124     stats().print_on(log.debug_stream());
1125     log.debug("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
1126     log.debug("free=" SIZE_FORMAT, _cmsGen->free());
1127     log.debug("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
1128     log.debug("promotion_rate=%g", stats().promotion_rate());
1129     log.debug("cms_allocation_rate=%g", stats().cms_allocation_rate());
1130     log.debug("occupancy=%3.7f", _cmsGen->occupancy());
1131     log.debug("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1132     log.debug("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1133     log.debug("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1134     log.debug("metadata initialized %d", MetaspaceGC::should_concurrent_collect());



1135   }
1136   // ------------------------------------------------------------------
1137 
1138   // If the estimated time to complete a cms collection (cms_duration())
1139   // is less than the estimated time remaining until the cms generation
1140   // is full, start a collection.
1141   if (!UseCMSInitiatingOccupancyOnly) {
1142     if (stats().valid()) {
1143       if (stats().time_until_cms_start() == 0.0) {
1144         return true;
1145       }
1146     } else {
1147       // We want to conservatively collect somewhat early in order
1148       // to try and "bootstrap" our CMS/promotion statistics;
1149       // this branch will not fire after the first successful CMS
1150       // collection because the stats should then be valid.
1151       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1152         log_debug(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
1153                       _cmsGen->occupancy(), _bootstrap_occupancy);




1154         return true;
1155       }
1156     }
1157   }
1158 
1159   // Otherwise, we start a collection cycle if
1160   // the old gen wants a collection cycle started. It may use
1161   // an appropriate criterion for making this decision.
1162   // XXX We need to make sure that the gen expansion
1163   // criterion dovetails well with this. XXX NEED TO FIX THIS
1164   if (_cmsGen->should_concurrent_collect()) {
1165     log_trace(gc)("CMS old gen initiated");


1166     return true;
1167   }
1168 
1169   // We start a collection if we believe an incremental collection may fail;
1170   // this is not likely to be productive in practice because it's probably too
1171   // late anyway.
1172   GenCollectedHeap* gch = GenCollectedHeap::heap();
1173   assert(gch->collector_policy()->is_generation_policy(),
1174          "You may want to check the correctness of the following");
1175   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1176     log_trace(gc)("CMSCollector: collect because incremental collection will fail ");


1177     return true;
1178   }
1179 
1180   if (MetaspaceGC::should_concurrent_collect()) {
1181     log_trace(gc)("CMSCollector: collect for metadata allocation ");


1182     return true;
1183   }
1184 
1185   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1186   if (CMSTriggerInterval >= 0) {
1187     if (CMSTriggerInterval == 0) {
1188       // Trigger always
1189       return true;
1190     }
1191 
1192     // Check the CMS time since begin (we do not check the stats validity
1193     // as we want to be able to trigger the first CMS cycle as well)
1194     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {

1195       if (stats().valid()) {
1196         log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1197                       stats().cms_time_since_begin());
1198       } else {
1199         log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)");

1200       }
1201       return true;
1202     }
1203   }
1204 
1205   return false;
1206 }
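// Note: CMSTriggerInterval is specified in milliseconds, while
// stats().cms_time_since_begin() reports seconds, hence the division by
// MILLIUNITS above. An illustrative sketch of the same comparison with the
// unit conversion made explicit (hypothetical helper, not HotSpot API):
//
//   bool trigger_interval_elapsed(double secs_since_last_begin, long interval_ms) {
//     if (interval_ms < 0)  return false;             // criterion disabled
//     if (interval_ms == 0) return true;              // always trigger
//     double threshold_secs = interval_ms / 1000.0;   // ms -> s, what MILLIUNITS expresses
//     return secs_since_last_begin >= threshold_secs;
//   }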
1207 
1208 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1209 
1210 // Clear _expansion_cause fields of constituent generations
1211 void CMSCollector::clear_expansion_cause() {
1212   _cmsGen->clear_expansion_cause();
1213 }
1214 
1215 // We should be conservative in starting a collection cycle.  To
1216 // start too eagerly runs the risk of collecting too often in the
1217 // extreme.  To collect too rarely falls back on full collections,
1218 // which works, even if not optimal in terms of concurrent work.
1219 // As a workaround for collecting too eagerly, use the flag


1222 // collections.
1223 // We want to start a new collection cycle if any of the following
1224 // conditions hold:
1225 // . our current occupancy exceeds the configured initiating occupancy
1226 //   for this generation, or
1227 // . we recently needed to expand this space and have not, since that
1228 //   expansion, done a collection of this generation, or
1229 // . the underlying space believes that it may be a good idea to initiate
1230 //   a concurrent collection (this may be based on criteria such as the
1231 //   following: the space uses linear allocation and linear allocation is
1232 //   going to fail, or there is believed to be excessive fragmentation in
1233 //   the generation, etc... or ...
1234 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1235 //   the case of the old generation; see CR 6543076):
1236 //   we may be approaching a point at which allocation requests may fail because
1237 //   we will be out of sufficient free space given allocation rate estimates.]
1238 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1239 
1240   assert_lock_strong(freelistLock());
1241   if (occupancy() > initiating_occupancy()) {
1242     log_trace(gc)(" %s: collect because of occupancy %f / %f  ",

1243                   short_name(), occupancy(), initiating_occupancy());

1244     return true;
1245   }
1246   if (UseCMSInitiatingOccupancyOnly) {
1247     return false;
1248   }
1249   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1250     log_trace(gc)(" %s: collect because expanded for allocation ", short_name());



1251     return true;
1252   }
1253   return false;
1254 }
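// The occupancy test above boils down to comparing used()/capacity() against
// the initiating occupancy, which with the usual defaults works out to roughly
// 0.92 (it is derived from MinHeapFreeRatio and CMSTriggerRatio unless
// CMSInitiatingOccupancyFraction is set explicitly). A minimal sketch of the
// comparison (hypothetical helpers, for illustration only):
//
//   double occupancy_of(size_t used_bytes, size_t capacity_bytes) {
//     return (double) used_bytes / (double) capacity_bytes;
//   }
//   bool over_initiating_occupancy(size_t used_bytes, size_t capacity_bytes,
//                                  double initiating_occupancy /* e.g. 0.92 */) {
//     return occupancy_of(used_bytes, capacity_bytes) > initiating_occupancy;
//   }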
1255 
1256 void ConcurrentMarkSweepGeneration::collect(bool   full,
1257                                             bool   clear_all_soft_refs,
1258                                             size_t size,
1259                                             bool   tlab)
1260 {
1261   collector()->collect(full, clear_all_soft_refs, size, tlab);
1262 }
1263 
1264 void CMSCollector::collect(bool   full,
1265                            bool   clear_all_soft_refs,
1266                            size_t size,
1267                            bool   tlab)
1268 {
1269   // The following "if" branch is present for defensive reasons.
1270   // In the current uses of this interface, it can be replaced with:


1287   GenCollectedHeap* gch = GenCollectedHeap::heap();
1288   unsigned int gc_count = gch->total_full_collections();
1289   if (gc_count == full_gc_count) {
1290     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1291     _full_gc_requested = true;
1292     _full_gc_cause = cause;
1293     CGC_lock->notify();   // nudge CMS thread
1294   } else {
1295     assert(gc_count > full_gc_count, "Error: causal loop");
1296   }
1297 }
1298 
1299 bool CMSCollector::is_external_interruption() {
1300   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1301   return GCCause::is_user_requested_gc(cause) ||
1302          GCCause::is_serviceability_requested_gc(cause);
1303 }
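// For reference, a non-exhaustive expansion of the predicate above (the
// authoritative cause sets live in GCCause; this is only a sketch):
//
//   bool looks_external(GCCause::Cause c) {
//     return c == GCCause::_java_lang_system_gc   // user requested, e.g. System.gc()
//         || c == GCCause::_jvmti_force_gc        // serviceability requested
//         || c == GCCause::_heap_inspection       //   (tooling-driven causes)
//         || c == GCCause::_heap_dump;
//   }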
1304 
1305 void CMSCollector::report_concurrent_mode_interruption() {
1306   if (is_external_interruption()) {
1307     log_debug(gc)("Concurrent mode interrupted");


1308   } else {
1309     log_debug(gc)("Concurrent mode failure");


1310     _gc_tracer_cm->report_concurrent_mode_failure();
1311   }
1312 }
1313 
1314 
1315 // The foreground and background collectors need to coordinate in order
1316 // to make sure that they do not mutually interfere with CMS collections.
1317 // When a background collection is active,
1318 // the foreground collector may need to take over (preempt) and
1319 // synchronously complete an ongoing collection. Depending on the
1320 // frequency of the background collections and the heap usage
1321 // of the application, this preemption can be rare or frequent.
1322 // There are only certain
1323 // points in the background collection at which the "collection-baton"
1324 // can be passed to the foreground collector.
1325 //
1326 // The foreground collector will wait for the baton before
1327 // starting any part of the collection.  The foreground collector
1328 // will only wait at one location.
1329 //


1423       CGC_lock->notify();
1424       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1425              "Possible deadlock");
1426       while (_foregroundGCShouldWait) {
1427         // wait for notification
1428         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1429         // Possibility of delay/starvation here, since the CMS token does
1430         // not know to give priority to the VM thread? Actually, I think
1431         // there wouldn't be any delay/starvation, but the proof of
1432         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1433       }
1434       ConcurrentMarkSweepThread::set_CMS_flag(
1435         ConcurrentMarkSweepThread::CMS_vm_has_token);
1436     }
1437   }
1438   // The CMS_token is already held.  Get back the other locks.
1439   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1440          "VM thread should have CMS token");
1441   getFreelistLocks();
1442   bitMapLock()->lock_without_safepoint_check();
1443   log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1444                        p2i(Thread::current()), first_state);
1445   log_debug(gc, state)("    gets control with state %d", _collectorState);


1446 
1447   // Inform cms gen if this was due to partial collection failing.
1448   // The CMS gen may use this fact to determine its expansion policy.
1449   GenCollectedHeap* gch = GenCollectedHeap::heap();
1450   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1451     assert(!_cmsGen->incremental_collection_failed(),
1452            "Should have been noticed, reacted to and cleared");
1453     _cmsGen->set_incremental_collection_failed();
1454   }
1455 
1456   if (first_state > Idling) {
1457     report_concurrent_mode_interruption();
1458   }
1459 
1460   set_did_compact(true);
1461 
1462   // If the collection is being acquired from the background
1463   // collector, there may be references on the discovered
1464   // references lists.  Abandon those references, since some
1465   // of them may have become unreachable after concurrent


1499 // after obtaining the free list locks for the
1500 // two generations.
1501 void CMSCollector::compute_new_size() {
1502   assert_locked_or_safepoint(Heap_lock);
1503   FreelistLocker z(this);
1504   MetaspaceGC::compute_new_size();
1505   _cmsGen->compute_new_size_free_list();
1506 }
1507 
1508 // A work method used by the foreground collector to do
1509 // a mark-sweep-compact.
1510 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1511   GenCollectedHeap* gch = GenCollectedHeap::heap();
1512 
1513   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1514   gc_timer->register_gc_start();
1515 
1516   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1517   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1518 
1519   GCTraceTime(Trace, gc) t("CMS:MSC");
1520 
1521   // Temporarily widen the span of the weak reference processing to
1522   // the entire heap.
1523   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1524   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1525   // Temporarily, clear the "is_alive_non_header" field of the
1526   // reference processor.
1527   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1528   // Temporarily make reference _processing_ single threaded (non-MT).
1529   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1530   // Temporarily make refs discovery atomic
1531   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1532   // Temporarily make reference _discovery_ single threaded (non-MT)
1533   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1534 
1535   ref_processor()->set_enqueuing_is_done(false);
1536   ref_processor()->enable_discovery();
1537   ref_processor()->setup_policy(clear_all_soft_refs);
1538   // If an asynchronous collection finishes, the _modUnionTable is
1539   // all clear.  If we are assuming the collection from an asynchronous


1584   // Clear any data recorded in the PLAB chunk arrays.
1585   if (_survivor_plab_array != NULL) {
1586     reset_survivor_plab_arrays();
1587   }
1588 
1589   // Adjust the per-size allocation stats for the next epoch.
1590   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1591   // Restart the "inter sweep timer" for the next epoch.
1592   _inter_sweep_timer.reset();
1593   _inter_sweep_timer.start();
1594 
1595   gc_timer->register_gc_end();
1596 
1597   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1598 
1599   // For a mark-sweep-compact, compute_new_size() will be called
1600   // in the heap's do_collection() method.
1601 }
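// The ReferenceProcessor*Mutator objects used at the top of
// do_compaction_work() follow the RAII save/restore idiom: each constructor
// records the current setting and installs a temporary one for the duration
// of the compaction, and each destructor restores the saved value on scope
// exit. A generic sketch of that idiom (hypothetical helper, not HotSpot API):
//
//   class SaveRestoreBool {
//     bool* _flag;
//     bool  _saved;
//    public:
//     SaveRestoreBool(bool* flag, bool tmp_value) : _flag(flag), _saved(*flag) {
//       *_flag = tmp_value;   // install the temporary value
//     }
//     ~SaveRestoreBool() {
//       *_flag = _saved;      // restore the original value
//     }
//   };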
1602 
1603 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1604   LogHandle(gc, heap) log;
1605   if (!log.is_trace()) {
1606     return;
1607   }
1608 
1609   ContiguousSpace* eden_space = _young_gen->eden();
1610   ContiguousSpace* from_space = _young_gen->from();
1611   ContiguousSpace* to_space   = _young_gen->to();
1612   // Eden
1613   if (_eden_chunk_array != NULL) {
1614     log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1615               p2i(eden_space->bottom()), p2i(eden_space->top()),
1616               p2i(eden_space->end()), eden_space->capacity());
1617     log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,

1618               _eden_chunk_index, _eden_chunk_capacity);
1619     for (size_t i = 0; i < _eden_chunk_index; i++) {
1620       log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));

1621     }
1622   }
1623   // Survivor
1624   if (_survivor_chunk_array != NULL) {
1625     log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1626               p2i(from_space->bottom()), p2i(from_space->top()),
1627               p2i(from_space->end()), from_space->capacity());
1628     log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,

1629               _survivor_chunk_index, _survivor_chunk_capacity);
1630     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1631       log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));

1632     }
1633   }
1634 }
1635 
1636 void CMSCollector::getFreelistLocks() const {
1637   // Get locks for all free lists in all generations that this
1638   // collector is responsible for
1639   _cmsGen->freelistLock()->lock_without_safepoint_check();
1640 }
1641 
1642 void CMSCollector::releaseFreelistLocks() const {
1643   // Release locks for all free lists in all generations that this
1644   // collector is responsible for
1645   _cmsGen->freelistLock()->unlock();
1646 }
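// FreelistLocker, used elsewhere in this file (e.g. in shouldConcurrentCollect()
// and compute_new_size()), is the scoped counterpart of the two methods above.
// A minimal sketch of such a wrapper (illustrative; the real class is declared
// in the CMS headers):
//
//   class FreelistLockerSketch : public StackObj {
//     CMSCollector* _collector;
//    public:
//     FreelistLockerSketch(CMSCollector* c) : _collector(c) {
//       _collector->getFreelistLocks();       // lock on entry
//     }
//     ~FreelistLockerSketch() {
//       _collector->releaseFreelistLocks();   // unlock on scope exit
//     }
//   };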
1647 
1648 bool CMSCollector::haveFreelistLocks() const {
1649   // Check locks for all free lists in all generations that this
1650   // collector is responsible for
1651   assert_lock_strong(_cmsGen->freelistLock());


1715     // Signal that we are about to start a collection
1716     gch->increment_total_full_collections();  // ... starting a collection cycle
1717     _collection_count_start = gch->total_full_collections();
1718   }
1719 
1720   // Used for PrintGC
1721   size_t prev_used = 0;
1722   if (PrintGC && Verbose) {
1723     prev_used = _cmsGen->used();
1724   }
1725 
1726   // The change of the collection state is normally done at this level;
1727   // the exceptions are phases that are executed while the world is
1728   // stopped.  For those phases the change of state is done while the
1729   // world is stopped.  For baton passing purposes this allows the
1730   // background collector to finish the phase and change state atomically.
1731   // The foreground collector cannot wait on a phase that is done
1732   // while the world is stopped because the foreground collector already
1733   // has the world stopped and would deadlock.
1734   while (_collectorState != Idling) {
1735     log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",

1736                          p2i(Thread::current()), _collectorState);

1737     // The foreground collector
1738     //   holds the Heap_lock throughout its collection.
1739     //   holds the CMS token (but not the lock)
1740     //     except while it is waiting for the background collector to yield.
1741     //
1742     // The foreground collector should be blocked (not for long)
1743     //   if the background collector is about to start a phase
1744     //   executed with world stopped.  If the background
1745     //   collector has already started such a phase, the
1746     //   foreground collector is blocked waiting for the
1747     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1748     //   are executed in the VM thread.
1749     //
1750     // The locking order is
1751     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1752     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1753     //   CMS token  (claimed in
1754     //                stop_world_and_do() -->
1755     //                  safepoint_synchronize() -->
1756     //                    CMSThread::synchronize())
1757 
1758     {
1759       // Check if the FG collector wants us to yield.
1760       CMSTokenSync x(true); // is cms thread
1761       if (waitForForegroundGC()) {
1762         // We yielded to a foreground GC, nothing more to be
1763         // done this round.
1764         assert(_foregroundGCShouldWait == false, "We set it to false in "
1765                "waitForForegroundGC()");
1766         log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",


1767                              p2i(Thread::current()), _collectorState);

1768         return;
1769       } else {
1770         // The background collector can run but check to see if the
1771         // foreground collector has done a collection while the
1772         // background collector was waiting to get the CGC_lock
1773         // above.  If yes, break so that _foregroundGCShouldWait
1774         // is cleared before returning.
1775         if (_collectorState == Idling) {
1776           break;
1777         }
1778       }
1779     }
1780 
1781     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1782       "should be waiting");
1783 
1784     switch (_collectorState) {
1785       case InitialMarking:
1786         {
1787           ReleaseForegroundGC x(this);


1851         break;
1852       }
1853       case Resetting:
1854         // CMS heap resizing has been completed
1855         reset_concurrent();
1856         assert(_collectorState == Idling, "Collector state should "
1857           "have changed");
1858 
1859         MetaspaceGC::set_should_concurrent_collect(false);
1860 
1861         stats().record_cms_end();
1862         // Don't move the concurrent_phases_end() and compute_new_size()
1863         // calls here because a preempted background collection
1864         // has its state set to "Resetting".
1865         break;
1866       case Idling:
1867       default:
1868         ShouldNotReachHere();
1869         break;
1870     }
1871     log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",

1872                          p2i(Thread::current()), _collectorState);

1873     assert(_foregroundGCShouldWait, "block post-condition");
1874   }
1875 
1876   // Should this be in gc_epilogue?
1877   collector_policy()->counters()->update_counters();
1878 
1879   {
1880     // Clear _foregroundGCShouldWait and, in the event that the
1881     // foreground collector is waiting, notify it, before
1882     // returning.
1883     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1884     _foregroundGCShouldWait = false;
1885     if (_foregroundGCIsActive) {
1886       CGC_lock->notify();
1887     }
1888     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1889            "Possible deadlock");
1890   }
1891   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",


1892                        p2i(Thread::current()), _collectorState);
1893   log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1894                      prev_used / K, _cmsGen->used() / K, _cmsGen->capacity() / K);


1895 }
1896 
1897 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1898   _cms_start_registered = true;
1899   _gc_timer_cm->register_gc_start();
1900   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1901 }
1902 
1903 void CMSCollector::register_gc_end() {
1904   if (_cms_start_registered) {
1905     report_heap_summary(GCWhen::AfterGC);
1906 
1907     _gc_timer_cm->register_gc_end();
1908     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1909     _cms_start_registered = false;
1910   }
1911 }
1912 
1913 void CMSCollector::save_heap_summary() {
1914   GenCollectedHeap* gch = GenCollectedHeap::heap();


1926   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1927          "CMS thread should have CMS token");
1928   // Block the foreground collector until the
1929   // background collector decides whether to
1930   // yield.
1931   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1932   _foregroundGCShouldWait = true;
1933   if (_foregroundGCIsActive) {
1934     // The background collector yields to the
1935     // foreground collector and returns a value
1936     // indicating that it has yielded.  The foreground
1937     // collector can proceed.
1938     res = true;
1939     _foregroundGCShouldWait = false;
1940     ConcurrentMarkSweepThread::clear_CMS_flag(
1941       ConcurrentMarkSweepThread::CMS_cms_has_token);
1942     ConcurrentMarkSweepThread::set_CMS_flag(
1943       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1944     // Get a possibly blocked foreground thread going
1945     CGC_lock->notify();
1946     log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",

1947                          p2i(Thread::current()), _collectorState);

1948     while (_foregroundGCIsActive) {
1949       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1950     }
1951     ConcurrentMarkSweepThread::set_CMS_flag(
1952       ConcurrentMarkSweepThread::CMS_cms_has_token);
1953     ConcurrentMarkSweepThread::clear_CMS_flag(
1954       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1955   }
1956   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",

1957                        p2i(Thread::current()), _collectorState);

1958   return res;
1959 }
1960 
1961 // Because of the need to lock the free lists and other structures in
1962 // the collector, common to all the generations that the collector is
1963 // collecting, we need the gc_prologues of individual CMS generations
1964 // to delegate to their collector. It may have been simpler had the
1965 // current infrastructure allowed one to call a prologue on a
1966 // collector. In the absence of that we have the generation's
1967 // prologue delegate to the collector, which delegates back
1968 // some "local" work to a worker method in the individual generations
1969 // that it's responsible for collecting, while itself doing any
1970 // work common to all generations it's responsible for. A similar
1971 // comment applies to gc_epilogue().
1972 // The role of the variable _between_prologue_and_epilogue is to
1973 // enforce the invocation protocol.
1974 void CMSCollector::gc_prologue(bool full) {
1975   // Call gc_prologue_work() for the CMSGen
1976   // we are responsible for.
1977 


2034   // collecting.
2035   collector()->gc_prologue(full);
2036 }
2037 
2038 // This is a "private" interface for use by this generation's CMSCollector.
2039 // Not to be called directly by any other entity (for instance,
2040 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2041 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2042   bool registerClosure, ModUnionClosure* modUnionClosure) {
2043   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2044   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2045     "Should be NULL");
2046   if (registerClosure) {
2047     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2048   }
2049   cmsSpace()->gc_prologue();
2050   // Clear stat counters
2051   NOT_PRODUCT(
2052     assert(_numObjectsPromoted == 0, "check");
2053     assert(_numWordsPromoted   == 0, "check");
2054     log_develop(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",


2055                                  _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));

2056     _numObjectsAllocated = 0;
2057     _numWordsAllocated   = 0;
2058   )
2059 }
2060 
2061 void CMSCollector::gc_epilogue(bool full) {
2062   // The following locking discipline assumes that we are only called
2063   // when the world is stopped.
2064   assert(SafepointSynchronize::is_at_safepoint(),
2065          "world is stopped assumption");
2066 
2067   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2068   // if linear allocation blocks need to be appropriately marked to allow
2069   // the blocks to be parsable. We also check here whether we need to nudge the
2070   // CMS collector thread to start a new cycle (if it's not already active).
2071   assert(   Thread::current()->is_VM_thread()
2072          || (   CMSScavengeBeforeRemark
2073              && Thread::current()->is_ConcurrentGC_thread()),
2074          "Incorrect thread type for epilogue execution");
2075 


2112   _between_prologue_and_epilogue = false;  // ready for next cycle
2113 }
2114 
2115 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2116   collector()->gc_epilogue(full);
2117 
2118   // Also reset promotion tracking in par gc thread states.
2119   for (uint i = 0; i < ParallelGCThreads; i++) {
2120     _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2121   }
2122 }
2123 
2124 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2125   assert(!incremental_collection_failed(), "Should have been cleared");
2126   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2127   cmsSpace()->gc_epilogue();
2128   // Print stat counters
2129   NOT_PRODUCT(
2130     assert(_numObjectsAllocated == 0, "check");
2131     assert(_numWordsAllocated == 0, "check");
2132     log_develop(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",


2133                                      _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));

2134     _numObjectsPromoted = 0;
2135     _numWordsPromoted   = 0;
2136   )
2137 

2138   // The call down the chain in contiguous_available() needs the freelistLock,
2139   // so print this out before releasing the freelistLock.
2140   log_develop(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());


2141 }
2142 
2143 #ifndef PRODUCT
2144 bool CMSCollector::have_cms_token() {
2145   Thread* thr = Thread::current();
2146   if (thr->is_VM_thread()) {
2147     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2148   } else if (thr->is_ConcurrentGC_thread()) {
2149     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2150   } else if (thr->is_GC_task_thread()) {
2151     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2152            ParGCRareEvent_lock->owned_by_self();
2153   }
2154   return false;
2155 }
2156 #endif
2157 


2158 void
2159 CMSCollector::print_on_error(outputStream* st) {
2160   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2161   if (collector != NULL) {
2162     CMSBitMap* bitmap = &collector->_markBitMap;
2163     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2164     bitmap->print_on_error(st, " Bits: ");
2165 
2166     st->cr();
2167 
2168     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2169     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2170     mut_bitmap->print_on_error(st, " Bits: ");
2171   }
2172 }
2173 
2174 ////////////////////////////////////////////////////////
2175 // CMS Verification Support
2176 ////////////////////////////////////////////////////////
2177 // Following the remark phase, the following invariant
2178 // should hold -- each object in the CMS heap which is
2179 // marked in the verification_mark_bm() should also be marked in markBitMap().
2180 
2181 class VerifyMarkedClosure: public BitMapClosure {
2182   CMSBitMap* _marks;
2183   bool       _failed;
2184 
2185  public:
2186   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2187 
2188   bool do_bit(size_t offset) {
2189     HeapWord* addr = _marks->offsetToHeapWord(offset);
2190     if (!_marks->isMarked(addr)) {
2191       LogHandle(gc, verify) log;
2192       ResourceMark rm;
2193       oop(addr)->print_on(log.info_stream());
2194       log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2195       _failed = true;
2196     }
2197     return true;
2198   }
2199 
2200   bool failed() { return _failed; }
2201 };
2202 
2203 bool CMSCollector::verify_after_remark() {
2204   GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
2205   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2206   static bool init = false;
2207 
2208   assert(SafepointSynchronize::is_at_safepoint(),
2209          "Else mutations in object graph will make answer suspect");
2210   assert(have_cms_token(),
2211          "Else there may be mutual interference in use of "
2212          " verification data structures");
2213   assert(_collectorState > Marking && _collectorState <= Sweeping,
2214          "Else marking info checked here may be obsolete");
2215   assert(haveFreelistLocks(), "must hold free list locks");
2216   assert_lock_strong(bitMapLock());
2217 
2218 
2219   // Allocate marking bit map if not already allocated
2220   if (!init) { // first time
2221     if (!verification_mark_bm()->allocate(_span)) {
2222       return false;
2223     }
2224     init = true;


2247   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2248   // Update the saved marks which may affect the root scans.
2249   gch->save_marks();
2250 
2251   if (CMSRemarkVerifyVariant == 1) {
2252     // In this first variant of verification, we complete
2253     // all marking, then check if the new marks-vector is
2254     // a subset of the CMS marks-vector.
2255     verify_after_remark_work_1();
2256   } else if (CMSRemarkVerifyVariant == 2) {
2257     // In this second variant of verification, we flag an error
2258     // (i.e. an object reachable in the new marks-vector not reachable
2259     // in the CMS marks-vector) immediately, also indicating the
2260     // identity of an object (A) that references the unmarked object (B) --
2261     // presumably, a mutation to A failed to be picked up by preclean/remark?
2262     verify_after_remark_work_2();
2263   } else {
2264     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2265             CMSRemarkVerifyVariant);
2266   }

2267   return true;
2268 }
2269 
2270 void CMSCollector::verify_after_remark_work_1() {
2271   ResourceMark rm;
2272   HandleMark  hm;
2273   GenCollectedHeap* gch = GenCollectedHeap::heap();
2274 
2275   // Get a clear set of claim bits for the roots processing to work with.
2276   ClassLoaderDataGraph::clear_claimed_marks();
2277 
2278   // Mark from roots one level into CMS
2279   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2280   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2281 
2282   {
2283     StrongRootsScope srs(1);
2284 
2285     gch->gen_process_roots(&srs,
2286                            GenCollectedHeap::OldGen,


2298     false /* don't yield */, true /* verifying */);
2299   assert(_restart_addr == NULL, "Expected pre-condition");
2300   verification_mark_bm()->iterate(&markFromRootsClosure);
2301   while (_restart_addr != NULL) {
2302     // Deal with stack overflow: by restarting at the indicated
2303     // address.
2304     HeapWord* ra = _restart_addr;
2305     markFromRootsClosure.reset(ra);
2306     _restart_addr = NULL;
2307     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2308   }
2309   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2310   verify_work_stacks_empty();
2311 
2312   // Marking completed -- now verify that each bit marked in
2313   // verification_mark_bm() is also marked in markBitMap(); flag all
2314   // errors by printing corresponding objects.
2315   VerifyMarkedClosure vcl(markBitMap());
2316   verification_mark_bm()->iterate(&vcl);
2317   if (vcl.failed()) {
2318     LogHandle(gc, verify) log;
2319     log.info("Verification failed");
2320     ResourceMark rm;
2321     gch->print_on(log.info_stream());
2322     fatal("CMS: failed marking verification after remark");
2323   }
2324 }
2325 
2326 class VerifyKlassOopsKlassClosure : public KlassClosure {
2327   class VerifyKlassOopsClosure : public OopClosure {
2328     CMSBitMap* _bitmap;
2329    public:
2330     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2331     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2332     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2333   } _oop_closure;
2334  public:
2335   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2336   void do_klass(Klass* k) {
2337     k->oops_do(&_oop_closure);
2338   }
2339 };
2340 
2341 void CMSCollector::verify_after_remark_work_2() {


2594   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2595   if (GCExpandToAllocateDelayMillis > 0) {
2596     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2597   }
2598   return have_lock_and_allocate(word_size, tlab);
2599 }
2600 
2601 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2602     size_t bytes,
2603     size_t expand_bytes,
2604     CMSExpansionCause::Cause cause)
2605 {
2606 
2607   bool success = expand(bytes, expand_bytes);
2608 
2609   // remember why we expanded; this information is used
2610   // by shouldConcurrentCollect() when making decisions on whether to start
2611   // a new CMS cycle.
2612   if (success) {
2613     set_expansion_cause(cause);
2614     log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));



2615   }
2616 }
2617 
2618 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2619   HeapWord* res = NULL;
2620   MutexLocker x(ParGCRareEvent_lock);
2621   while (true) {
2622     // Expansion by some other thread might make alloc OK now:
2623     res = ps->lab.alloc(word_sz);
2624     if (res != NULL) return res;
2625     // If there's not enough expansion space available, give up.
2626     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2627       return NULL;
2628     }
2629     // Otherwise, we try expansion.
2630     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2631     // Now go around the loop and try alloc again;
2632     // A competing par_promote might beat us to the expansion space,
2633     // so we may go around the loop again if promotion fails again.
2634     if (GCExpandToAllocateDelayMillis > 0) {


2681   assert_lock_strong(freelistLock());
2682   if (PrintGCDetails && Verbose) {
2683     warning("Shrinking of CMS not yet implemented");
2684   }
2685   return;
2686 }
2687 
2688 
2689 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2690 // phases.
2691 class CMSPhaseAccounting: public StackObj {
2692  public:
2693   CMSPhaseAccounting(CMSCollector *collector,
2694                      const char *phase,
2695                      bool print_cr = true);
2696   ~CMSPhaseAccounting();
2697 
2698  private:
2699   CMSCollector *_collector;
2700   const char *_phase;
2701   jlong _start_counter;
2702   bool _print_cr;
2703 
2704  public:
2705   // Not MT-safe, so do not pass around these StackObj's
2706   // where they may be accessed by other threads.
2707   jlong wallclock_millis() {
2708     return TimeHelper::counter_to_millis(os::elapsed_counter() - _start_counter);




2709   }
2710 };
2711 
2712 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2713                                        const char *phase,
2714                                        bool print_cr) :
2715   _collector(collector), _phase(phase), _print_cr(print_cr) {
2716 

2717   _collector->resetYields();






2718   _collector->resetTimer();
2719   _start_counter = os::elapsed_counter();
2720   _collector->startTimer();
2721   log_info(gc)("Concurrent %s (%.3f)", _phase, TimeHelper::counter_to_seconds(_start_counter));
2722 }
2723 
2724 CMSPhaseAccounting::~CMSPhaseAccounting() {

2725   _collector->stopTimer();
2726   jlong end_counter = os::elapsed_counter();
2727   log_debug(gc)("Concurrent active time: %.3fms", _collector->timerValue());
2728   log_info(gc)("Concurrent %s (%.3fs, %.3fs) %.3fms",
2729                _phase,
2730                TimeHelper::counter_to_seconds(_start_counter),
2731                TimeHelper::counter_to_seconds(end_counter),
2732                TimeHelper::counter_to_millis(end_counter - _start_counter));
2733   log_debug(gc, conc, stats)(" (CMS-concurrent-%s yielded %d times)", _phase, _collector->yields());






2734 }
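// Typical use, as seen in markFromRoots() and preclean() below: construct a
// CMSPhaseAccounting on the stack at the start of a concurrent phase so the
// constructor logs the start and resets the timers, and let the destructor log
// the end, elapsed wall-clock time and yield count. Schematic only:
//
//   void concurrent_phase_example(CMSCollector* collector) {
//     GCTraceCPUTime tcpu;
//     CMSPhaseAccounting pa(collector, "Example-Phase");
//     // ... the concurrent work of the phase ...
//   } // destructor logs phase end, duration and yields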
2735 
2736 // CMS work
2737 
2738 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2739 class CMSParMarkTask : public AbstractGangTask {
2740  protected:
2741   CMSCollector*     _collector;
2742   uint              _n_workers;
2743   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2744       AbstractGangTask(name),
2745       _collector(collector),
2746       _n_workers(n_workers) {}
2747   // Work method in support of parallel rescan ... of young gen spaces
2748   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2749                              ContiguousSpace* space,
2750                              HeapWord** chunk_array, size_t chunk_top);
2751   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2752 };
2753 


2780                     Mutex::_no_safepoint_check_flag);
2781     checkpointRootsInitialWork();
2782     // enable ("weak") refs discovery
2783     rp->enable_discovery();
2784     _collectorState = Marking;
2785   }
2786 }
2787 
2788 void CMSCollector::checkpointRootsInitialWork() {
2789   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2790   assert(_collectorState == InitialMarking, "just checking");
2791 
2792   // Already have locks.
2793   assert_lock_strong(bitMapLock());
2794   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2795 
2796   // Setup the verification and class unloading state for this
2797   // CMS collection cycle.
2798   setup_cms_unloading_and_verification_state();
2799 
2800   GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);

2801 
2802   // Reset all the PLAB chunk arrays if necessary.
2803   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2804     reset_survivor_plab_arrays();
2805   }
2806 
2807   ResourceMark rm;
2808   HandleMark  hm;
2809 
2810   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2811   GenCollectedHeap* gch = GenCollectedHeap::heap();
2812 
2813   verify_work_stacks_empty();
2814   verify_overflow_empty();
2815 
2816   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2817   // Update the saved marks which may affect the root scans.
2818   gch->save_marks();
2819 
2820   // weak reference processing has not started yet.
2821   ref_processor()->set_enqueuing_is_done(false);
2822 
2823   // Need to remember all newly created CLDs,
2824   // so that we can guarantee that the remark finds them.
2825   ClassLoaderDataGraph::remember_new_clds(true);
2826 
2827   // Whenever a CLD is found, it will be claimed before proceeding to mark
2828   // the klasses. The claimed marks need to be cleared before marking starts.
2829   ClassLoaderDataGraph::clear_claimed_marks();
2830 

2831   print_eden_and_survivor_chunk_arrays();

2832 
2833   {
2834 #if defined(COMPILER2) || INCLUDE_JVMCI
2835     DerivedPointerTableDeactivate dpt_deact;
2836 #endif
2837     if (CMSParallelInitialMarkEnabled) {
2838       // The parallel version.
2839       WorkGang* workers = gch->workers();
2840       assert(workers != NULL, "Need parallel worker threads.");
2841       uint n_workers = workers->active_workers();
2842 
2843       StrongRootsScope srs(n_workers);
2844 
2845       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2846       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2847       if (n_workers > 1) {
2848         workers->run_task(&tsk);
2849       } else {
2850         tsk.work(0);
2851       }


2882   save_sweep_limits();
2883   verify_overflow_empty();
2884 }
2885 
2886 bool CMSCollector::markFromRoots() {
2887   // we might be tempted to assert that:
2888   // assert(!SafepointSynchronize::is_at_safepoint(),
2889   //        "inconsistent argument?");
2890   // However that wouldn't be right, because it's possible that
2891   // a safepoint is indeed in progress as a young generation
2892   // stop-the-world GC happens even as we mark in this generation.
2893   assert(_collectorState == Marking, "inconsistent state?");
2894   check_correct_thread_executing();
2895   verify_overflow_empty();
2896 
2897   // Weak ref discovery note: We may be discovering weak
2898   // refs in this generation concurrent (but interleaved) with
2899   // weak ref discovery by the young generation collector.
2900 
2901   CMSTokenSyncWithLocks ts(true, bitMapLock());
2902   GCTraceCPUTime tcpu;
2903   CMSPhaseAccounting pa(this, "Mark", !PrintGCDetails);
2904   bool res = markFromRootsWork();
2905   if (res) {
2906     _collectorState = Precleaning;
2907   } else { // We failed and a foreground collection wants to take over
2908     assert(_foregroundGCIsActive, "internal state inconsistency");
2909     assert(_restart_addr == NULL,  "foreground will restart from scratch");
2910     log_debug(gc, conc)("bailing out to foreground collection");


2911   }
2912   verify_overflow_empty();
2913   return res;
2914 }
2915 
2916 bool CMSCollector::markFromRootsWork() {
2917   // iterate over marked bits in bit map, doing a full scan and mark
2918   // from these roots using the following algorithm:
2919   // . if oop is to the right of the current scan pointer,
2920   //   mark corresponding bit (we'll process it later)
2921   // . else (oop is to left of current scan pointer)
2922   //   push oop on marking stack
2923   // . drain the marking stack
2924 
2925   // Note that when we do a marking step we need to hold the
2926   // bit map lock -- recall that direct allocation (by mutators)
2927   // and promotion (by the young generation collector) is also
2928   // marking the bit map. [the so-called allocate live policy.]
2929   // Because the implementation of bit map marking is not
2930   // robust wrt simultaneous marking of bits in the same word,


3095 //    and local work queue empty,
3096 //    then in a loop do:
3097 //    . check global overflow stack; steal a batch of oops and trace
3098 //    . try to steal from other threads if GOS is empty
3099 //    . if neither is available, offer termination
3100 // -- Terminate and return result
3101 //
3102 void CMSConcMarkingTask::work(uint worker_id) {
3103   elapsedTimer _timer;
3104   ResourceMark rm;
3105   HandleMark hm;
3106 
3107   DEBUG_ONLY(_collector->verify_overflow_empty();)
3108 
3109   // Before we begin work, our work queue should be empty
3110   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3111   // Scan the bitmap covering _cms_space, tracing through grey objects.
3112   _timer.start();
3113   do_scan_and_mark(worker_id, _cms_space);
3114   _timer.stop();
3115   log_debug(gc, conc, stats)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());




3116 
3117   // ... do work stealing
3118   _timer.reset();
3119   _timer.start();
3120   do_work_steal(worker_id);
3121   _timer.stop();
3122   log_debug(gc, conc, stats)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());




3123   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3124   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3125   // Note that under the current task protocol, the
3126   // following assertion is true even if the space has
3127   // expanded since the completion of the concurrent
3128   // marking. XXX This will likely change under a strict
3129   // ABORT semantics.
3130   // After perm removal the comparison was changed to
3131   // greater than or equal to from strictly greater than.
3132   // Before perm removal the highest address sweep would
3133   // have been at the end of perm gen but now is at the
3134   // end of the tenured gen.
3135   assert(_global_finger >=  _cms_space->end(),
3136          "All tasks have been completed");
3137   DEBUG_ONLY(_collector->verify_overflow_empty();)
3138 }
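// The steal-or-terminate protocol sketched in the comment above work() has
// roughly the following shape (schematic; the real do_work_steal() also drains
// the shared overflow stack and honours yield requests):
//
//   while (true) {
//     oop obj;
//     if (task_queues->steal(worker_id, &seed, /* by reference */ obj)) {
//       // trace the stolen object, possibly pushing more work locally
//     } else if (terminator->offer_termination()) {
//       break;   // every queue (and the global overflow stack) is empty
//     }
//   }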
3139 
3140 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3141   HeapWord* read = _global_finger;
3142   HeapWord* cur  = read;


3317   // Check if oop points into the CMS generation
3318   // and is not marked
3319   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3320     // a white object ...
3321     // If we manage to "claim" the object, by being the
3322     // first thread to mark it, then we push it on our
3323     // marking stack
3324     if (_bit_map->par_mark(addr)) {     // ... now grey
3325       // push on work queue (grey set)
3326       bool simulate_overflow = false;
3327       NOT_PRODUCT(
3328         if (CMSMarkStackOverflowALot &&
3329             _collector->simulate_overflow()) {
3330           // simulate a stack overflow
3331           simulate_overflow = true;
3332         }
3333       )
3334       if (simulate_overflow ||
3335           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3336         // stack overflow
3337         log_debug(gc, conc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());



3338         // We cannot assert that the overflow stack is full because
3339         // it may have been emptied since.
3340         assert(simulate_overflow ||
3341                _work_queue->size() == _work_queue->max_elems(),
3342               "Else push should have succeeded");
3343         handle_stack_overflow(addr);
3344       }
3345     } // Else, some other thread got there first
3346     do_yield_check();
3347   }
3348 }
3349 
3350 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3351 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3352 
3353 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3354   while (_work_queue->size() > max) {
3355     oop new_oop;
3356     if (_work_queue->pop_local(new_oop)) {
3357       assert(new_oop->is_oop(), "Should be an oop");


3402       assert(work_q->size() == 0, "Impossible!");
3403       break;
3404     } else if (yielding() || should_yield()) {
3405       yield();
3406     }
3407   }
3408 }
3409 
3410 // This is run by the CMS (coordinator) thread.
3411 void CMSConcMarkingTask::coordinator_yield() {
3412   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3413          "CMS thread should hold CMS token");
3414   // First give up the locks, then yield, then re-lock
3415   // We should probably use a constructor/destructor idiom to
3416   // do this unlock/lock or modify the MutexUnlocker class to
3417   // serve our purpose. XXX
3418   assert_lock_strong(_bit_map_lock);
3419   _bit_map_lock->unlock();
3420   ConcurrentMarkSweepThread::desynchronize(true);
3421   _collector->stopTimer();

3422   _collector->incrementYields();

3423 
3424   // It is possible for whichever thread initiated the yield request
3425   // not to get a chance to wake up and take the bitmap lock between
3426   // this thread releasing it and reacquiring it. So, while the
3427   // should_yield() flag is on, let's sleep for a bit to give the
3428   // other thread a chance to wake up. The limit imposed on the number
3429   // of iterations is defensive, to avoid any unforeseen circumstances
3430   // putting us into an infinite loop. Since it's always been this
3431   // (coordinator_yield()) method that was observed to cause the
3432   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3433   // which is by default non-zero. The other seven methods that
3434   // also perform the yield operation use a different
3435   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3436   // can enable the sleeping for those methods too, if necessary.
3437   // See 6442774.
3438   //
3439   // We really need to reconsider the synchronization between the GC
3440   // thread and the yield-requesting threads in the future and we
3441   // should really use wait/notify, which is the recommended
3442   // way of doing this type of interaction. Additionally, we should


3564 void CMSCollector::preclean() {
3565   check_correct_thread_executing();
3566   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3567   verify_work_stacks_empty();
3568   verify_overflow_empty();
3569   _abort_preclean = false;
3570   if (CMSPrecleaningEnabled) {
3571     if (!CMSEdenChunksRecordAlways) {
3572       _eden_chunk_index = 0;
3573     }
3574     size_t used = get_eden_used();
3575     size_t capacity = get_eden_capacity();
3576     // Don't start sampling unless we will get sufficiently
3577     // many samples.
3578     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3579                 * CMSScheduleRemarkEdenPenetration)) {
3580       _start_sampling = true;
3581     } else {
3582       _start_sampling = false;
3583     }
3584     GCTraceCPUTime tcpu;
3585     CMSPhaseAccounting pa(this, "Preclean", !PrintGCDetails);
3586     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3587   }
3588   CMSTokenSync x(true); // is cms thread
3589   if (CMSPrecleaningEnabled) {
3590     sample_eden();
3591     _collectorState = AbortablePreclean;
3592   } else {
3593     _collectorState = FinalMarking;
3594   }
3595   verify_work_stacks_empty();
3596   verify_overflow_empty();
3597 }
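// A worked example of the sampling threshold in preclean() above, using the
// usual defaults CMSScheduleRemarkSamplingRatio=5 and
// CMSScheduleRemarkEdenPenetration=50: with a 200M eden, sampling starts only if
//   used < 200M / (5 * 100) * 50 = 20M,
// i.e. only if current eden usage is below 1/5 of the 100M occupancy at which
// we would like the remark pause to be scheduled, which leaves room for at
// least CMSScheduleRemarkSamplingRatio samples before that target is reached.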
3598 
3599 // Try and schedule the remark such that young gen
3600 // occupancy is CMSScheduleRemarkEdenPenetration %.
3601 void CMSCollector::abortable_preclean() {
3602   check_correct_thread_executing();
3603   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3604   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3605 
3606   // If Eden's current occupancy is below this threshold,
3607   // immediately schedule the remark; else preclean
3608   // past the next scavenge in an effort to
3609   // schedule the pause as described above. By choosing
3610   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3611   // we will never do an actual abortable preclean cycle.
3612   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3613     GCTraceCPUTime tcpu;
3614     CMSPhaseAccounting pa(this, "Abortable Preclean", !PrintGCDetails);
3615     // We need more smarts in the abortable preclean
3616     // loop below to deal with cases where allocation
3617     // in young gen is very very slow, and our precleaning
3618     // is running a losing race against a horde of
3619     // mutators intent on flooding us with CMS updates
3620     // (dirty cards).
3621     // One, admittedly dumb, strategy is to give up
3622     // after a certain number of abortable precleaning loops
3623     // or after a certain maximum time. We want to make
3624     // this smarter in the next iteration.
3625     // XXX FIX ME!!! YSR
3626     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3627     while (!(should_abort_preclean() ||
3628              ConcurrentMarkSweepThread::should_terminate())) {
3629       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3630       cumworkdone += workdone;
3631       loops++;
3632       // Voluntarily terminate abortable preclean phase if we have
3633       // been at it for too long.
3634       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3635           loops >= CMSMaxAbortablePrecleanLoops) {
3636         log_debug(gc, conc)(" CMS: abort preclean due to loops ");


3637         break;
3638       }
3639       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3640         log_debug(gc, conc)(" CMS: abort preclean due to time ");


3641         break;
3642       }
3643       // If we are doing little work each iteration, we should
3644       // take a short break.
3645       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3646         // Sleep for some time, waiting for work to accumulate
3647         stopTimer();
3648         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3649         startTimer();
3650         waited++;
3651       }
3652     }
3653     log_debug(gc, conc, stats)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",

3654                                loops, waited, cumworkdone);
3655   }

3656   CMSTokenSync x(true); // is cms thread
3657   if (_collectorState != Idling) {
3658     assert(_collectorState == AbortablePreclean,
3659            "Spontaneous state transition?");
3660     _collectorState = FinalMarking;
3661   } // Else, a foreground collection completed this CMS cycle.
3662   return;
3663 }
3664 
3665 // Respond to an Eden sampling opportunity
3666 void CMSCollector::sample_eden() {
3667   // Make sure a young gc cannot sneak in between our
3668   // reading and recording of a sample.
3669   assert(Thread::current()->is_ConcurrentGC_thread(),
3670          "Only the cms thread may collect Eden samples");
3671   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3672          "Should collect samples while holding CMS token");
3673   if (!_start_sampling) {
3674     return;
3675   }


3778   // processes.
3779   ScanMarkedObjectsAgainCarefullyClosure
3780     smoac_cl(this, _span,
3781       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3782 
3783   // Preclean dirty cards in ModUnionTable and CardTable using
3784   // appropriate convergence criterion;
3785   // repeat CMSPrecleanIter times unless we find that
3786   // we are losing.
3787   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3788   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3789          "Bad convergence multiplier");
3790   assert(CMSPrecleanThreshold >= 100,
3791          "Unreasonably low CMSPrecleanThreshold");
3792 
3793   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3794   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3795        numIter < CMSPrecleanIter;
3796        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3797     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3798     log_trace(gc, conc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);


3799     // Either there are very few dirty cards, so re-mark
3800     // pause will be small anyway, or our pre-cleaning isn't
3801     // that much faster than the rate at which cards are being
3802     // dirtied, so we might as well stop and re-mark since
3803     // precleaning won't improve our re-mark time by much.
3804     if (curNumCards <= CMSPrecleanThreshold ||
3805         (numIter > 0 &&
3806          (curNumCards * CMSPrecleanDenominator >
3807          lastNumCards * CMSPrecleanNumerator))) {
3808       numIter++;
3809       cumNumCards += curNumCards;
3810       break;
3811     }
3812   }
3813 
3814   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3815 
3816   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3817   cumNumCards += curNumCards;
3818   log_trace(gc, conc, stats)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",

3819                              curNumCards, cumNumCards, numIter);

3820   return cumNumCards;   // as a measure of useful work done
3821 }
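// A worked example of the convergence test in the loop above, assuming the
// usual defaults CMSPrecleanThreshold=1000, CMSPrecleanNumerator=2 and
// CMSPrecleanDenominator=3: a pass ends the precleaning either when it found
// at most 1000 dirty cards (the remark pause will be short anyway), or when
// curNumCards * 3 > lastNumCards * 2, i.e. the pass cleaned more than two
// thirds of what the previous pass cleaned. For instance, if the previous pass
// processed 9000 cards and the current one 7000, then 7000 * 3 = 21000 >
// 9000 * 2 = 18000, so card dirtying is keeping pace with precleaning and we
// stop rather than iterate further.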
3822 
3823 // PRECLEANING NOTES:
3824 // Precleaning involves:
3825 // . reading the bits of the modUnionTable and clearing the set bits.
3826 // . For the cards corresponding to the set bits, we scan the
3827 //   objects on those cards. This means we need the free_list_lock
3828 //   so that we can safely iterate over the CMS space when scanning
3829 //   for oops.
3830 // . When we scan the objects, we'll be both reading and setting
3831 //   marks in the marking bit map, so we'll need the marking bit map.
3832 // . For protecting _collector_state transitions, we take the CGC_lock.
3833 //   Note that any races in the reading of card table entries by the
3834 //   CMS thread on the one hand and the clearing of those entries by the
3835 //   VM thread or the setting of those entries by the mutator threads on the
3836 //   other are quite benign. However, for efficiency it makes sense to keep
3837 //   the VM thread from racing with the CMS thread while the latter is
3838 //   transferring dirty card info to the modUnionTable. We therefore also use the
3839 //   CGC_lock to protect the reading of the card table and the mod union


4053   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4054   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4055   PrecleanKlassClosure preclean_klass_closure(cl);
4056   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4057 
4058   verify_work_stacks_empty();
4059   verify_overflow_empty();
4060 }
4061 
4062 void CMSCollector::checkpointRootsFinal() {
4063   assert(_collectorState == FinalMarking, "incorrect state transition?");
4064   check_correct_thread_executing();
4065   // world is stopped at this checkpoint
4066   assert(SafepointSynchronize::is_at_safepoint(),
4067          "world should be stopped");
4068   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4069 
4070   verify_work_stacks_empty();
4071   verify_overflow_empty();
4072 
4073   log_debug(gc, conc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4074                       _young_gen->used() / K, _young_gen->capacity() / K);



4075   {
4076     if (CMSScavengeBeforeRemark) {
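           // A young collection at this point (mostly) empties eden and the
           // survivor from-space, which reduces the amount of young gen root
           // scanning work needed during the remark pause below.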
4077       GenCollectedHeap* gch = GenCollectedHeap::heap();
4078       // Temporarily set the flag to false; GCH->do_collection() expects
4079       // it to be false and will set it to true.
4080       FlagSetting fl(gch->_is_gc_active, false);
4081 
4082       GCTraceTime(Trace, gc) tm("Scavenge-Before-Remark", _gc_timer_cm);
4083 
4084       gch->do_collection(true,                      // full (i.e. force, see below)
4085                          false,                     // !clear_all_soft_refs
4086                          0,                         // size
4087                          false,                     // is_tlab
4088                          GenCollectedHeap::YoungGen // type
4089         );
4090     }
4091     FreelistLocker x(this);
4092     MutexLockerEx y(bitMapLock(),
4093                     Mutex::_no_safepoint_check_flag);
4094     checkpointRootsFinalWork();
4095   }
4096   verify_work_stacks_empty();
4097   verify_overflow_empty();
4098 }
4099 
4100 void CMSCollector::checkpointRootsFinalWork() {
4101   GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
4102 
4103   assert(haveFreelistLocks(), "must have free list locks");
4104   assert_lock_strong(bitMapLock());
4105 
4106   ResourceMark rm;
4107   HandleMark   hm;
4108 
4109   GenCollectedHeap* gch = GenCollectedHeap::heap();
4110 
4111   if (should_unload_classes()) {
4112     CodeCache::gc_prologue();
4113   }
4114   assert(haveFreelistLocks(), "must have free list locks");
4115   assert_lock_strong(bitMapLock());
4116 
4117   // We might assume that we need not fill TLAB's when
4118   // CMSScavengeBeforeRemark is set, because we may have just done
4119   // a scavenge which would have filled all TLAB's -- and besides
4120   // Eden would be empty. This however may not always be the case --
4121   // for instance although we asked for a scavenge, it may not have
4122   // happened because of a JNI critical section. We probably need
4123   // a policy for deciding whether we can in that case wait until
4124   // the critical section releases and then do the remark following
4125   // the scavenge, and skip it here. In the absence of that policy,
4126   // or of an indication of whether the scavenge did indeed occur,
4127   // we cannot rely on TLAB's having been filled and must do
4128   // so here just in case a scavenge did not happen.
4129   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4130   // Update the saved marks which may affect the root scans.
4131   gch->save_marks();
4132 

4133   print_eden_and_survivor_chunk_arrays();

4134 
4135   {
4136 #if defined(COMPILER2) || INCLUDE_JVMCI
4137     DerivedPointerTableDeactivate dpt_deact;
4138 #endif
4139 
4140     // Note on the role of the mod union table:
4141     // Since the marker in "markFromRoots" marks concurrently with
4142     // mutators, it is possible for some reachable objects not to have been
4143     // scanned. For instance, suppose the only reference to an object A was
4144     // placed in object B after the marker scanned B. Unless B is rescanned,
4145     // A would be collected. Such updates to references in marked objects
4146     // are detected via the mod union table which is the set of all cards
4147     // dirtied since the first checkpoint in this GC cycle and prior to
4148     // the most recent young generation GC, minus those cleaned up by the
4149     // concurrent precleaning.
4150     if (CMSParallelRemarkEnabled) {
4151       GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
4152       do_remark_parallel();
4153     } else {
4154       GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
4155       do_remark_non_parallel();
4156     }
4157   }
4158   verify_work_stacks_empty();
4159   verify_overflow_empty();
4160 
4161   {
4162     GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
4163     refProcessingWork();
4164   }
4165   verify_work_stacks_empty();
4166   verify_overflow_empty();
4167 
4168   if (should_unload_classes()) {
4169     CodeCache::gc_epilogue();
4170   }
4171   JvmtiExport::gc_epilogue();
4172 
4173   // If we encountered any (marking stack / work queue) overflow
4174   // events during the current CMS cycle, take appropriate
4175   // remedial measures, where possible, so as to try and avoid
4176   // recurrence of that condition.
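   // For the serial marking stack the remedy is to expand the stack (see
   // CMSMarkStack::expand() further below); the overflow counters are then
   // reset so that they only reflect the next cycle.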
4177   assert(_markStack.isEmpty(), "No grey objects");
4178   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4179                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4180   if (ser_ovflw > 0) {
4181     log_debug(gc, conc, stats)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4182                                _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);





4183     _markStack.expand();
4184     _ser_pmc_remark_ovflw = 0;
4185     _ser_pmc_preclean_ovflw = 0;
4186     _ser_kac_preclean_ovflw = 0;
4187     _ser_kac_ovflw = 0;
4188   }
4189   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4190     log_debug(gc, conc, stats)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",


4191                                _par_pmc_remark_ovflw, _par_kac_ovflw);

4192     _par_pmc_remark_ovflw = 0;
4193     _par_kac_ovflw = 0;
4194   }

4195   if (_markStack._hit_limit > 0) {
4196     log_debug(gc, conc, stats)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4197                                _markStack._hit_limit);
4198   }
4199   if (_markStack._failed_double > 0) {
4200     log_debug(gc, conc, stats)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4201                                _markStack._failed_double, _markStack.capacity());



4202   }
4203   _markStack._hit_limit = 0;
4204   _markStack._failed_double = 0;
4205 
4206   if ((VerifyAfterGC || VerifyDuringGC) &&
4207       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4208     verify_after_remark();
4209   }
4210 
4211   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4212 
4213   // Change under the freelistLocks.
4214   _collectorState = Sweeping;
4215   // Call isAllClear() under bitMapLock
4216   assert(_modUnionTable.isAllClear(),
4217       "Should be clear by end of the final marking");
4218   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4219       "Should be clear by end of the final marking");
4220 }
4221 
4222 void CMSParInitialMarkTask::work(uint worker_id) {
4223   elapsedTimer _timer;
4224   ResourceMark rm;
4225   HandleMark   hm;
4226 
4227   // ---------- scan from roots --------------
4228   _timer.start();
4229   GenCollectedHeap* gch = GenCollectedHeap::heap();
4230   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4231 
4232   // ---------- young gen roots --------------
4233   {
4234     work_on_young_gen_roots(worker_id, &par_mri_cl);
4235     _timer.stop();
4236     log_debug(gc, conc, stats)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4237   }
4238 
4239   // ---------- remaining roots --------------
4240   _timer.reset();
4241   _timer.start();
4242 
4243   CLDToOopClosure cld_closure(&par_mri_cl, true);
4244 
4245   gch->gen_process_roots(_strong_roots_scope,
4246                          GenCollectedHeap::OldGen,
4247                          false,     // yg was scanned above
4248                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4249                          _collector->should_unload_classes(),
4250                          &par_mri_cl,
4251                          NULL,
4252                          &cld_closure);
4253   assert(_collector->should_unload_classes()
4254          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4255          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4256   _timer.stop();
4257   log_debug(gc, conc, stats)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4258 }
4259 
4260 // Parallel remark task
4261 class CMSParRemarkTask: public CMSParMarkTask {
4262   CompactibleFreeListSpace* _cms_space;
4263 
4264   // The per-thread work queues, available here for stealing.
4265   OopTaskQueueSet*       _task_queues;
4266   ParallelTaskTerminator _term;
4267   StrongRootsScope*      _strong_roots_scope;
4268 
4269  public:
4270   // A value of 0 passed to n_workers will cause the number of
4271   // workers to be taken from the active workers in the work gang.
4272   CMSParRemarkTask(CMSCollector* collector,
4273                    CompactibleFreeListSpace* cms_space,
4274                    uint n_workers, WorkGang* workers,
4275                    OopTaskQueueSet* task_queues,
4276                    StrongRootsScope* strong_roots_scope):
4277     CMSParMarkTask("Rescan roots and grey objects in parallel",


4350   elapsedTimer _timer;
4351   ResourceMark rm;
4352   HandleMark   hm;
4353 
4354   // ---------- rescan from roots --------------
4355   _timer.start();
4356   GenCollectedHeap* gch = GenCollectedHeap::heap();
4357   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4358     _collector->_span, _collector->ref_processor(),
4359     &(_collector->_markBitMap),
4360     work_queue(worker_id));
4361 
4362   // Rescan young gen roots first since these are likely
4363   // coarsely partitioned and may, on that account, constitute
4364   // the critical path; thus, it's best to start off that
4365   // work first.
4366   // ---------- young gen roots --------------
4367   {
4368     work_on_young_gen_roots(worker_id, &par_mrias_cl);
4369     _timer.stop();
4370     log_debug(gc, conc, stats)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4371   }
4372 
4373   // ---------- remaining roots --------------
4374   _timer.reset();
4375   _timer.start();
4376   gch->gen_process_roots(_strong_roots_scope,
4377                          GenCollectedHeap::OldGen,
4378                          false,     // yg was scanned above
4379                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4380                          _collector->should_unload_classes(),
4381                          &par_mrias_cl,
4382                          NULL,
4383                          NULL);     // The dirty klasses will be handled below
4384 
4385   assert(_collector->should_unload_classes()
4386          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4387          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4388   _timer.stop();
4389   log_debug(gc, conc, stats)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());




4390 
4391   // ---------- unhandled CLD scanning ----------
4392   if (worker_id == 0) { // Single threaded at the moment.
4393     _timer.reset();
4394     _timer.start();
4395 
4396     // Scan all new class loader data objects and new dependencies that were
4397     // introduced during concurrent marking.
4398     ResourceMark rm;
4399     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4400     for (int i = 0; i < array->length(); i++) {
4401       par_mrias_cl.do_cld_nv(array->at(i));
4402     }
4403 
4404     // We don't need to keep track of new CLDs anymore.
4405     ClassLoaderDataGraph::remember_new_clds(false);
4406 
4407     _timer.stop();
4408     log_debug(gc, conc, stats)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4409   }
4410 
4411   // ---------- dirty klass scanning ----------
4412   if (worker_id == 0) { // Single threaded at the moment.
4413     _timer.reset();
4414     _timer.start();
4415 
4416     // Scan all classes that were dirtied during the concurrent marking phase.
4417     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4418     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4419 
4420     _timer.stop();
4421     log_debug(gc, conc, stats)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4422   }
4423 
4424   // We might have added oops to ClassLoaderData::_handles during the
4425   // concurrent marking phase. These oops point to newly allocated objects
4426   // that are guaranteed to be kept alive. Either by the direct allocation
4427   // code, or when the young collector processes the roots. Hence,
4428   // we don't have to revisit the _handles block during the remark phase.
4429 
4430   // ---------- rescan dirty cards ------------
4431   _timer.reset();
4432   _timer.start();
4433 
4434   // Do the rescan tasks for each of the two spaces
4435   // (cms_space) in turn.
4436   // "worker_id" is passed to select the task_queue for "worker_id"
4437   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4438   _timer.stop();
4439   log_debug(gc, conc, stats)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4440 
4441   // ---------- steal work from other threads ...
4442   // ---------- ... and drain overflow list.
4443   _timer.reset();
4444   _timer.start();
4445   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4446   _timer.stop();
4447   log_debug(gc, conc, stats)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());




4448 }
4449 
4450 // Note that parameter "i" is not used.
4451 void
4452 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4453   OopsInGenClosure* cl, ContiguousSpace* space,
4454   HeapWord** chunk_array, size_t chunk_top) {
4455   // Until all tasks completed:
4456   // . claim an unclaimed task
4457   // . compute region boundaries corresponding to task claimed
4458   //   using chunk_array
4459   // . par_oop_iterate(cl) over that region
4460 
4461   ResourceMark rm;
4462   HandleMark   hm;
4463 
4464   SequentialSubTasksDone* pst = space->par_seq_tasks();
4465 
4466   uint nth_task = 0;
4467   uint n_tasks  = pst->n_tasks();


4621       // because we just took work from the overflow list,
4622       // but of course we can't since all of that could have
4623       // been already stolen from us.
4624       // "He giveth and He taketh away."
4625       continue;
4626     }
4627     // Verify that we have no work before we resort to stealing
4628     assert(work_q->size() == 0, "Have work, shouldn't steal");
4629     // Try to steal from other queues that have work
4630     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4631       NOT_PRODUCT(num_steals++;)
4632       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4633       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4634       // Do scanning work
4635       obj_to_scan->oop_iterate(cl);
4636       // Loop around, finish this work, and try to steal some more
4637     } else if (terminator()->offer_termination()) {
4638         break;  // nirvana from the infinite cycle
4639     }
4640   }
4641   log_develop(gc, conc, stats)("\t(%d: stole %d oops)", i, num_steals);




4642   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4643          "Else our work is not yet done");
4644 }
4645 
4646 // Record object boundaries in _eden_chunk_array by sampling the eden
4647 // top in the slow-path eden object allocation code path, recording
4648 // the boundaries there, if CMSEdenChunksRecordAlways is true. If
4649 // CMSEdenChunksRecordAlways is false, we rely instead on the asynchronous
4650 // sampling in sample_eden(), which is active during part of the
4651 // preclean phase.
4652 void CMSCollector::sample_eden_chunk() {
4653   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4654     if (_eden_chunk_lock->try_lock()) {
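           // try_lock() keeps the allocation slow path from blocking on the
           // sampling lock; if it is contended, this sample is simply skipped.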
4655       // Record a sample. This is the critical section. The contents
4656       // of the _eden_chunk_array have to be non-decreasing in
4657       // address order.
4658       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4659       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4660              "Unexpected state of Eden");
4661       if (_eden_chunk_index == 0 ||


4718       if (cur_val < min_val) {
4719         min_tid = j;
4720         min_val = cur_val;
4721       } else {
4722         assert(cur_val < top, "All recorded addresses should be less");
4723       }
4724     }
4725     // At this point min_val and min_tid are respectively
4726     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4727     // and the thread (j) that witnesses that address.
4728     // We record this address in the _survivor_chunk_array[i]
4729     // and increment _cursor[min_tid] prior to the next round i.
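         // (In effect this loop does a k-way merge of the per-thread sorted
         // _survivor_plab_array chunks into the single sorted
         // _survivor_chunk_array.)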
4730     if (min_val == top) {
4731       break;
4732     }
4733     _survivor_chunk_array[i] = min_val;
4734     _cursor[min_tid]++;
4735   }
4736   // We are all done; record the size of the _survivor_chunk_array
4737   _survivor_chunk_index = i; // exclusive: [0, i)
4738   log_debug(gc, conc, stats)(" (Survivor:" SIZE_FORMAT " chunks) ", i);


4739   // Verify that we used up all the recorded entries
4740   #ifdef ASSERT
4741     size_t total = 0;
4742     for (int j = 0; j < no_of_gc_threads; j++) {
4743       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4744       total += _cursor[j];
4745     }
4746     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4747     // Check that the merged array is in sorted order
4748     if (total > 0) {
4749       for (size_t i = 0; i < total - 1; i++) {
4750         log_develop(gc, conc, stats)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",

4751                                     i, p2i(_survivor_chunk_array[i]));

4752         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4753                "Not sorted");
4754       }
4755     }
4756   #endif // ASSERT
4757 }
4758 
4759 // Set up the space's par_seq_tasks structure for work claiming
4760 // for parallel initial scan and rescan of young gen.
4761 // See ParRescanTask where this is currently used.
4762 void
4763 CMSCollector::
4764 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4765   assert(n_threads > 0, "Unexpected n_threads argument");
4766 
4767   // Eden space
4768   if (!_young_gen->eden()->is_empty()) {
4769     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4770     assert(!pst->valid(), "Clobbering existing data?");
4771     // Each valid entry in [0, _eden_chunk_index) represents a task.


4865   // as a result of work_q overflow
4866   restore_preserved_marks_if_any();
4867 }
4868 
4869 // Non-parallel version of remark
4870 void CMSCollector::do_remark_non_parallel() {
4871   ResourceMark rm;
4872   HandleMark   hm;
4873   GenCollectedHeap* gch = GenCollectedHeap::heap();
4874   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4875 
4876   MarkRefsIntoAndScanClosure
4877     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4878              &_markStack, this,
4879              false /* should_yield */, false /* not precleaning */);
4880   MarkFromDirtyCardsClosure
4881     markFromDirtyCardsClosure(this, _span,
4882                               NULL,  // space is set further below
4883                               &_markBitMap, &_markStack, &mrias_cl);
4884   {
4885     GCTraceTime(Trace, gc) t("grey object rescan", _gc_timer_cm);
4886     // Iterate over the dirty cards, setting the corresponding bits in the
4887     // mod union table.
4888     {
4889       ModUnionClosure modUnionClosure(&_modUnionTable);
4890       _ct->ct_bs()->dirty_card_iterate(
4891                       _cmsGen->used_region(),
4892                       &modUnionClosure);
4893     }
4894     // Having transferred these marks into the modUnionTable, we just need
4895     // to rescan the marked objects on the dirty cards in the modUnionTable.
4896     // The initial marking may have been done during an asynchronous
4897     // collection so there may be dirty bits in the mod-union table.
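         // Each word of the mod union table covers BitsPerWord cards, so the
         // upper bound of the rescan range is rounded up below to a whole
         // number of table words.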
4898     const int alignment =
4899       CardTableModRefBS::card_size * BitsPerWord;
4900     {
4901       // ... First handle dirty cards in CMS gen
4902       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4903       MemRegion ur = _cmsGen->used_region();
4904       HeapWord* lb = ur.start();
4905       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
4906       MemRegion cms_span(lb, ub);
4907       _modUnionTable.dirty_range_iterate_clear(cms_span,
4908                                                &markFromDirtyCardsClosure);
4909       verify_work_stacks_empty();
4910       log_debug(gc, conc, stats)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());



4911     }
4912   }
4913   if (VerifyDuringGC &&
4914       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4915     HandleMark hm;  // Discard invalid handles created during verification
4916     Universe::verify();
4917   }
4918   {
4919     GCTraceTime(Trace, gc) t("root rescan", _gc_timer_cm);
4920 
4921     verify_work_stacks_empty();
4922 
4923     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4924     StrongRootsScope srs(1);
4925 
4926     gch->gen_process_roots(&srs,
4927                            GenCollectedHeap::OldGen,
4928                            true,  // young gen as roots
4929                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
4930                            should_unload_classes(),
4931                            &mrias_cl,
4932                            NULL,
4933                            NULL); // The dirty klasses will be handled below
4934 
4935     assert(should_unload_classes()
4936            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4937            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4938   }
4939 
4940   {
4941     GCTraceTime(Trace, gc) t("visit unhandled CLDs", _gc_timer_cm);
4942 
4943     verify_work_stacks_empty();
4944 
4945     // Scan all class loader data objects that might have been introduced
4946     // during concurrent marking.
4947     ResourceMark rm;
4948     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4949     for (int i = 0; i < array->length(); i++) {
4950       mrias_cl.do_cld_nv(array->at(i));
4951     }
4952 
4953     // We don't need to keep track of new CLDs anymore.
4954     ClassLoaderDataGraph::remember_new_clds(false);
4955 
4956     verify_work_stacks_empty();
4957   }
4958 
4959   {
4960     GCTraceTime(Trace, gc) t("dirty klass scan", _gc_timer_cm);
4961 
4962     verify_work_stacks_empty();
4963 
4964     RemarkKlassClosure remark_klass_closure(&mrias_cl);
4965     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4966 
4967     verify_work_stacks_empty();
4968   }
4969 
4970   // We might have added oops to ClassLoaderData::_handles during the
4971   // concurrent marking phase. These oops point to newly allocated objects
4972   // that are guaranteed to be kept alive. Either by the direct allocation
4973   // code, or when the young collector processes the roots. Hence,
4974   // we don't have to revisit the _handles block during the remark phase.
4975 
4976   verify_work_stacks_empty();
4977   // Restore evacuated mark words, if any, used for overflow list links
4978   restore_preserved_marks_if_any();
4979 
4980   verify_overflow_empty();


5102       // We'd like to assert(work_q->size() != 0, ...)
5103       // because we just took work from the overflow list,
5104       // but of course we can't, since all of that might have
5105       // been already stolen from us.
5106       continue;
5107     }
5108     // Verify that we have no work before we resort to stealing
5109     assert(work_q->size() == 0, "Have work, shouldn't steal");
5110     // Try to steal from other queues that have work
5111     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5112       NOT_PRODUCT(num_steals++;)
5113       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5114       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5115       // Do scanning work
5116       obj_to_scan->oop_iterate(keep_alive);
5117       // Loop around, finish this work, and try to steal some more
5118     } else if (terminator()->offer_termination()) {
5119       break;  // nirvana from the infinite cycle
5120     }
5121   }
5122   log_develop(gc, conc, stats)("\n\t(%d: stole %d oops)", i, num_steals);




5123 }
5124 
5125 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5126 {
5127   GenCollectedHeap* gch = GenCollectedHeap::heap();
5128   WorkGang* workers = gch->workers();
5129   assert(workers != NULL, "Need parallel worker threads.");
5130   CMSRefProcTaskProxy rp_task(task, &_collector,
5131                               _collector.ref_processor()->span(),
5132                               _collector.markBitMap(),
5133                               workers, _collector.task_queues());
5134   workers->run_task(&rp_task);
5135 }
5136 
5137 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5138 {
5139 
5140   GenCollectedHeap* gch = GenCollectedHeap::heap();
5141   WorkGang* workers = gch->workers();
5142   assert(workers != NULL, "Need parallel worker threads.");


5144   workers->run_task(&enq_task);
5145 }
5146 
5147 void CMSCollector::refProcessingWork() {
5148   ResourceMark rm;
5149   HandleMark   hm;
5150 
5151   ReferenceProcessor* rp = ref_processor();
5152   assert(rp->span().equals(_span), "Spans should be equal");
5153   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5154   // Process weak references.
5155   rp->setup_policy(false);
5156   verify_work_stacks_empty();
5157 
5158   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5159                                           &_markStack, false /* !preclean */);
5160   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5161                                 _span, &_markBitMap, &_markStack,
5162                                 &cmsKeepAliveClosure, false /* !preclean */);
5163   {
5164     GCTraceTime(Debug, gc) t("weak refs processing", _gc_timer_cm);
5165 
5166     ReferenceProcessorStats stats;
5167     if (rp->processing_is_mt()) {
5168       // Set the degree of MT here.  If the discovery is done MT, there
5169       // may have been a different number of threads doing the discovery
5170       // and a different number of discovered lists may have Ref objects.
5171       // That is OK as long as the Reference lists are balanced (see
5172       // balance_all_queues() and balance_queues()).
5173       GenCollectedHeap* gch = GenCollectedHeap::heap();
5174       uint active_workers = ParallelGCThreads;
5175       WorkGang* workers = gch->workers();
5176       if (workers != NULL) {
5177         active_workers = workers->active_workers();
5178         // The expectation is that active_workers will have already
5179         // been set to a reasonable value.  If it has not been set,
5180         // investigate.
5181         assert(active_workers > 0, "Should have been set during scavenge");
5182       }
5183       rp->set_active_mt_degree(active_workers);
5184       CMSRefProcTaskExecutor task_executor(*this);


5186                                         &cmsKeepAliveClosure,
5187                                         &cmsDrainMarkingStackClosure,
5188                                         &task_executor,
5189                                         _gc_timer_cm);
5190     } else {
5191       stats = rp->process_discovered_references(&_is_alive_closure,
5192                                         &cmsKeepAliveClosure,
5193                                         &cmsDrainMarkingStackClosure,
5194                                         NULL,
5195                                         _gc_timer_cm);
5196     }
5197     _gc_tracer_cm->report_gc_reference_stats(stats);
5198 
5199   }
5200 
5201   // This is the point where the entire marking should have completed.
5202   verify_work_stacks_empty();
5203 
5204   if (should_unload_classes()) {
5205     {
5206       GCTraceTime(Debug, gc) t("class unloading", _gc_timer_cm);
5207 
5208       // Unload classes and purge the SystemDictionary.
5209       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5210 
5211       // Unload nmethods.
5212       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5213 
5214       // Prune dead klasses from subklass/sibling/implementor lists.
5215       Klass::clean_weak_klass_links(&_is_alive_closure);
5216     }
5217 
5218     {
5219       GCTraceTime(Debug, gc) t("scrub symbol table", _gc_timer_cm);
5220       // Clean up unreferenced symbols in symbol table.
5221       SymbolTable::unlink();
5222     }
5223 
5224     {
5225       GCTraceTime(Debug, gc) t("scrub string table", _gc_timer_cm);
5226       // Delete entries for dead interned strings.
5227       StringTable::unlink(&_is_alive_closure);
5228     }
5229   }
5230 
5231 
5232   // Restore any preserved marks as a result of mark stack or
5233   // work queue overflow
5234   restore_preserved_marks_if_any();  // done single-threaded for now
5235 
5236   rp->set_enqueuing_is_done(true);
5237   if (rp->processing_is_mt()) {
5238     rp->balance_all_queues();
5239     CMSRefProcTaskExecutor task_executor(*this);
5240     rp->enqueue_discovered_references(&task_executor);
5241   } else {
5242     rp->enqueue_discovered_references(NULL);
5243   }
5244   rp->verify_no_references_recorded();
5245   assert(!rp->discovery_enabled(), "should have been disabled");


5272     }
5273   }
5274 }
5275 #endif
5276 
5277 void CMSCollector::sweep() {
5278   assert(_collectorState == Sweeping, "just checking");
5279   check_correct_thread_executing();
5280   verify_work_stacks_empty();
5281   verify_overflow_empty();
5282   increment_sweep_count();
5283   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5284 
5285   _inter_sweep_timer.stop();
5286   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5287 
5288   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5289   _intra_sweep_timer.reset();
5290   _intra_sweep_timer.start();
5291   {
5292     GCTraceCPUTime tcpu;
5293     CMSPhaseAccounting pa(this, "Sweep", !PrintGCDetails);
5294     // First sweep the old gen
5295     {
5296       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5297                                bitMapLock());
5298       sweepWork(_cmsGen);
5299     }
5300 
5301     // Update Universe::_heap_*_at_gc figures.
5302     // We need all the free list locks to make the abstract state
5303     // transition from Sweeping to Resetting. See detailed note
5304     // further below.
5305     {
5306       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5307       // Update heap occupancy information which is used as
5308       // input to soft ref clearing policy at the next gc.
5309       Universe::update_heap_info_at_gc();
5310       _collectorState = Resizing;
5311     }
5312   }
5313   verify_work_stacks_empty();


5356   GenCollectedHeap* gch = GenCollectedHeap::heap();
5357   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5358   gch->update_full_collections_completed(_collection_count_start);
5359 }
5360 
5361 // FIX ME!!! Looks like this belongs in CFLSpace, with
5362 // CMSGen merely delegating to it.
5363 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5364   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5365   HeapWord*  minAddr        = _cmsSpace->bottom();
5366   HeapWord*  largestAddr    =
5367     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5368   if (largestAddr == NULL) {
5369     // The dictionary appears to be empty.  In this case
5370     // try to coalesce at the end of the heap.
5371     largestAddr = _cmsSpace->end();
5372   }
5373   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5374   size_t nearLargestOffset =
5375     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
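   // i.e. nearLargestPercent of the distance from the bottom of the space
   // to the current largest block, backed off by one MinChunkSize.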
5376   log_debug(gc, freelist, stats)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5377                                  p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));





5378   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5379 }
5380 
5381 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5382   return addr >= _cmsSpace->nearLargestChunk();
5383 }
5384 
5385 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5386   return _cmsSpace->find_chunk_at_end();
5387 }
5388 
5389 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5390                                                     bool full) {
5391   // If the young generation has been collected, gather any statistics
5392   // that are of interest at this point.
5393   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5394   if (!full && current_is_young) {
5395     // Gather statistics on the young generation collection.
5396     collector()->stats().record_gc0_end(used());
5397   }


5451   } else {                                      // did not unload classes,
5452     _concurrent_cycles_since_last_unload++;     // ... increment count
5453   }
5454 }
5455 
5456 // Reset CMS data structures (for now just the marking bit map)
5457 // preparatory for the next cycle.
5458 void CMSCollector::reset_concurrent() {
5459   CMSTokenSyncWithLocks ts(true, bitMapLock());
5460 
5461   // If the state is not "Resetting", the foreground thread
5462   // has done a collection and the resetting.
5463   if (_collectorState != Resetting) {
5464     assert(_collectorState == Idling, "The state should only change"
5465       " because the foreground collector has finished the collection");
5466     return;
5467   }
5468 
5469   // Clear the mark bitmap (no grey objects to start with)
5470   // for the next cycle.
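   // The bit map is cleared in CMSBitMapYieldQuantum-sized chunks so that
   // the CMS thread can yield to a foreground collection between chunks.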
5471   GCTraceCPUTime tcpu;
5472   CMSPhaseAccounting cmspa(this, "Reset", !PrintGCDetails);
5473 
5474   HeapWord* curAddr = _markBitMap.startWord();
5475   while (curAddr < _markBitMap.endWord()) {
5476     size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5477     MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5478     _markBitMap.clear_large_range(chunk);
5479     if (ConcurrentMarkSweepThread::should_yield() &&
5480         !foregroundGCIsActive() &&
5481         CMSYield) {
5482       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5483              "CMS thread should hold CMS token");
5484       assert_lock_strong(bitMapLock());
5485       bitMapLock()->unlock();
5486       ConcurrentMarkSweepThread::desynchronize(true);
5487       stopTimer();

5488       incrementYields();

5489 
5490       // See the comment in coordinator_yield()
5491       for (unsigned i = 0; i < CMSYieldSleepCount &&
5492                        ConcurrentMarkSweepThread::should_yield() &&
5493                        !CMSCollector::foregroundGCIsActive(); ++i) {
5494         os::sleep(Thread::current(), 1, false);
5495       }
5496 
5497       ConcurrentMarkSweepThread::synchronize(true);
5498       bitMapLock()->lock_without_safepoint_check();
5499       startTimer();
5500     }
5501     curAddr = chunk.end();
5502   }
5503   // A successful mostly concurrent collection has been done.
5504   // Because only the full (i.e., concurrent mode failure) collections
5505   // are being measured for gc overhead limits, clean the "near" flag
5506   // and count.
5507   size_policy()->reset_gc_overhead_limit_count();
5508   _collectorState = Idling;
5509 
5510   register_gc_end();
5511 }
5512 
5513 // Same as above but for STW paths
5514 void CMSCollector::reset_stw() {
5515   // already have the lock
5516   assert(_collectorState == Resetting, "just checking");
5517   assert_lock_strong(bitMapLock());
5518   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
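   // The world is stopped on this path, so unlike reset_concurrent() above,
   // the bit map can be cleared in one shot without yielding.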
5519   _markBitMap.clear_all();
5520   _collectorState = Idling;
5521   register_gc_end();
5522 }
5523 
5524 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5525   GCTraceCPUTime tcpu;

5526   TraceCollectorStats tcs(counters());
5527 
5528   switch (op) {
5529     case CMS_op_checkpointRootsInitial: {
5530       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5531       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5532       checkpointRootsInitial();



5533       break;
5534     }
5535     case CMS_op_checkpointRootsFinal: {
5536       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5537       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5538       checkpointRootsFinal();



5539       break;
5540     }
5541     default:
5542       fatal("No such CMS_op");
5543   }
5544 }
5545 
5546 #ifndef PRODUCT
5547 size_t const CMSCollector::skip_header_HeapWords() {
5548   return FreeChunk::header_size();
5549 }
5550 
5551 // Try and collect here conditions that should hold when
5552 // CMS thread is exiting. The idea is that the foreground GC
5553 // thread should not be blocked if it wants to terminate
5554 // the CMS thread and yet continue to run the VM for a while
5555 // after that.
5556 void CMSCollector::verify_ok_to_terminate() const {
5557   assert(Thread::current()->is_ConcurrentGC_thread(),
5558          "should be called by CMS thread");


5731   }
5732   assert(_virtual_space.committed_size() == rs.size(),
5733          "didn't reserve backing store for all of CMS stack?");
5734   _base = (oop*)(_virtual_space.low());
5735   _index = 0;
5736   _capacity = size;
5737   NOT_PRODUCT(_max_depth = 0);
5738   return true;
5739 }
5740 
5741 // XXX FIX ME !!! In the MT case we come in here holding a
5742 // leaf lock. For printing we need to take a further lock
5743 // which has lower rank. We need to recalibrate the two
5744 // lock-ranks involved in order to be able to print the
5745 // messages below. (Or defer the printing to the caller.
5746 // For now we take the expedient path of just disabling the
5747 // messages for the problematic case.)
5748 void CMSMarkStack::expand() {
5749   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5750   if (_capacity == MarkStackSizeMax) {
5751     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5752       // We print a warning message only once per CMS cycle.
5753       log_debug(gc, conc)(" (benign) Hit CMSMarkStack max size limit");
5754     }
5755     return;
5756   }
5757   // Double capacity if possible
5758   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5759   // Do not give up existing stack until we have managed to
5760   // get the double capacity that we desired.
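   // Note that the stack contents are not copied: callers expand only an
   // empty (or just-reset) stack, so resetting _index below suffices.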
5761   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5762                    new_capacity * sizeof(oop)));
5763   if (rs.is_reserved()) {
5764     // Release the backing store associated with old stack
5765     _virtual_space.release();
5766     // Reinitialize virtual space for new stack
5767     if (!_virtual_space.initialize(rs, rs.size())) {
5768       fatal("Not enough swap for expanded marking stack");
5769     }
5770     _base = (oop*)(_virtual_space.low());
5771     _index = 0;
5772     _capacity = new_capacity;
5773   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5774     // Failed to double capacity, continue;
5775     // we print a detail message only once per CMS cycle.
5776     log_debug(gc, conc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",

5777                         _capacity / K, new_capacity / K);
5778   }
5779 }
5780 
5781 
5782 // Closures
5783 // XXX: there seems to be a lot of code  duplication here;
5784 // should refactor and consolidate common code.
5785 
5786 // This closure is used to mark refs into the CMS generation in
5787 // the CMS bit map. Called at the first checkpoint. This closure
5788 // assumes that we do not need to re-mark dirty cards; if the CMS
5789 // generation on which this is used is not the oldest
5790 // generation then this will lose younger_gen cards!
5791 
5792 MarkRefsIntoClosure::MarkRefsIntoClosure(
5793   MemRegion span, CMSBitMap* bitMap):
5794     _span(span),
5795     _bitMap(bitMap)
5796 {


5834 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
5835 
5836 // A variant of the above, used for CMS marking verification.
5837 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5838   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5839     _span(span),
5840     _verification_bm(verification_bm),
5841     _cms_bm(cms_bm)
5842 {
5843   assert(ref_processor() == NULL, "deliberately left NULL");
5844   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5845 }
5846 
5847 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5848   // if obj points into _span, then mark the corresponding bit in _verification_bm
5849   assert(obj->is_oop(), "expected an oop");
5850   HeapWord* addr = (HeapWord*)obj;
5851   if (_span.contains(addr)) {
5852     _verification_bm->mark(addr);
5853     if (!_cms_bm->isMarked(addr)) {
5854       LogHandle(gc, verify) log;
5855       ResourceMark rm;
5856       oop(addr)->print_on(log.info_stream());
5857       log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5858       fatal("... aborting");
5859     }
5860   }
5861 }
5862 
5863 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5864 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5865 
5866 //////////////////////////////////////////////////
5867 // MarkRefsIntoAndScanClosure
5868 //////////////////////////////////////////////////
5869 
5870 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5871                                                        ReferenceProcessor* rp,
5872                                                        CMSBitMap* bit_map,
5873                                                        CMSBitMap* mod_union_table,
5874                                                        CMSMarkStack*  mark_stack,
5875                                                        CMSCollector* collector,
5876                                                        bool should_yield,
5877                                                        bool concurrent_precleaning):


5933            "overflow list was drained above");
5934 
5935     assert(_collector->no_preserved_marks(),
5936            "All preserved marks should have been restored above");
5937   }
5938 }
5939 
5940 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5941 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5942 
5943 void MarkRefsIntoAndScanClosure::do_yield_work() {
5944   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5945          "CMS thread should hold CMS token");
5946   assert_lock_strong(_freelistLock);
5947   assert_lock_strong(_bit_map->lock());
5948   // relinquish the free_list_lock and bitMaplock()
5949   _bit_map->lock()->unlock();
5950   _freelistLock->unlock();
5951   ConcurrentMarkSweepThread::desynchronize(true);
5952   _collector->stopTimer();

5953   _collector->incrementYields();

5954 
5955   // See the comment in coordinator_yield()
5956   for (unsigned i = 0;
5957        i < CMSYieldSleepCount &&
5958        ConcurrentMarkSweepThread::should_yield() &&
5959        !CMSCollector::foregroundGCIsActive();
5960        ++i) {
5961     os::sleep(Thread::current(), 1, false);
5962   }
5963 
5964   ConcurrentMarkSweepThread::synchronize(true);
5965   _freelistLock->lock_without_safepoint_check();
5966   _bit_map->lock()->lock_without_safepoint_check();
5967   _collector->startTimer();
5968 }
5969 
5970 ///////////////////////////////////////////////////////////
5971 // Par_MarkRefsIntoAndScanClosure: a parallel version of
5972 //                                 MarkRefsIntoAndScanClosure
5973 ///////////////////////////////////////////////////////////


6089       // An object not (yet) reached by marking: we merely need to
6090       // compute its size so as to go look at the next block.
6091       assert(p->is_oop(true), "should be an oop");
6092       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6093     }
6094   }
6095   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6096   return size;
6097 }
6098 
6099 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6100   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6101          "CMS thread should hold CMS token");
6102   assert_lock_strong(_freelistLock);
6103   assert_lock_strong(_bitMap->lock());
6104   // relinquish the free_list_lock and bitMaplock()
6105   _bitMap->lock()->unlock();
6106   _freelistLock->unlock();
6107   ConcurrentMarkSweepThread::desynchronize(true);
6108   _collector->stopTimer();

6109   _collector->incrementYields();

6110 
6111   // See the comment in coordinator_yield()
6112   for (unsigned i = 0; i < CMSYieldSleepCount &&
6113                    ConcurrentMarkSweepThread::should_yield() &&
6114                    !CMSCollector::foregroundGCIsActive(); ++i) {
6115     os::sleep(Thread::current(), 1, false);
6116   }
6117 
6118   ConcurrentMarkSweepThread::synchronize(true);
6119   _freelistLock->lock_without_safepoint_check();
6120   _bitMap->lock()->lock_without_safepoint_check();
6121   _collector->startTimer();
6122 }
6123 
6124 
6125 //////////////////////////////////////////////////////////////////
6126 // SurvivorSpacePrecleanClosure
6127 //////////////////////////////////////////////////////////////////
6128 // This (single-threaded) closure is used to preclean the oops in
6129 // the survivor spaces.


6156     // the ones in CMS heap (i.e. in _span).
6157     new_oop->oop_iterate(_scanning_closure);
6158     // check if it's time to yield
6159     do_yield_check();
6160   }
6161   unsigned int after_count =
6162     GenCollectedHeap::heap()->total_collections();
6163   bool abort = (_before_count != after_count) ||
6164                _collector->should_abort_preclean();
6165   return abort ? 0 : size;
6166 }
6167 
6168 void SurvivorSpacePrecleanClosure::do_yield_work() {
6169   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6170          "CMS thread should hold CMS token");
6171   assert_lock_strong(_bit_map->lock());
6172   // Relinquish the bit map lock
6173   _bit_map->lock()->unlock();
6174   ConcurrentMarkSweepThread::desynchronize(true);
6175   _collector->stopTimer();

6176   _collector->incrementYields();

6177 
6178   // See the comment in coordinator_yield()
6179   for (unsigned i = 0; i < CMSYieldSleepCount &&
6180                        ConcurrentMarkSweepThread::should_yield() &&
6181                        !CMSCollector::foregroundGCIsActive(); ++i) {
6182     os::sleep(Thread::current(), 1, false);
6183   }
6184 
6185   ConcurrentMarkSweepThread::synchronize(true);
6186   _bit_map->lock()->lock_without_safepoint_check();
6187   _collector->startTimer();
6188 }
6189 
6190 // This closure is used to rescan the marked objects on the dirty cards
6191 // in the mod union table and the card table proper. In the parallel
6192 // case, although the bitMap is shared, we do a single read so the
6193 // isMarked() query is "safe".
6194 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6195   // Ignore mark word because we are running concurrently with mutators
6196   assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));


6309       return true;
6310     }
6311   }
6312   scanOopsInOop(addr);
6313   return true;
6314 }
6315 
6316 // We take a break if we've been at this for a while,
6317 // so as to avoid monopolizing the locks involved.
6318 void MarkFromRootsClosure::do_yield_work() {
6319   // First give up the locks, then yield, then re-lock
6320   // We should probably use a constructor/destructor idiom to
6321   // do this unlock/lock or modify the MutexUnlocker class to
6322   // serve our purpose. XXX
6323   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6324          "CMS thread should hold CMS token");
6325   assert_lock_strong(_bitMap->lock());
6326   _bitMap->lock()->unlock();
6327   ConcurrentMarkSweepThread::desynchronize(true);
6328   _collector->stopTimer();

6329   _collector->incrementYields();

6330 
6331   // See the comment in coordinator_yield()
6332   for (unsigned i = 0; i < CMSYieldSleepCount &&
6333                        ConcurrentMarkSweepThread::should_yield() &&
6334                        !CMSCollector::foregroundGCIsActive(); ++i) {
6335     os::sleep(Thread::current(), 1, false);
6336   }
6337 
6338   ConcurrentMarkSweepThread::synchronize(true);
6339   _bitMap->lock()->lock_without_safepoint_check();
6340   _collector->startTimer();
6341 }
6342 
6343 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6344   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6345   assert(_markStack->isEmpty(),
6346          "should drain stack to limit stack usage");
6347   // convert ptr to an oop preparatory to scanning
6348   oop obj = oop(ptr);
6349   // Ignore mark word in verification below, since we


6615 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6616 
6617 // Upon stack overflow, we discard (part of) the stack,
6618 // remembering the least address amongst those discarded
6619 // in CMSCollector's _restart_address.
6620 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6621   // Remember the least grey address discarded
6622   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6623   _collector->lower_restart_addr(ra);
6624   _mark_stack->reset();  // discard stack contents
6625   _mark_stack->expand(); // expand the stack if possible
6626 }
6627 
6628 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6629   assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6630   HeapWord* addr = (HeapWord*)obj;
6631   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6632     // Oop lies in _span and isn't yet grey or black
6633     _verification_bm->mark(addr);            // now grey
6634     if (!_cms_bm->isMarked(addr)) {
6635       LogHandle(gc, verify) log;
6636       ResourceMark rm;
6637       oop(addr)->print_on(log.info_stream());
6638       log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6639       fatal("... aborting");
6640     }
6641 
6642     if (!_mark_stack->push(obj)) { // stack overflow
6643       log_debug(gc, conc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());



6644       assert(_mark_stack->isFull(), "Else push should have succeeded");
6645       handle_stack_overflow(addr);
6646     }
6647     // anything including and to the right of _finger
6648     // will be scanned as we iterate over the remainder of the
6649     // bit map
6650   }
6651 }
6652 
6653 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6654                      MemRegion span,
6655                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6656                      HeapWord* finger, MarkFromRootsClosure* parent) :
6657   MetadataAwareOopClosure(collector->ref_processor()),
6658   _collector(collector),
6659   _span(span),
6660   _bitMap(bitMap),
6661   _markStack(markStack),
6662   _finger(finger),
6663   _parent(parent)


6723 void PushOrMarkClosure::do_oop(oop obj) {
6724   // Ignore mark word because we are running concurrently with mutators.
6725   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6726   HeapWord* addr = (HeapWord*)obj;
6727   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6728     // Oop lies in _span and isn't yet grey or black
6729     _bitMap->mark(addr);            // now grey
6730     if (addr < _finger) {
6731       // the bit map iteration has already either passed, or
6732       // sampled, this bit in the bit map; we'll need to
6733       // use the marking stack to scan this oop's oops.
6734       bool simulate_overflow = false;
6735       NOT_PRODUCT(
6736         if (CMSMarkStackOverflowALot &&
6737             _collector->simulate_overflow()) {
6738           // simulate a stack overflow
6739           simulate_overflow = true;
6740         }
6741       )
6742       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6743         log_debug(gc, conc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());



6744         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6745         handle_stack_overflow(addr);
6746       }
6747     }
6748     // anything including and to the right of _finger
6749     // will be scanned as we iterate over the remainder of the
6750     // bit map
6751     do_yield_check();
6752   }
6753 }
6754 
6755 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6756 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6757 
6758 void Par_PushOrMarkClosure::do_oop(oop obj) {
6759   // Ignore mark word because we are running concurrently with mutators.
6760   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6761   HeapWord* addr = (HeapWord*)obj;
6762   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6763     // Oop lies in _span and isn't yet grey or black


6772     // -- else push on work queue
6773     if (   !res       // someone else marked it, they will deal with it
6774         || (addr >= *gfa)  // will be scanned in a later task
6775         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6776       return;
6777     }
6778     // the bit map iteration has already either passed, or
6779     // sampled, this bit in the bit map; we'll need to
6780     // use the marking stack to scan this oop's oops.
6781     bool simulate_overflow = false;
6782     NOT_PRODUCT(
6783       if (CMSMarkStackOverflowALot &&
6784           _collector->simulate_overflow()) {
6785         // simulate a stack overflow
6786         simulate_overflow = true;
6787       }
6788     )
6789     if (simulate_overflow ||
6790         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6791       // stack overflow
6792       log_debug(gc, conc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());



6793       // We cannot assert that the overflow stack is full because
6794       // it may have been emptied since.
6795       assert(simulate_overflow ||
6796              _work_queue->size() == _work_queue->max_elems(),
6797             "Else push should have succeeded");
6798       handle_stack_overflow(addr);
6799     }
6800     do_yield_check();
6801   }
6802 }
6803 
6804 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
6805 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
6806 
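// --------------------------------------------------------------------------
// [Editorial sketch -- not part of this webrev] The parallel variant above
// tries the worker's bounded local queue first and only then the shared
// overflow stack; a real overflow is declared only when both pushes fail.
// This standalone sketch mirrors that fallback order with std:: containers
// (the real par_push is lock-free; a mutex is used here for simplicity).
// All names are illustrative placeholders, not HotSpot APIs.
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

struct LocalQueueSketch {                   // bounded per-worker queue
  explicit LocalQueueSketch(size_t cap) : _cap(cap) {}
  bool push(void* p) {
    if (_q.size() >= _cap) return false;    // full: caller must try the overflow stack
    _q.push_back(p);
    return true;
  }
  size_t _cap;
  std::deque<void*> _q;
};

struct OverflowStackSketch {                // shared, capacity-bounded overflow stack
  bool par_push(void* p) {
    std::lock_guard<std::mutex> guard(_lock);
    if (_s.size() >= _cap) return false;
    _s.push_back(p);
    return true;
  }
  size_t _cap = 1u << 20;
  std::mutex _lock;
  std::vector<void*> _s;
};

// Same fallback order as Par_PushOrMarkClosure::do_oop: local queue, then the
// shared overflow stack, and only then the stack-overflow path.
inline bool offer_marked_object(LocalQueueSketch& q, OverflowStackSketch& ovf,
                                void* obj, bool& overflowed) {
  if (q.push(obj) || ovf.par_push(obj)) return true;
  overflowed = true;                        // real code calls handle_stack_overflow(addr)
  return false;
}
// --------------------------------------------------------------------------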
6807 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6808                                        MemRegion span,
6809                                        ReferenceProcessor* rp,
6810                                        CMSBitMap* bit_map,
6811                                        CMSBitMap* mod_union_table,
6812                                        CMSMarkStack*  mark_stack,


6934         _collector->par_push_on_overflow_list(obj);
6935         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6936       }
6937     } // Else, some other thread got there first
6938   }
6939 }
6940 
6941 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
6942 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
6943 
6944 void CMSPrecleanRefsYieldClosure::do_yield_work() {
6945   Mutex* bml = _collector->bitMapLock();
6946   assert_lock_strong(bml);
6947   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6948          "CMS thread should hold CMS token");
6949 
6950   bml->unlock();
6951   ConcurrentMarkSweepThread::desynchronize(true);
6952 
6953   _collector->stopTimer();

6954   _collector->incrementYields();

6955 
6956   // See the comment in coordinator_yield()
6957   for (unsigned i = 0; i < CMSYieldSleepCount &&
6958                        ConcurrentMarkSweepThread::should_yield() &&
6959                        !CMSCollector::foregroundGCIsActive(); ++i) {
6960     os::sleep(Thread::current(), 1, false);
6961   }
6962 
6963   ConcurrentMarkSweepThread::synchronize(true);
6964   bml->lock();
6965 
6966   _collector->startTimer();
6967 }
6968 
6969 bool CMSPrecleanRefsYieldClosure::should_return() {
6970   if (ConcurrentMarkSweepThread::should_yield()) {
6971     do_yield_work();
6972   }
6973   return _collector->foregroundGCIsActive();
6974 }
6975 
6976 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
6977   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
6978          "mr should be aligned to start at a card boundary");
6979   // We'd like to assert:
6980   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
6981   //        "mr should be a range of cards");
6982   // However, that would be too strong in one case -- the last
6983   // partition ends at _unallocated_block which, in general, can be
6984   // an arbitrary boundary, not necessarily card aligned.
6985   _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;



6986   _space->object_iterate_mem(mr, &_scan_cl);
6987 }
6988 
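// --------------------------------------------------------------------------
// [Editorial sketch -- not part of this webrev] The card arithmetic used by
// MarkFromDirtyCardsClosure::do_MemRegion above, restated standalone. The
// concrete numbers are assumptions for illustration (512-byte cards and
// 8-byte heap words, i.e. 64 words per card); the region must start on a
// card boundary, while its length may end at an arbitrary boundary.
#include <cassert>
#include <cstddef>

const size_t heap_word_bytes    = 8;        // assumption: 64-bit heap words
const size_t card_bytes         = 512;      // assumption: default card size
const size_t card_size_in_words = card_bytes / heap_word_bytes;

// Counts whole cards covered by a region given in words, mirroring
// _num_dirty_cards += mr.word_size() / card_size_in_words above.
size_t count_cards(size_t region_start_word, size_t region_word_size) {
  assert(region_start_word % card_size_in_words == 0 &&
         "region must start on a card boundary");
  return region_word_size / card_size_in_words;
}
// --------------------------------------------------------------------------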
6989 SweepClosure::SweepClosure(CMSCollector* collector,
6990                            ConcurrentMarkSweepGeneration* g,
6991                            CMSBitMap* bitMap, bool should_yield) :
6992   _collector(collector),
6993   _g(g),
6994   _sp(g->cmsSpace()),
6995   _limit(_sp->sweep_limit()),
6996   _freelistLock(_sp->freelistLock()),
6997   _bitMap(bitMap),
6998   _yield(should_yield),
6999   _inFreeRange(false),           // No free range at beginning of sweep
7000   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7001   _lastFreeRangeCoalesced(false),
7002   _freeFinger(g->used_region().start())
7003 {
7004   NOT_PRODUCT(
7005     _numObjectsFreed = 0;
7006     _numWordsFreed   = 0;
7007     _numObjectsLive = 0;
7008     _numWordsLive = 0;
7009     _numObjectsAlreadyFree = 0;
7010     _numWordsAlreadyFree = 0;
7011     _last_fc = NULL;
7012 
7013     _sp->initializeIndexedFreeListArrayReturnedBytes();
7014     _sp->dictionary()->initialize_dict_returned_bytes();
7015   )
7016   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7017          "sweep _limit out of bounds");
7018   log_develop(gc, sweep)("====================");
7019   log_develop(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));


7020 }
7021 
7022 void SweepClosure::print_on(outputStream* st) const {
7023   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7024                p2i(_sp->bottom()), p2i(_sp->end()));
7025   st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7026   st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7027   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7028   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7029                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7030 }
7031 
7032 #ifndef PRODUCT
7033 // Assertion checking only:  no useful work in product mode --
7034 // however, if any of the flags below become product flags,
7035 // you may need to review this code to see if it needs to be
7036 // enabled in product mode.
7037 SweepClosure::~SweepClosure() {
7038   assert_lock_strong(_freelistLock);
7039   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7040          "sweep _limit out of bounds");
7041   if (inFreeRange()) {
7042     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7043     print();
7044     ShouldNotReachHere();
7045   }
7046 
7047   if (log_is_enabled(Debug, gc, sweep)) {
7048     log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7049                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7050     log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7051                          _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7052     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7053     log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7054   }



7055 
7056   if (log_is_enabled(Debug, gc, conc, stats) && CMSVerifyReturnedBytes) {
7057     size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7058     size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7059     size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7060     log_debug(gc, conc, stats)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7061                                returned_bytes, indexListReturnedBytes, dict_returned_bytes);








7062   }
7063   log_develop(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7064   log_develop(gc, sweep)("================");
7065 }
7066 #endif  // PRODUCT
7067 
7068 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7069     bool freeRangeInFreeLists) {
7070   log_develop(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",

7071                          p2i(freeFinger), freeRangeInFreeLists);

7072   assert(!inFreeRange(), "Trampling existing free range");
7073   set_inFreeRange(true);
7074   set_lastFreeRangeCoalesced(false);
7075 
7076   set_freeFinger(freeFinger);
7077   set_freeRangeInFreeLists(freeRangeInFreeLists);
7078 }
7079 
7080 // Note that the sweeper runs concurrently with mutators. Thus,
7081 // it is possible for direct allocation in this generation to happen
7082 // in the middle of the sweep. Note that the sweeper also coalesces
7083 // contiguous free blocks. Thus, unless the sweeper and the allocator
7084 // synchronize appropriately, freshly allocated blocks may get swept up.
7085 // This is accomplished by the sweeper locking the free lists while
7086 // it is sweeping. Thus blocks that are determined to be free are
7087 // indeed free. There is however one additional complication:
7088 // blocks that have been allocated since the final checkpoint and
7089 // mark will not have been marked and so would be treated as
7090 // unreachable and swept up. To prevent this, the allocator marks
7091 // the bit map when allocating during the sweep phase. This leads,


7109   size_t res;
7110 
7111   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7112   // than "addr == _limit" because although _limit was a block boundary when
7113   // we started the sweep, it may no longer be one because heap expansion
7114   // may have caused us to coalesce the block ending at the address _limit
7115   // with a newly expanded chunk (this happens when _limit was set to the
7116   // previous _end of the space), so we may have stepped past _limit:
7117   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7118   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7119     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7120            "sweep _limit out of bounds");
7121     assert(addr < _sp->end(), "addr out of bounds");
7122     // Flush any free range we might be holding as a single
7123     // coalesced chunk to the appropriate free list.
7124     if (inFreeRange()) {
7125       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7126              "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7127       flush_cur_free_chunk(freeFinger(),
7128                            pointer_delta(addr, freeFinger()));
7129       log_develop(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",



7130                              p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7131                              lastFreeRangeCoalesced() ? 1 : 0);
7132     }

7133 
7134     // help the iterator loop finish
7135     return pointer_delta(_sp->end(), addr);
7136   }
7137 
7138   assert(addr < _limit, "sweep invariant");
7139   // check if we should yield
7140   do_yield_check(addr);
7141   if (fc->is_free()) {
7142     // Chunk that is already free
7143     res = fc->size();
7144     do_already_free_chunk(fc);
7145     debug_only(_sp->verifyFreeLists());
7146     // If we flush the chunk at hand in lookahead_and_flush()
7147     // and it's coalesced with a preceding chunk, then the
7148     // process of "mangling" the payload of the coalesced block
7149     // will cause erasure of the size information from the
7150     // (erstwhile) header of all the coalesced blocks but the
7151     // first, so the first disjunct in the assert will not hold
7152     // in that specific case (in which case the second disjunct


7314            "Should be an initialized object");
7315     // Ignore mark word because we are running concurrent with mutators
7316     assert(oop(addr)->is_oop(true), "live block should be an oop");
7317     // Verify that the bit map has no bits marked between
7318     // addr and purported end of this block.
7319     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7320     assert(size >= 3, "Necessary for Printezis marks to work");
7321     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7322     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7323   }
7324   return size;
7325 }
7326 
7327 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7328                                                  size_t chunkSize) {
7329   // do_post_free_or_garbage_chunk() should only be called in the case
7330   // of the adaptive free list allocator.
7331   const bool fcInFreeLists = fc->is_free();
7332   assert((HeapWord*)fc <= _limit, "sweep invariant");
7333 
7334   log_develop(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);


7335 
7336   HeapWord* const fc_addr = (HeapWord*) fc;
7337 
7338   bool coalesce = false;
7339   const size_t left  = pointer_delta(fc_addr, freeFinger());
7340   const size_t right = chunkSize;
7341   switch (FLSCoalescePolicy) {
7342     // numeric value forms a coalescing aggressiveness metric
7343     case 0:  { // never coalesce
7344       coalesce = false;
7345       break;
7346     }
7347     case 1: { // coalesce if left & right chunks on overpopulated lists
7348       coalesce = _sp->coalOverPopulated(left) &&
7349                  _sp->coalOverPopulated(right);
7350       break;
7351     }
7352     case 2: { // coalesce if left chunk on overpopulated list (default)
7353       coalesce = _sp->coalOverPopulated(left);
7354       break;


7411 // we'll look at because its end crosses past _limit, we'll preemptively
7412 // flush it along with any free range we may be holding on to. Note that
7413 // this can be the case only for an already free or freshly garbage
7414 // chunk. If this block is an object, it can never straddle
7415 // over _limit. The "straddling" occurs when _limit is set at
7416 // the previous end of the space when this cycle started, and
7417 // a subsequent heap expansion caused the previously co-terminal
7418 // free block to be coalesced with the newly expanded portion,
7419 // thus rendering _limit a non-block-boundary, making it dangerous
7420 // for the sweeper to step over and examine.
7421 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7422   assert(inFreeRange(), "Should only be called if currently in a free range.");
7423   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7424   assert(_sp->used_region().contains(eob - 1),
7425          "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7426          " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7427          " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7428          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7429   if (eob >= _limit) {
7430     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7431     log_develop(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "

7432                            "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7433                            "[" PTR_FORMAT "," PTR_FORMAT ")",
7434                            p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));

7435     // Return the storage we are tracking back into the free lists.
7436     log_develop(gc, sweep)("Flushing ... ");


7437     assert(freeFinger() < eob, "Error");
7438     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
7439   }
7440 }
7441 
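// --------------------------------------------------------------------------
// [Editorial sketch -- not part of this webrev] A standalone restatement of
// the lookahead logic in SweepClosure::lookahead_and_flush above: the sweep
// tracks a coalesced free range starting at free_finger, and once the chunk
// in hand ends at or beyond the sweep limit, the whole range
// [free_finger, end_of_block) is flushed back before the sweeper would step
// over the limit. Names are illustrative placeholders, not HotSpot APIs.
#include <cstddef>

struct SweepStateSketch {
  size_t free_finger   = 0;     // start of the free range being coalesced
  size_t sweep_limit   = 0;     // limit captured when the sweep cycle started
  bool   in_free_range = false;
};

// Returns the number of words to flush, or 0 if the limit was not reached.
inline size_t lookahead_and_flush_sketch(SweepStateSketch& s,
                                         size_t chunk_start, size_t chunk_size) {
  size_t end_of_block = chunk_start + chunk_size;
  if (!s.in_free_range || end_of_block < s.sweep_limit) return 0;
  size_t flushed = end_of_block - s.free_finger;   // flush_cur_free_chunk(...)
  s.in_free_range = false;
  return flushed;
}
// --------------------------------------------------------------------------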
7442 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7443   assert(inFreeRange(), "Should only be called if currently in a free range.");
7444   assert(size > 0,
7445     "A zero sized chunk cannot be added to the free lists.");
7446   if (!freeRangeInFreeLists()) {
7447     log_develop(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",

7448                            p2i(chunk), size);

7449     // A new free range is going to be starting.  The current
7450     // free range has not been added to the free lists yet or
7451     // was removed so add it back.
7452     // If the current free range was coalesced, then the death
7453     // of the free range was recorded.  Record a birth now.
7454     if (lastFreeRangeCoalesced()) {
7455       _sp->coalBirth(size);
7456     }
7457     _sp->addChunkAndRepairOffsetTable(chunk, size,
7458             lastFreeRangeCoalesced());
7459   } else {
7460     log_develop(gc, sweep)("Already in free list: nothing to flush");
7461   }
7462   set_inFreeRange(false);
7463   set_freeRangeInFreeLists(false);
7464 }
7465 
7466 // We take a break if we've been at this for a while,
7467 // so as to avoid monopolizing the locks involved.
7468 void SweepClosure::do_yield_work(HeapWord* addr) {
7469   // Return current free chunk being used for coalescing (if any)
7470   // to the appropriate freelist.  After yielding, the next
7471   // free block encountered will start a coalescing range of
7472   // free blocks.  If the next free block is adjacent to the
7473   // chunk just flushed, they will need to wait for the next
7474   // sweep to be coalesced.
7475   if (inFreeRange()) {
7476     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7477   }
7478 
7479   // First give up the locks, then yield, then re-lock.
7480   // We should probably use a constructor/destructor idiom to
7481   // do this unlock/lock or modify the MutexUnlocker class to
7482   // serve our purpose. XXX
7483   assert_lock_strong(_bitMap->lock());
7484   assert_lock_strong(_freelistLock);
7485   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7486          "CMS thread should hold CMS token");
7487   _bitMap->lock()->unlock();
7488   _freelistLock->unlock();
7489   ConcurrentMarkSweepThread::desynchronize(true);
7490   _collector->stopTimer();

7491   _collector->incrementYields();

7492 
7493   // See the comment in coordinator_yield()
7494   for (unsigned i = 0; i < CMSYieldSleepCount &&
7495                        ConcurrentMarkSweepThread::should_yield() &&
7496                        !CMSCollector::foregroundGCIsActive(); ++i) {
7497     os::sleep(Thread::current(), 1, false);
7498   }
7499 
7500   ConcurrentMarkSweepThread::synchronize(true);
7501   _freelistLock->lock();
7502   _bitMap->lock()->lock_without_safepoint_check();
7503   _collector->startTimer();
7504 }
7505 
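// --------------------------------------------------------------------------
// [Editorial sketch -- not part of this webrev] The "XXX" comment in
// do_yield_work() above suggests a constructor/destructor idiom for the
// unlock/yield/relock dance. A minimal sketch of that idea against
// std::mutex; the real code would additionally have to flush the current
// free range, hand over the CMS token, stop/restart the timer, and relock
// the bitmap lock without a safepoint check, all of which is omitted here.
#include <mutex>

class ScopedSweepYield {
 public:
  // Caller is assumed to hold both locks, acquired as bitmap then freelist.
  ScopedSweepYield(std::mutex& bitmap_lock, std::mutex& freelist_lock)
      : _bitmap_lock(bitmap_lock), _freelist_lock(freelist_lock) {
    _bitmap_lock.unlock();       // release both locks for the duration of the yield
    _freelist_lock.unlock();
  }
  ~ScopedSweepYield() {
    _freelist_lock.lock();       // reacquire in the same order as do_yield_work()
    _bitmap_lock.lock();
  }
 private:
  std::mutex& _bitmap_lock;
  std::mutex& _freelist_lock;
};

// Usage sketch: the sleep/yield loop would run while the guard is live, i.e.
// while both locks are released, and the destructor restores them on exit.
// {
//   ScopedSweepYield yield_guard(bitmap_lock, freelist_lock);
//   // sleep briefly while should_yield() and no foreground GC is active
// }
// --------------------------------------------------------------------------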
7506 #ifndef PRODUCT
7507 // This is actually very useful in a product build if it can
7508 // be called from the debugger.  Compile it into the product
7509 // as needed.
7510 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7511   return debug_cms_space->verify_chunk_in_free_list(fc);
7512 }
7513 #endif
7514 
7515 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7516   log_develop(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",

7517                          p2i(fc), fc->size());

7518 }
7519 
7520 // CMSIsAliveClosure
7521 bool CMSIsAliveClosure::do_object_b(oop obj) {
7522   HeapWord* addr = (HeapWord*)obj;
7523   return addr != NULL &&
7524          (!_span.contains(addr) || _bit_map->isMarked(addr));
7525 }
7526 
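// --------------------------------------------------------------------------
// [Editorial sketch -- not part of this webrev] A standalone restatement of
// the liveness predicate above: during reference processing an object is
// treated as live if it lies outside the span covered by the mark bitmap, or
// if it lies inside the span and its address is marked. The std::set stands
// in for the CMS bitmap; all names are illustrative, not HotSpot APIs.
#include <cstdint>
#include <set>

struct IsAliveSketch {
  uintptr_t span_lo = 0, span_hi = 0;        // [span_lo, span_hi) covered by the bitmap
  const std::set<uintptr_t>* bitmap = nullptr;

  bool operator()(uintptr_t addr) const {
    if (addr == 0) return false;                             // NULL is never live
    bool in_span = (addr >= span_lo && addr < span_hi);
    return !in_span || bitmap->count(addr) != 0;             // outside span, or marked
  }
};
// --------------------------------------------------------------------------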
7527 
7528 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
7529                       MemRegion span,
7530                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7531                       bool cpc):
7532   _collector(collector),
7533   _span(span),
7534   _bit_map(bit_map),
7535   _mark_stack(mark_stack),
7536   _concurrent_precleaning(cpc) {
7537   assert(!_span.is_empty(), "Empty span could spell trouble");

