
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7209 : [mq]: inccms


 150 };
 151 
 152 // Convenience class that does a CMSTokenSync, and then acquires
 153 // up to three locks.
 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
 155  private:
 156   // Note: locks are acquired in textual declaration order
 157   // and released in the opposite order
 158   MutexLockerEx _locker1, _locker2, _locker3;
 159  public:
 160   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 161                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 162     CMSTokenSync(is_cms_thread),
 163     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 164     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 165     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 166   { }
 167 };
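An illustrative use of the wrapper (hypothetical call site; MutexLockerEx treats a NULL mutex as a no-op, which is what makes the defaulted parameters safe):

      {
        CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
                                 bitMapLock(), freelistLock());
        // ... work while holding the CMS token and both locks ...
      }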
 168 
 169 
 170 // Wrapper class to temporarily disable icms during a foreground cms collection.
 171 class ICMSDisabler: public StackObj {
 172  public:
 173   // The ctor disables icms and wakes up the thread so it notices the change;
 174   // the dtor re-enables icms.  Note that the CMSCollector methods will check
 175   // CMSIncrementalMode.
 176   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
 177   ~ICMSDisabler() { CMSCollector::enable_icms(); }
 178 };
 179 
 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This class holds the per-thread state needed to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(


 346   _saved_alpha = alpha;
 347 
 348   // Initialize the alphas to the bootstrap value of 100.
 349   _gc0_alpha = _cms_alpha = 100;
 350 
 351   _cms_begin_time.update();
 352   _cms_end_time.update();
 353 
 354   _gc0_duration = 0.0;
 355   _gc0_period = 0.0;
 356   _gc0_promoted = 0;
 357 
 358   _cms_duration = 0.0;
 359   _cms_period = 0.0;
 360   _cms_allocated = 0;
 361 
 362   _cms_used_at_gc0_begin = 0;
 363   _cms_used_at_gc0_end = 0;
 364   _allow_duty_cycle_reduction = false;
 365   _valid_bits = 0;
 366   _icms_duty_cycle = CMSIncrementalDutyCycle;
 367 }
 368 
 369 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 370   // TBD: CR 6909490
 371   return 1.0;
 372 }
 373 
 374 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 375 }
 376 
 377 // If promotion failure handling is on, use
 378 // the padded average size of the promotion for each
 379 // young generation collection.
 380 double CMSStats::time_until_cms_gen_full() const {
 381   size_t cms_free = _cms_gen->cmsSpace()->free();
 382   GenCollectedHeap* gch = GenCollectedHeap::heap();
 383   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 384                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 385   if (cms_free > expected_promotion) {
 386     // Start a cms collection if there isn't enough space to promote


 425   // account for that much possible delay
 426   // in the query so as to avoid concurrent mode failures
 427   // due to starting the collection just a wee bit too
 428   // late.
 429   double work = cms_duration() + gc0_period();
 430   double deadline = time_until_cms_gen_full();
 431   // If a concurrent mode failure occurred recently, we want to be
 432   // more conservative and halve our expected time_until_cms_gen_full()
 433   if (work > deadline) {
 434     if (Verbose && PrintGCDetails) {
 435       gclog_or_tty->print(
 436         " CMSCollector: collect because of anticipated promotion "
 437         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 438         gc0_period(), time_until_cms_gen_full());
 439     }
 440     return 0.0;
 441   }
 442   return deadline - work;
 443 }
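A worked example of the slack computation (values illustrative only):

      //   work     = cms_duration() + gc0_period() = 2.0 + 5.0 = 7.0s
      //   deadline = time_until_cms_gen_full()     = 20.0s
      //   result   = deadline - work               = 13.0s of slack
      // Had the deadline been only 6.0s, work > deadline and 0.0 is
      // returned: start the collection immediately.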
 444 
 445 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
 446 // amount of change to prevent wild oscillation.
 447 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
 448                                               unsigned int new_duty_cycle) {
 449   assert(old_duty_cycle <= 100, "bad input value");
 450   assert(new_duty_cycle <= 100, "bad input value");
 451 
 452   // Note:  use subtraction with caution since it may underflow (values are
 453   // unsigned).  Addition is safe since we're in the range 0-100.
 454   unsigned int damped_duty_cycle = new_duty_cycle;
 455   if (new_duty_cycle < old_duty_cycle) {
 456     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
 457     if (new_duty_cycle + largest_delta < old_duty_cycle) {
 458       damped_duty_cycle = old_duty_cycle - largest_delta;
 459     }
 460   } else if (new_duty_cycle > old_duty_cycle) {
 461     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
 462     if (new_duty_cycle > old_duty_cycle + largest_delta) {
 463       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
 464     }
 465   }
 466   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
 467 
 468   if (CMSTraceIncrementalPacing) {
 469     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
 470                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
 471   }
 472   return damped_duty_cycle;
 473 }
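A small worked example of the damping (values illustrative only):

      //   Decrease: old = 60, new = 10.  largest_delta = MAX2(60/4, 5U) = 15,
      //     and 10 + 15 < 60, so the result is 60 - 15 = 45 rather than 10.
      //   Increase: old = 20, new = 100.  largest_delta = MAX2(20/4, 15U) = 15,
      //     and 100 > 20 + 15, so the result is MIN2(20 + 15, 100U) = 35.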
 474 
 475 unsigned int CMSStats::icms_update_duty_cycle_impl() {
 476   assert(CMSIncrementalPacing && valid(),
 477          "should be handled in icms_update_duty_cycle()");
 478 
 479   double cms_time_so_far = cms_timer().seconds();
 480   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
 481   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
 482 
 483   // Avoid division by 0.
 484   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
 485   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
 486 
 487   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
 488   if (new_duty_cycle > _icms_duty_cycle) {
 489     // Avoid very small duty cycles (1 or 2); 0 is allowed.
 490     if (new_duty_cycle > 2) {
 491       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
 492                                                 new_duty_cycle);
 493     }
 494   } else if (_allow_duty_cycle_reduction) {
 495     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
 496     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
 497     // Respect the minimum duty cycle.
 498     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
 499     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
 500   }
 501 
 502   if (PrintGCDetails || CMSTraceIncrementalPacing) {
 503     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
 504   }
 505 
 506   _allow_duty_cycle_reduction = false;
 507   return _icms_duty_cycle;
 508 }
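A sketch of the computation with invented numbers, assuming cms_duration_per_mb() is in seconds per MB:

      //   scaled_duration           = 0.01 * (400*M / M)  = 4.0s
      //   cms_time_so_far           = 1.0s
      //   scaled_duration_remaining = |4.0 - 1.0|         = 3.0s
      //   time_until_full           = 6.0s
      //   duty_cycle_dbl            = 100.0 * 3.0 / 6.0   = 50
      // so new_duty_cycle is 50%, then damped against the current value.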
 509 
 510 #ifndef PRODUCT
 511 void CMSStats::print_on(outputStream *st) const {
 512   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 513   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 514                gc0_duration(), gc0_period(), gc0_promoted());
 515   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 516             cms_duration(), cms_duration_per_mb(),
 517             cms_period(), cms_allocated());
 518   st->print(",cms_since_beg=%g,cms_since_end=%g",
 519             cms_time_since_begin(), cms_time_since_end());
 520   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 521             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 522   if (CMSIncrementalMode) {
 523     st->print(",dc=%d", icms_duty_cycle());
 524   }
 525 
 526   if (valid()) {
 527     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 528               promotion_rate(), cms_allocation_rate());
 529     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 530               cms_consumption_rate(), time_until_cms_gen_full());
 531   }
 532   st->print(" ");
 533 }
 534 #endif // #ifndef PRODUCT
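The output is a single comma-separated record; an illustrative line (all values invented) might look like:

      //  gc0_alpha=100,cms_alpha=100,gc0_dur=0.05,gc0_per=2.5,gc0_promo=8388608,cms_dur=1.8,cms_dur_per_mb=0.01,cms_per=60,cms_alloc=16777216,cms_since_beg=30,cms_since_end=25,cms_used_beg=104857600,cms_used_end=134217728,dc=20,promo_rate=3.3e+06,cms_alloc_rate=280000,cms_consumption_rate=3.6e+06,time_until_full=12.5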
 535 
 536 CMSCollector::CollectorState CMSCollector::_collectorState =
 537                              CMSCollector::Idling;
 538 bool CMSCollector::_foregroundGCIsActive = false;
 539 bool CMSCollector::_foregroundGCShouldWait = false;
 540 
 541 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 542                            CardTableRS*                   ct,
 543                            ConcurrentMarkSweepPolicy*     cp):
 544   _cmsGen(cmsGen),


 562   _stats(cmsGen),
 563   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 564   _eden_chunk_array(NULL),     // may be set in ctor body
 565   _eden_chunk_capacity(0),     // -- ditto --
 566   _eden_chunk_index(0),        // -- ditto --
 567   _survivor_plab_array(NULL),  // -- ditto --
 568   _survivor_chunk_array(NULL), // -- ditto --
 569   _survivor_chunk_capacity(0), // -- ditto --
 570   _survivor_chunk_index(0),    // -- ditto --
 571   _ser_pmc_preclean_ovflw(0),
 572   _ser_kac_preclean_ovflw(0),
 573   _ser_pmc_remark_ovflw(0),
 574   _par_pmc_remark_ovflw(0),
 575   _ser_kac_ovflw(0),
 576   _par_kac_ovflw(0),
 577 #ifndef PRODUCT
 578   _num_par_pushes(0),
 579 #endif
 580   _collection_count_start(0),
 581   _verifying(false),
 582   _icms_start_limit(NULL),
 583   _icms_stop_limit(NULL),
 584   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 585   _completed_initialization(false),
 586   _collector_policy(cp),
 587   _should_unload_classes(CMSClassUnloadingEnabled),
 588   _concurrent_cycles_since_last_unload(0),
 589   _roots_scanning_options(SharedHeap::SO_None),
 590   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 591   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 592   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 593   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 594   _cms_start_registered(false)
 595 {
 596   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 597     ExplicitGCInvokesConcurrent = true;
 598   }
 599   // Now expand the span and allocate the collection support structures
 600   // (MUT, marking bit map etc.) to cover both generations subject to
 601   // collection.
 602 
 603   // For use by dirty card to oop closures.


1107         } else {
1108           _modUnionTable.mark_range(mr);
1109         }
1110       } else {  // not an obj array; we can just mark the head
1111         if (par) {
1112           _modUnionTable.par_mark(start);
1113         } else {
1114           _modUnionTable.mark(start);
1115         }
1116       }
1117     }
1118   }
1119 }
1120 
1121 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1122 {
1123   size_t delta = pointer_delta(addr, space->bottom());
1124   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1125 }
1126 
1127 void CMSCollector::icms_update_allocation_limits()
1128 {
1129   Generation* young = GenCollectedHeap::heap()->get_gen(0);
1130   EdenSpace* eden = young->as_DefNewGeneration()->eden();
1131 
1132   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1133   if (CMSTraceIncrementalPacing) {
1134     stats().print();
1135   }
1136 
1137   assert(duty_cycle <= 100, "invalid duty cycle");
1138   if (duty_cycle != 0) {
1139     // The duty_cycle is a percentage between 0 and 100; convert to words and
1140     // then compute the offset from the endpoints of the space.
1141     size_t free_words = eden->free() / HeapWordSize;
1142     double free_words_dbl = (double)free_words;
1143     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1144     size_t offset_words = (free_words - duty_cycle_words) / 2;
1145 
1146     _icms_start_limit = eden->top() + offset_words;
1147     _icms_stop_limit = eden->end() - offset_words;
1148 
1149     // The limits may be adjusted (shifted to the right) by
1150     // CMSIncrementalOffset, to allow the application more mutator time after a
1151     // young gen gc (when all mutators were stopped) and before CMS starts and
1152     // takes away one or more cpus.
1153     if (CMSIncrementalOffset != 0) {
1154       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1155       size_t adjustment = (size_t)adjustment_dbl;
1156       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1157       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1158         _icms_start_limit += adjustment;
1159         _icms_stop_limit = tmp_stop;
1160       }
1161     }
1162   }
1163   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1164     _icms_start_limit = _icms_stop_limit = eden->end();
1165   }
1166 
1167   // Install the new start limit.
1168   eden->set_soft_end(_icms_start_limit);
1169 
1170   if (CMSTraceIncrementalMode) {
1171     gclog_or_tty->print(" icms alloc limits:  "
1172                            PTR_FORMAT "," PTR_FORMAT
1173                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1174                            p2i(_icms_start_limit), p2i(_icms_stop_limit),
1175                            percent_of_space(eden, _icms_start_limit),
1176                            percent_of_space(eden, _icms_stop_limit));
1177     if (Verbose) {
1178       gclog_or_tty->print("eden:  ");
1179       eden->print_on(gclog_or_tty);
1180     }
1181   }
1182 }
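A worked example of the limit placement (illustrative numbers):

      //   eden->free() = 1000 words, duty_cycle = 40%:
      //     duty_cycle_words = 1000 * 40 / 100  = 400
      //     offset_words     = (1000 - 400) / 2 = 300
      //   so _icms_start_limit = top() + 300 and _icms_stop_limit = end() - 300,
      //   i.e. icms covers the middle 40% of eden's free space.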
1183 
1184 // Any changes here should try to maintain the invariant
1185 // that if this method is called with _icms_start_limit
1186 // and _icms_stop_limit both NULL, then it should return NULL
1187 // and not notify the icms thread.
1188 HeapWord*
1189 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1190                                        size_t word_size)
1191 {
1192   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1193   // nop.
1194   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1195     if (top <= _icms_start_limit) {
1196       if (CMSTraceIncrementalMode) {
1197         space->print_on(gclog_or_tty);
1198         gclog_or_tty->stamp();
1199         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1200                                ", new limit=" PTR_FORMAT
1201                                " (" SIZE_FORMAT "%%)",
1202                                p2i(top), p2i(_icms_stop_limit),
1203                                percent_of_space(space, _icms_stop_limit));
1204       }
1205       ConcurrentMarkSweepThread::start_icms();
1206       assert(top < _icms_stop_limit, "Tautology");
1207       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1208         return _icms_stop_limit;
1209       }
1210 
1211       // The allocation will cross both the _start and _stop limits, so do the
1212       // stop notification also and return end().
1213       if (CMSTraceIncrementalMode) {
1214         space->print_on(gclog_or_tty);
1215         gclog_or_tty->stamp();
1216         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1217                                ", new limit=" PTR_FORMAT
1218                                " (" SIZE_FORMAT "%%)",
1219                                p2i(top), p2i(space->end()),
1220                                percent_of_space(space, space->end()));
1221       }
1222       ConcurrentMarkSweepThread::stop_icms();
1223       return space->end();
1224     }
1225 
1226     if (top <= _icms_stop_limit) {
1227       if (CMSTraceIncrementalMode) {
1228         space->print_on(gclog_or_tty);
1229         gclog_or_tty->stamp();
1230         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1231                                ", new limit=" PTR_FORMAT
1232                                " (" SIZE_FORMAT "%%)",
 1233                                p2i(top), p2i(space->end()),
1234                                percent_of_space(space, space->end()));
1235       }
1236       ConcurrentMarkSweepThread::stop_icms();
1237       return space->end();
1238     }
1239 
1240     if (CMSTraceIncrementalMode) {
1241       space->print_on(gclog_or_tty);
1242       gclog_or_tty->stamp();
1243       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1244                              ", new limit=" PTR_FORMAT,
 1245                              p2i(top), NULL);
1246     }
1247   }
1248 
1249   return NULL;
1250 }
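In summary (a sketch derived from the code above, not new behavior), eden's soft end steps through three zones as top() advances:

      //   top <= _icms_start_limit: notify start_icms() and return
      //     _icms_stop_limit as the new soft end (or end(), with a
      //     stop_icms() notification, if word_size crosses both limits).
      //   _icms_start_limit < top <= _icms_stop_limit: notify stop_icms()
      //     and return end().
      //   otherwise: return NULL and do not notify the icms thread.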
1251 
1252 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1253   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1254   // allocate, copy and if necessary update promoinfo --
1255   // delegate to underlying space.
1256   assert_lock_strong(freelistLock());
1257 
1258 #ifndef PRODUCT
1259   if (Universe::heap()->promotion_should_fail()) {
1260     return NULL;
1261   }
1262 #endif  // #ifndef PRODUCT
1263 
1264   oop res = _cmsSpace->promote(obj, obj_size);
1265   if (res == NULL) {
1266     // expand and retry
1267     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1268     expand(s*HeapWordSize, MinHeapDeltaBytes,
1269       CMSExpansionCause::_satisfy_promotion);
1270     // Since there's currently no next generation, we don't try to promote
1271     // into a more senior generation.


1274                                "promotion to next generation");
1275     res = _cmsSpace->promote(obj, obj_size);
1276   }
1277   if (res != NULL) {
1278     // See comment in allocate() about when objects should
1279     // be allocated live.
1280     assert(obj->is_oop(), "Will dereference klass pointer below");
1281     collector()->promoted(false,           // Not parallel
1282                           (HeapWord*)res, obj->is_objArray(), obj_size);
1283     // promotion counters
1284     NOT_PRODUCT(
1285       _numObjectsPromoted++;
1286       _numWordsPromoted +=
1287         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1288     )
1289   }
1290   return res;
1291 }
1292 
1293 
1294 HeapWord*
1295 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1296                                              HeapWord* top,
1297                                              size_t word_sz)
1298 {
1299   return collector()->allocation_limit_reached(space, top, word_sz);
1300 }
1301 
1302 // IMPORTANT: Notes on object size recognition in CMS.
1303 // ---------------------------------------------------
1304 // A block of storage in the CMS generation is always in
1305 // one of three states. A free block (FREE), an allocated
1306 // object (OBJECT) whose size() method reports the correct size,
1307 // and an intermediate state (TRANSIENT) in which its size cannot
1308 // be accurately determined.
1309 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1310 // -----------------------------------------------------
1311 // FREE:      klass_word & 1 == 1; mark_word holds block size
1312 //
1313 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1314 //            obj->size() computes correct size
1315 //
1316 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1317 //
1318 // STATE IDENTIFICATION: (64 bit+COOPS)
1319 // ------------------------------------
1320 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1321 //


1794 //
1795 
1796 void CMSCollector::acquire_control_and_collect(bool full,
1797         bool clear_all_soft_refs) {
1798   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1799   assert(!Thread::current()->is_ConcurrentGC_thread(),
1800          "shouldn't try to acquire control from self!");
1801 
1802   // Start the protocol for acquiring control of the
1803   // collection from the background collector (aka CMS thread).
1804   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1805          "VM thread should have CMS token");
1806   // Remember the possibly interrupted state of an ongoing
1807   // concurrent collection
1808   CollectorState first_state = _collectorState;
1809 
1810   // Signal to a possibly ongoing concurrent collection that
1811   // we want to do a foreground collection.
1812   _foregroundGCIsActive = true;
1813 
1814   // Disable incremental mode during a foreground collection.
1815   ICMSDisabler icms_disabler;
1816 
1817   // release locks and wait for a notify from the background collector
 1818   // Releasing the locks is only necessary for phases which
 1819   // yield, to improve the granularity of the collection.
1820   assert_lock_strong(bitMapLock());
1821   // We need to lock the Free list lock for the space that we are
1822   // currently collecting.
1823   assert(haveFreelistLocks(), "Must be holding free list locks");
1824   bitMapLock()->unlock();
1825   releaseFreelistLocks();
1826   {
1827     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1828     if (_foregroundGCShouldWait) {
1829       // We are going to be waiting for action for the CMS thread;
1830       // it had better not be gone (for instance at shutdown)!
1831       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1832              "CMS thread must be running");
1833       // Wait here until the background collector gives us the go-ahead
1834       ConcurrentMarkSweepThread::clear_CMS_flag(
1835         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1836       // Get a possibly blocked CMS thread going:


2120       }
2121       // If first_state was not Idling, then a background GC
2122       // was in progress and has now finished.  No need to do it
2123       // again.  Leave the state as Idling.
2124       break;
2125     case Precleaning:
2126       // In the foreground case don't do the precleaning since
2127       // it is not done concurrently and there is extra work
2128       // required.
2129       _collectorState = FinalMarking;
2130   }
2131   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2132 
2133   // For a mark-sweep, compute_new_size() will be called
2134   // in the heap's do_collection() method.
2135 }
2136 
2137 
2138 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2139   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2140   EdenSpace* eden_space = dng->eden();
2141   ContiguousSpace* from_space = dng->from();
2142   ContiguousSpace* to_space   = dng->to();
2143   // Eden
2144   if (_eden_chunk_array != NULL) {
2145     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2146                            eden_space->bottom(), eden_space->top(),
2147                            eden_space->end(), eden_space->capacity());
2148     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2149                            "_eden_chunk_capacity=" SIZE_FORMAT,
2150                            _eden_chunk_index, _eden_chunk_capacity);
2151     for (size_t i = 0; i < _eden_chunk_index; i++) {
2152       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2153                              i, _eden_chunk_array[i]);
2154     }
2155   }
2156   // Survivor
2157   if (_survivor_chunk_array != NULL) {
2158     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2159                            from_space->bottom(), from_space->top(),
2160                            from_space->end(), from_space->capacity());


2768 
2769   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2770 
2771   _cmsGen->gc_epilogue_work(full);
2772 
2773   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2774     // in case sampling was not already enabled, enable it
2775     _start_sampling = true;
2776   }
2777   // reset _eden_chunk_array so sampling starts afresh
2778   _eden_chunk_index = 0;
2779 
2780   size_t cms_used   = _cmsGen->cmsSpace()->used();
2781 
2782   // update performance counters - this uses a special version of
2783   // update_counters() that allows the utilization to be passed as a
2784   // parameter, avoiding multiple calls to used().
2785   //
2786   _cmsGen->update_counters(cms_used);
2787 
2788   if (CMSIncrementalMode) {
2789     icms_update_allocation_limits();
2790   }
2791 
2792   bitMapLock()->unlock();
2793   releaseFreelistLocks();
2794 
2795   if (!CleanChunkPoolAsync) {
2796     Chunk::clean_chunk_pool();
2797   }
2798 
2799   set_did_compact(false);
2800   _between_prologue_and_epilogue = false;  // ready for next cycle
2801 }
2802 
2803 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2804   collector()->gc_epilogue(full);
2805 
2806   // Also reset promotion tracking in par gc thread states.
2807   if (CollectedHeap::use_parallel_gc_threads()) {
2808     for (uint i = 0; i < ParallelGCThreads; i++) {
2809       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2810     }
2811   }


4259     } else if (terminator()->offer_termination(&_term_term)) {
4260       assert(work_q->size() == 0, "Impossible!");
4261       break;
4262     } else if (yielding() || should_yield()) {
4263       yield();
4264     }
4265   }
4266 }
4267 
4268 // This is run by the CMS (coordinator) thread.
4269 void CMSConcMarkingTask::coordinator_yield() {
4270   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4271          "CMS thread should hold CMS token");
4272   // First give up the locks, then yield, then re-lock
4273   // We should probably use a constructor/destructor idiom to
4274   // do this unlock/lock or modify the MutexUnlocker class to
4275   // serve our purpose. XXX
4276   assert_lock_strong(_bit_map_lock);
4277   _bit_map_lock->unlock();
4278   ConcurrentMarkSweepThread::desynchronize(true);
4279   ConcurrentMarkSweepThread::acknowledge_yield_request();
4280   _collector->stopTimer();
4281   if (PrintCMSStatistics != 0) {
4282     _collector->incrementYields();
4283   }
4284   _collector->icms_wait();
4285 
4286   // It is possible for whichever thread initiated the yield request
4287   // not to get a chance to wake up and take the bitmap lock between
4288   // this thread releasing it and reacquiring it. So, while the
4289   // should_yield() flag is on, let's sleep for a bit to give the
4290   // other thread a chance to wake up. The limit imposed on the number
 4291   // of iterations is defensive, to avoid any unforeseen circumstances
4292   // putting us into an infinite loop. Since it's always been this
4293   // (coordinator_yield()) method that was observed to cause the
4294   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4295   // which is by default non-zero. For the other seven methods that
 4296   // also perform the yield operation, we are using a different
 4297   // parameter (CMSYieldSleepCount), which is by default zero. This way we
4298   // can enable the sleeping for those methods too, if necessary.
4299   // See 6442774.
4300   //
4301   // We really need to reconsider the synchronization between the GC
4302   // thread and the yield-requesting threads in the future and we
4303   // should really use wait/notify, which is the recommended
4304   // way of doing this type of interaction. Additionally, we should
 4305   // consolidate the eight methods that do the yield operation, which
 4306   // are almost identical, into one for better maintainability and
4307   // readability. See 6445193.
4308   //
4309   // Tony 2006.06.29
4310   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4311                    ConcurrentMarkSweepThread::should_yield() &&
4312                    !CMSCollector::foregroundGCIsActive(); ++i) {
4313     os::sleep(Thread::current(), 1, false);
4314     ConcurrentMarkSweepThread::acknowledge_yield_request();
4315   }
4316 
4317   ConcurrentMarkSweepThread::synchronize(true);
4318   _bit_map_lock->lock_without_safepoint_check();
4319   _collector->startTimer();
4320 }
4321 
4322 bool CMSCollector::do_marking_mt(bool asynch) {
4323   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4324   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4325                                        conc_workers()->total_workers(),
4326                                        conc_workers()->active_workers(),
4327                                        Threads::number_of_non_daemon_threads());
4328   conc_workers()->set_active_workers(num_workers);
4329 
4330   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4331 
4332   CMSConcMarkingTask tsk(this,
4333                          cms_space,
4334                          asynch,


5225     // Check if we have modified any oops in the Klass during the concurrent marking.
5226     if (k->has_accumulated_modified_oops()) {
5227       k->clear_accumulated_modified_oops();
5228 
 5229       // We could have transferred the current modified marks to the accumulated marks,
5230       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5231     } else if (k->has_modified_oops()) {
5232       // Don't clear anything, this info is needed by the next young collection.
5233     } else {
5234       // No modified oops in the Klass.
5235       return;
5236     }
5237 
5238     // The klass has modified fields, need to scan the klass.
5239     _cm_klass_closure.do_klass(k);
5240   }
5241 };
5242 
5243 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5244   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5245   EdenSpace* eden_space = dng->eden();
5246   ContiguousSpace* from_space = dng->from();
5247   ContiguousSpace* to_space   = dng->to();
5248 
5249   HeapWord** eca = _collector->_eden_chunk_array;
5250   size_t     ect = _collector->_eden_chunk_index;
5251   HeapWord** sca = _collector->_survivor_chunk_array;
5252   size_t     sct = _collector->_survivor_chunk_index;
5253 
5254   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5255   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5256 
5257   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5258   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5259   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5260 }
5261 
5262 // work_queue(i) is passed to the closure
5263 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5264 // also is passed to do_dirty_card_rescan_tasks() and to
5265 // do_work_steal() to select the i-th task_queue.


6461     }
6462 
6463     // Clear the mark bitmap (no grey objects to start with)
6464     // for the next cycle.
6465     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6466     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6467 
6468     HeapWord* curAddr = _markBitMap.startWord();
6469     while (curAddr < _markBitMap.endWord()) {
6470       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6471       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6472       _markBitMap.clear_large_range(chunk);
6473       if (ConcurrentMarkSweepThread::should_yield() &&
6474           !foregroundGCIsActive() &&
6475           CMSYield) {
6476         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6477                "CMS thread should hold CMS token");
6478         assert_lock_strong(bitMapLock());
6479         bitMapLock()->unlock();
6480         ConcurrentMarkSweepThread::desynchronize(true);
6481         ConcurrentMarkSweepThread::acknowledge_yield_request();
6482         stopTimer();
6483         if (PrintCMSStatistics != 0) {
6484           incrementYields();
6485         }
6486         icms_wait();
6487 
6488         // See the comment in coordinator_yield()
6489         for (unsigned i = 0; i < CMSYieldSleepCount &&
6490                          ConcurrentMarkSweepThread::should_yield() &&
6491                          !CMSCollector::foregroundGCIsActive(); ++i) {
6492           os::sleep(Thread::current(), 1, false);
6493           ConcurrentMarkSweepThread::acknowledge_yield_request();
6494         }
6495 
6496         ConcurrentMarkSweepThread::synchronize(true);
6497         bitMapLock()->lock_without_safepoint_check();
6498         startTimer();
6499       }
6500       curAddr = chunk.end();
6501     }
6502     // A successful mostly concurrent collection has been done.
6503     // Because only the full (i.e., concurrent mode failure) collections
6504     // are being measured for gc overhead limits, clean the "near" flag
6505     // and count.
6506     size_policy()->reset_gc_overhead_limit_count();
6507     _collectorState = Idling;
6508   } else {
6509     // already have the lock
6510     assert(_collectorState == Resetting, "just checking");
6511     assert_lock_strong(bitMapLock());
6512     _markBitMap.clear_all();
6513     _collectorState = Idling;
6514   }
6515 
6516   // Stop incremental mode after a cycle completes, so that any future cycles
6517   // are triggered by allocation.
6518   stop_icms();
6519 
6520   NOT_PRODUCT(
6521     if (RotateCMSCollectionTypes) {
6522       _cmsGen->rotate_debug_collection_type();
6523     }
6524   )
6525 
6526   register_gc_end();
6527 }
6528 
6529 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6530   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6531   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6532   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6533   TraceCollectorStats tcs(counters());
6534 
6535   switch (op) {
6536     case CMS_op_checkpointRootsInitial: {
6537       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6538       checkpointRootsInitial(true);       // asynch
6539       if (PrintGC) {


6951       _collector->restore_preserved_marks_if_any();
6952       assert(_collector->no_preserved_marks(), "No preserved marks");
6953     }
6954     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6955            "All preserved marks should have been restored above");
6956   }
6957 }
6958 
6959 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6960 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6961 
6962 void MarkRefsIntoAndScanClosure::do_yield_work() {
6963   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6964          "CMS thread should hold CMS token");
6965   assert_lock_strong(_freelistLock);
6966   assert_lock_strong(_bit_map->lock());
 6967   // relinquish the free_list_lock and bitMapLock()
6968   _bit_map->lock()->unlock();
6969   _freelistLock->unlock();
6970   ConcurrentMarkSweepThread::desynchronize(true);
6971   ConcurrentMarkSweepThread::acknowledge_yield_request();
6972   _collector->stopTimer();
6973   if (PrintCMSStatistics != 0) {
6974     _collector->incrementYields();
6975   }
6976   _collector->icms_wait();
6977 
6978   // See the comment in coordinator_yield()
6979   for (unsigned i = 0;
6980        i < CMSYieldSleepCount &&
6981        ConcurrentMarkSweepThread::should_yield() &&
6982        !CMSCollector::foregroundGCIsActive();
6983        ++i) {
6984     os::sleep(Thread::current(), 1, false);
6985     ConcurrentMarkSweepThread::acknowledge_yield_request();
6986   }
6987 
6988   ConcurrentMarkSweepThread::synchronize(true);
6989   _freelistLock->lock_without_safepoint_check();
6990   _bit_map->lock()->lock_without_safepoint_check();
6991   _collector->startTimer();
6992 }
6993 
6994 ///////////////////////////////////////////////////////////
6995 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6996 //                                 MarkRefsIntoAndScanClosure
6997 ///////////////////////////////////////////////////////////
6998 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6999   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7000   CMSBitMap* bit_map, OopTaskQueue* work_queue):
7001   _span(span),
7002   _bit_map(bit_map),
7003   _work_queue(work_queue),
7004   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7005                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),


7111     } else {
7112       // An object not (yet) reached by marking: we merely need to
7113       // compute its size so as to go look at the next block.
7114       assert(p->is_oop(true), "should be an oop");
7115       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7116     }
7117   }
7118   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7119   return size;
7120 }
7121 
7122 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7123   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7124          "CMS thread should hold CMS token");
7125   assert_lock_strong(_freelistLock);
7126   assert_lock_strong(_bitMap->lock());
 7127   // relinquish the free_list_lock and bitMapLock()
7128   _bitMap->lock()->unlock();
7129   _freelistLock->unlock();
7130   ConcurrentMarkSweepThread::desynchronize(true);
7131   ConcurrentMarkSweepThread::acknowledge_yield_request();
7132   _collector->stopTimer();
7133   if (PrintCMSStatistics != 0) {
7134     _collector->incrementYields();
7135   }
7136   _collector->icms_wait();
7137 
7138   // See the comment in coordinator_yield()
7139   for (unsigned i = 0; i < CMSYieldSleepCount &&
7140                    ConcurrentMarkSweepThread::should_yield() &&
7141                    !CMSCollector::foregroundGCIsActive(); ++i) {
7142     os::sleep(Thread::current(), 1, false);
7143     ConcurrentMarkSweepThread::acknowledge_yield_request();
7144   }
7145 
7146   ConcurrentMarkSweepThread::synchronize(true);
7147   _freelistLock->lock_without_safepoint_check();
7148   _bitMap->lock()->lock_without_safepoint_check();
7149   _collector->startTimer();
7150 }
7151 
7152 
7153 //////////////////////////////////////////////////////////////////
7154 // SurvivorSpacePrecleanClosure
7155 //////////////////////////////////////////////////////////////////
7156 // This (single-threaded) closure is used to preclean the oops in
7157 // the survivor spaces.
7158 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7159 
7160   HeapWord* addr = (HeapWord*)p;
7161   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7162   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7163   assert(p->klass_or_null() != NULL, "object should be initialized");


7183     // iterate over the oops in this oop, marking and pushing
7184     // the ones in CMS heap (i.e. in _span).
7185     new_oop->oop_iterate(_scanning_closure);
7186     // check if it's time to yield
7187     do_yield_check();
7188   }
7189   unsigned int after_count =
7190     GenCollectedHeap::heap()->total_collections();
7191   bool abort = (_before_count != after_count) ||
7192                _collector->should_abort_preclean();
7193   return abort ? 0 : size;
7194 }
7195 
7196 void SurvivorSpacePrecleanClosure::do_yield_work() {
7197   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7198          "CMS thread should hold CMS token");
7199   assert_lock_strong(_bit_map->lock());
7200   // Relinquish the bit map lock
7201   _bit_map->lock()->unlock();
7202   ConcurrentMarkSweepThread::desynchronize(true);
7203   ConcurrentMarkSweepThread::acknowledge_yield_request();
7204   _collector->stopTimer();
7205   if (PrintCMSStatistics != 0) {
7206     _collector->incrementYields();
7207   }
7208   _collector->icms_wait();
7209 
7210   // See the comment in coordinator_yield()
7211   for (unsigned i = 0; i < CMSYieldSleepCount &&
7212                        ConcurrentMarkSweepThread::should_yield() &&
7213                        !CMSCollector::foregroundGCIsActive(); ++i) {
7214     os::sleep(Thread::current(), 1, false);
7215     ConcurrentMarkSweepThread::acknowledge_yield_request();
7216   }
7217 
7218   ConcurrentMarkSweepThread::synchronize(true);
7219   _bit_map->lock()->lock_without_safepoint_check();
7220   _collector->startTimer();
7221 }
7222 
7223 // This closure is used to rescan the marked objects on the dirty cards
7224 // in the mod union table and the card table proper. In the parallel
7225 // case, although the bitMap is shared, we do a single read so the
7226 // isMarked() query is "safe".
7227 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7228   // Ignore mark word because we are running concurrent with mutators
7229   assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
7230   HeapWord* addr = (HeapWord*)p;
7231   assert(_span.contains(addr), "we are scanning the CMS generation");
7232   bool is_obj_array = false;
7233   #ifdef ASSERT
7234     if (!_parallel) {
7235       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");


7341       DEBUG_ONLY(})
7342       return true;
7343     }
7344   }
7345   scanOopsInOop(addr);
7346   return true;
7347 }
7348 
7349 // We take a break if we've been at this for a while,
7350 // so as to avoid monopolizing the locks involved.
7351 void MarkFromRootsClosure::do_yield_work() {
7352   // First give up the locks, then yield, then re-lock
7353   // We should probably use a constructor/destructor idiom to
7354   // do this unlock/lock or modify the MutexUnlocker class to
7355   // serve our purpose. XXX
7356   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7357          "CMS thread should hold CMS token");
7358   assert_lock_strong(_bitMap->lock());
7359   _bitMap->lock()->unlock();
7360   ConcurrentMarkSweepThread::desynchronize(true);
7361   ConcurrentMarkSweepThread::acknowledge_yield_request();
7362   _collector->stopTimer();
7363   if (PrintCMSStatistics != 0) {
7364     _collector->incrementYields();
7365   }
7366   _collector->icms_wait();
7367 
7368   // See the comment in coordinator_yield()
7369   for (unsigned i = 0; i < CMSYieldSleepCount &&
7370                        ConcurrentMarkSweepThread::should_yield() &&
7371                        !CMSCollector::foregroundGCIsActive(); ++i) {
7372     os::sleep(Thread::current(), 1, false);
7373     ConcurrentMarkSweepThread::acknowledge_yield_request();
7374   }
7375 
7376   ConcurrentMarkSweepThread::synchronize(true);
7377   _bitMap->lock()->lock_without_safepoint_check();
7378   _collector->startTimer();
7379 }
7380 
7381 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7382   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7383   assert(_markStack->isEmpty(),
7384          "should drain stack to limit stack usage");
7385   // convert ptr to an oop preparatory to scanning
7386   oop obj = oop(ptr);
7387   // Ignore mark word in verification below, since we
7388   // may be running concurrent with mutators.
7389   assert(obj->is_oop(true), "should be an oop");
7390   assert(_finger <= ptr, "_finger runneth ahead");
7391   // advance the finger to right end of this object
7392   _finger = ptr + obj->size();
7393   assert(_finger > ptr, "we just incremented it above");
7394   // On large heaps, it may take us some time to get through
7395   // the marking phase (especially if running iCMS). During
7396   // this time it's possible that a lot of mutations have
7397   // accumulated in the card table and the mod union table --
7398   // these mutation records are redundant until we have
7399   // actually traced into the corresponding card.
7400   // Here, we check whether advancing the finger would make
7401   // us cross into a new card, and if so clear corresponding
7402   // cards in the MUT (preclean them in the card-table in the
7403   // future).
7404 
7405   DEBUG_ONLY(if (!_verifying) {)
7406     // The clean-on-enter optimization is disabled by default,
7407     // until we fix 6178663.
7408     if (CMSCleanOnEnter && (_finger > _threshold)) {
7409       // [_threshold, _finger) represents the interval
7410       // of cards to be cleared  in MUT (or precleaned in card table).
7411       // The set of cards to be cleared is all those that overlap
7412       // with the interval [_threshold, _finger); note that
7413       // _threshold is always kept card-aligned but _finger isn't
7414       // always card-aligned.
7415       HeapWord* old_threshold = _threshold;


7492   scan_oops_in_oop(addr);
7493   return true;
7494 }
7495 
7496 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7497   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7498   // Should we assert that our work queue is empty or
7499   // below some drain limit?
7500   assert(_work_queue->size() == 0,
7501          "should drain stack to limit stack usage");
7502   // convert ptr to an oop preparatory to scanning
7503   oop obj = oop(ptr);
7504   // Ignore mark word in verification below, since we
7505   // may be running concurrent with mutators.
7506   assert(obj->is_oop(true), "should be an oop");
7507   assert(_finger <= ptr, "_finger runneth ahead");
7508   // advance the finger to right end of this object
7509   _finger = ptr + obj->size();
7510   assert(_finger > ptr, "we just incremented it above");
7511   // On large heaps, it may take us some time to get through
7512   // the marking phase (especially if running iCMS). During
7513   // this time it's possible that a lot of mutations have
7514   // accumulated in the card table and the mod union table --
7515   // these mutation records are redundant until we have
7516   // actually traced into the corresponding card.
7517   // Here, we check whether advancing the finger would make
7518   // us cross into a new card, and if so clear corresponding
7519   // cards in the MUT (preclean them in the card-table in the
7520   // future).
7521 
7522   // The clean-on-enter optimization is disabled by default,
7523   // until we fix 6178663.
7524   if (CMSCleanOnEnter && (_finger > _threshold)) {
7525     // [_threshold, _finger) represents the interval
7526     // of cards to be cleared  in MUT (or precleaned in card table).
7527     // The set of cards to be cleared is all those that overlap
7528     // with the interval [_threshold, _finger); note that
7529     // _threshold is always kept card-aligned but _finger isn't
7530     // always card-aligned.
7531     HeapWord* old_threshold = _threshold;
7532     assert(old_threshold == (HeapWord*)round_to(


7981       if (simulate_overflow || !_work_queue->push(obj)) {
7982         _collector->par_push_on_overflow_list(obj);
7983         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7984       }
7985     } // Else, some other thread got there first
7986   }
7987 }
7988 
7989 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7990 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7991 
7992 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7993   Mutex* bml = _collector->bitMapLock();
7994   assert_lock_strong(bml);
7995   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7996          "CMS thread should hold CMS token");
7997 
7998   bml->unlock();
7999   ConcurrentMarkSweepThread::desynchronize(true);
8000 
8001   ConcurrentMarkSweepThread::acknowledge_yield_request();
8002 
8003   _collector->stopTimer();
8004   if (PrintCMSStatistics != 0) {
8005     _collector->incrementYields();
8006   }
8007   _collector->icms_wait();
8008 
8009   // See the comment in coordinator_yield()
8010   for (unsigned i = 0; i < CMSYieldSleepCount &&
8011                        ConcurrentMarkSweepThread::should_yield() &&
8012                        !CMSCollector::foregroundGCIsActive(); ++i) {
8013     os::sleep(Thread::current(), 1, false);
8014     ConcurrentMarkSweepThread::acknowledge_yield_request();
8015   }
8016 
8017   ConcurrentMarkSweepThread::synchronize(true);
8018   bml->lock();
8019 
8020   _collector->startTimer();
8021 }
8022 
8023 bool CMSPrecleanRefsYieldClosure::should_return() {
8024   if (ConcurrentMarkSweepThread::should_yield()) {
8025     do_yield_work();
8026   }
8027   return _collector->foregroundGCIsActive();
8028 }
8029 
8030 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8031   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8032          "mr should be aligned to start at a card boundary");
8033   // We'd like to assert:
8034   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,


8662   // to the appropriate freelist.  After yielding, the next
8663   // free block encountered will start a coalescing range of
8664   // free blocks.  If the next free block is adjacent to the
8665   // chunk just flushed, they will need to wait for the next
8666   // sweep to be coalesced.
8667   if (inFreeRange()) {
8668     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8669   }
8670 
8671   // First give up the locks, then yield, then re-lock.
8672   // We should probably use a constructor/destructor idiom to
8673   // do this unlock/lock or modify the MutexUnlocker class to
8674   // serve our purpose. XXX
8675   assert_lock_strong(_bitMap->lock());
8676   assert_lock_strong(_freelistLock);
8677   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8678          "CMS thread should hold CMS token");
8679   _bitMap->lock()->unlock();
8680   _freelistLock->unlock();
8681   ConcurrentMarkSweepThread::desynchronize(true);
8682   ConcurrentMarkSweepThread::acknowledge_yield_request();
8683   _collector->stopTimer();
8684   if (PrintCMSStatistics != 0) {
8685     _collector->incrementYields();
8686   }
8687   _collector->icms_wait();
8688 
8689   // See the comment in coordinator_yield()
8690   for (unsigned i = 0; i < CMSYieldSleepCount &&
8691                        ConcurrentMarkSweepThread::should_yield() &&
8692                        !CMSCollector::foregroundGCIsActive(); ++i) {
8693     os::sleep(Thread::current(), 1, false);
8694     ConcurrentMarkSweepThread::acknowledge_yield_request();
8695   }
8696 
8697   ConcurrentMarkSweepThread::synchronize(true);
8698   _freelistLock->lock();
8699   _bitMap->lock()->lock_without_safepoint_check();
8700   _collector->startTimer();
8701 }
8702 
8703 #ifndef PRODUCT
8704 // This is actually very useful in a product build if it can
8705 // be called from the debugger.  Compile it into the product
8706 // as needed.
8707 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8708   return debug_cms_space->verify_chunk_in_free_list(fc);
8709 }
8710 #endif
8711 
8712 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8713   if (CMSTraceSweeper) {
8714     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",




 150 };
 151 
 152 // Convenience class that does a CMSTokenSync, and then acquires
 153 // up to three locks.
 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
 155  private:
 156   // Note: locks are acquired in textual declaration order
 157   // and released in the opposite order
 158   MutexLockerEx _locker1, _locker2, _locker3;
 159  public:
 160   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 161                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 162     CMSTokenSync(is_cms_thread),
 163     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 164     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 165     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 166   { }
 167 };
 168 
 169 










 170 //////////////////////////////////////////////////////////////////
 171 //  Concurrent Mark-Sweep Generation /////////////////////////////
 172 //////////////////////////////////////////////////////////////////
 173 
 174 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 175 
 176 // This class holds the per-thread state needed to support parallel
 177 // young-gen collection.
 178 class CMSParGCThreadState: public CHeapObj<mtGC> {
 179  public:
 180   CFLS_LAB lab;
 181   PromotionInfo promo;
 182 
 183   // Constructor.
 184   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 185     promo.setSpace(cfls);
 186   }
 187 };
 188 
 189 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(


 336   _saved_alpha = alpha;
 337 
 338   // Initialize the alphas to the bootstrap value of 100.
 339   _gc0_alpha = _cms_alpha = 100;
 340 
 341   _cms_begin_time.update();
 342   _cms_end_time.update();
 343 
 344   _gc0_duration = 0.0;
 345   _gc0_period = 0.0;
 346   _gc0_promoted = 0;
 347 
 348   _cms_duration = 0.0;
 349   _cms_period = 0.0;
 350   _cms_allocated = 0;
 351 
 352   _cms_used_at_gc0_begin = 0;
 353   _cms_used_at_gc0_end = 0;
 354   _allow_duty_cycle_reduction = false;
 355   _valid_bits = 0;

 356 }
 357 
 358 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 359   // TBD: CR 6909490
 360   return 1.0;
 361 }
 362 
 363 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 364 }
 365 
 366 // If promotion failure handling is on, use
 367 // the padded average size of the promotion for each
 368 // young generation collection.
 369 double CMSStats::time_until_cms_gen_full() const {
 370   size_t cms_free = _cms_gen->cmsSpace()->free();
 371   GenCollectedHeap* gch = GenCollectedHeap::heap();
 372   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 373                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 374   if (cms_free > expected_promotion) {
 375     // Start a cms collection if there isn't enough space to promote


 414   // account for that much possible delay
 415   // in the query so as to avoid concurrent mode failures
 416   // due to starting the collection just a wee bit too
 417   // late.
 418   double work = cms_duration() + gc0_period();
 419   double deadline = time_until_cms_gen_full();
 420   // If a concurrent mode failure occurred recently, we want to be
 421   // more conservative and halve our expected time_until_cms_gen_full()
 422   if (work > deadline) {
 423     if (Verbose && PrintGCDetails) {
 424       gclog_or_tty->print(
 425         " CMSCollector: collect because of anticipated promotion "
 426         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 427         gc0_period(), time_until_cms_gen_full());
 428     }
 429     return 0.0;
 430   }
 431   return deadline - work;
 432 }
 433 

































































 434 #ifndef PRODUCT
 435 void CMSStats::print_on(outputStream *st) const {
 436   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 437   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 438                gc0_duration(), gc0_period(), gc0_promoted());
 439   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 440             cms_duration(), cms_duration_per_mb(),
 441             cms_period(), cms_allocated());
 442   st->print(",cms_since_beg=%g,cms_since_end=%g",
 443             cms_time_since_begin(), cms_time_since_end());
 444   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 445             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);



 446 
 447   if (valid()) {
 448     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 449               promotion_rate(), cms_allocation_rate());
 450     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 451               cms_consumption_rate(), time_until_cms_gen_full());
 452   }
 453   st->print(" ");
 454 }
 455 #endif // #ifndef PRODUCT
 456 
 457 CMSCollector::CollectorState CMSCollector::_collectorState =
 458                              CMSCollector::Idling;
 459 bool CMSCollector::_foregroundGCIsActive = false;
 460 bool CMSCollector::_foregroundGCShouldWait = false;
 461 
 462 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 463                            CardTableRS*                   ct,
 464                            ConcurrentMarkSweepPolicy*     cp):
 465   _cmsGen(cmsGen),


 483   _stats(cmsGen),
 484   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 485   _eden_chunk_array(NULL),     // may be set in ctor body
 486   _eden_chunk_capacity(0),     // -- ditto --
 487   _eden_chunk_index(0),        // -- ditto --
 488   _survivor_plab_array(NULL),  // -- ditto --
 489   _survivor_chunk_array(NULL), // -- ditto --
 490   _survivor_chunk_capacity(0), // -- ditto --
 491   _survivor_chunk_index(0),    // -- ditto --
 492   _ser_pmc_preclean_ovflw(0),
 493   _ser_kac_preclean_ovflw(0),
 494   _ser_pmc_remark_ovflw(0),
 495   _par_pmc_remark_ovflw(0),
 496   _ser_kac_ovflw(0),
 497   _par_kac_ovflw(0),
 498 #ifndef PRODUCT
 499   _num_par_pushes(0),
 500 #endif
 501   _collection_count_start(0),
 502   _verifying(false),


 503   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 504   _completed_initialization(false),
 505   _collector_policy(cp),
 506   _should_unload_classes(CMSClassUnloadingEnabled),
 507   _concurrent_cycles_since_last_unload(0),
 508   _roots_scanning_options(SharedHeap::SO_None),
 509   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 510   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 511   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 512   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 513   _cms_start_registered(false)
 514 {
 515   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 516     ExplicitGCInvokesConcurrent = true;
 517   }
 518   // Now expand the span and allocate the collection support structures
 519   // (MUT, marking bit map etc.) to cover both generations subject to
 520   // collection.
 521 
 522   // For use by dirty card to oop closures.


1026         } else {
1027           _modUnionTable.mark_range(mr);
1028         }
1029       } else {  // not an obj array; we can just mark the head
1030         if (par) {
1031           _modUnionTable.par_mark(start);
1032         } else {
1033           _modUnionTable.mark(start);
1034         }
1035       }
1036     }
1037   }
1038 }
1039 
1040 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1041 {
1042   size_t delta = pointer_delta(addr, space->bottom());
1043   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1044 }
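// Worked example (illustrative numbers): for a space with
// capacity() / HeapWordSize == 800, an addr 200 HeapWords past bottom()
// gives delta == 200, so the result is (size_t)(200 * 100.0 / 800) == 25,
// i.e. addr sits 25% of the way into the space.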
1045 
1046 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1047   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1048   // allocate, copy and if necessary update promoinfo --
1049   // delegate to underlying space.
1050   assert_lock_strong(freelistLock());
1051 
1052 #ifndef PRODUCT
1053   if (Universe::heap()->promotion_should_fail()) {
1054     return NULL;
1055   }
1056 #endif  // #ifndef PRODUCT
1057 
1058   oop res = _cmsSpace->promote(obj, obj_size);
1059   if (res == NULL) {
1060     // expand and retry
1061     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1062     expand(s*HeapWordSize, MinHeapDeltaBytes,
1063       CMSExpansionCause::_satisfy_promotion);
1064     // Since there's currently no next generation, we don't try to promote
1065     // into a more senior generation.


1068                                "promotion to next generation");
1069     res = _cmsSpace->promote(obj, obj_size);
1070   }
1071   if (res != NULL) {
1072     // See comment in allocate() about when objects should
1073     // be allocated live.
1074     assert(obj->is_oop(), "Will dereference klass pointer below");
1075     collector()->promoted(false,           // Not parallel
1076                           (HeapWord*)res, obj->is_objArray(), obj_size);
1077     // promotion counters
1078     NOT_PRODUCT(
1079       _numObjectsPromoted++;
1080       _numWordsPromoted +=
1081         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1082     )
1083   }
1084   return res;
1085 }
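// Callers must be prepared for a NULL result even after the expansion
// retry above; an illustrative caller-side pattern (hypothetical handler
// name, not from this file):
//
//   oop new_obj = cms_gen->promote(obj, obj->size());
//   if (new_obj == NULL) {
//     handle_promotion_failure(obj);  // hypothetical: preserve marks, etc.
//   }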
1086 
1087 
1088 // IMPORTANT: Notes on object size recognition in CMS.
1089 // ---------------------------------------------------
1090 // A block of storage in the CMS generation is always in
1091 // one of three states. A free block (FREE), an allocated
1092 // object (OBJECT) whose size() method reports the correct size,
1093 // and an intermediate state (TRANSIENT) in which its size cannot
1094 // be accurately determined.
1095 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1096 // -----------------------------------------------------
1097 // FREE:      klass_word & 1 == 1; mark_word holds block size
1098 //
1099 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1100 //            obj->size() computes correct size
1101 //
1102 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1103 //
1104 // STATE IDENTIFICATION: (64 bit+COOPS)
1105 // ------------------------------------
1106 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1107 //
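// An illustrative classifier for the 32-bit / non-COOPS encoding described
// above (hypothetical raw-header form, not from this file):
//
//   enum BlockState { FREE, OBJECT, TRANSIENT };
//   BlockState classify(intptr_t klass_word) {
//     if (klass_word & 1)  return FREE;    // low bit set: free block
//     if (klass_word != 0) return OBJECT;  // klass installed: size() is valid
//     return TRANSIENT;                    // header not yet published
//   }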


1580 //
1581 
1582 void CMSCollector::acquire_control_and_collect(bool full,
1583         bool clear_all_soft_refs) {
1584   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1585   assert(!Thread::current()->is_ConcurrentGC_thread(),
1586          "shouldn't try to acquire control from self!");
1587 
1588   // Start the protocol for acquiring control of the
1589   // collection from the background collector (aka CMS thread).
1590   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1591          "VM thread should have CMS token");
1592   // Remember the possibly interrupted state of an ongoing
1593   // concurrent collection
1594   CollectorState first_state = _collectorState;
1595 
1596   // Signal to a possibly ongoing concurrent collection that
1597   // we want to do a foreground collection.
1598   _foregroundGCIsActive = true;
1599 



1600   // release locks and wait for a notify from the background collector
1601   // releasing the locks is only necessary for phases which
1602   // yield to improve the granularity of the collection.
1603   assert_lock_strong(bitMapLock());
1604   // We need to lock the Free list lock for the space that we are
1605   // currently collecting.
1606   assert(haveFreelistLocks(), "Must be holding free list locks");
1607   bitMapLock()->unlock();
1608   releaseFreelistLocks();
1609   {
1610     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1611     if (_foregroundGCShouldWait) {
1612       // We are going to be waiting for action for the CMS thread;
1613       // it had better not be gone (for instance at shutdown)!
1614       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1615              "CMS thread must be running");
1616       // Wait here until the background collector gives us the go-ahead
1617       ConcurrentMarkSweepThread::clear_CMS_flag(
1618         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1619       // Get a possibly blocked CMS thread going:


1903       }
1904       // If first_state was not Idling, then a background GC
1905       // was in progress and has now finished.  No need to do it
1906       // again.  Leave the state as Idling.
1907       break;
1908     case Precleaning:
1909       // In the foreground case don't do the precleaning since
1910       // it is not done concurrently and there is extra work
1911       // required.
1912       _collectorState = FinalMarking;
1913   }
1914   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
1915 
1916   // For a mark-sweep, compute_new_size() will be called
1917   // in the heap's do_collection() method.
1918 }
1919 
1920 
1921 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1922   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
1923   ContiguousSpace* eden_space = dng->eden();
1924   ContiguousSpace* from_space = dng->from();
1925   ContiguousSpace* to_space   = dng->to();
1926   // Eden
1927   if (_eden_chunk_array != NULL) {
1928     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1929                            eden_space->bottom(), eden_space->top(),
1930                            eden_space->end(), eden_space->capacity());
1931     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1932                            "_eden_chunk_capacity=" SIZE_FORMAT,
1933                            _eden_chunk_index, _eden_chunk_capacity);
1934     for (size_t i = 0; i < _eden_chunk_index; i++) {
1935       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1936                              i, _eden_chunk_array[i]);
1937     }
1938   }
1939   // Survivor
1940   if (_survivor_chunk_array != NULL) {
1941     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1942                            from_space->bottom(), from_space->top(),
1943                            from_space->end(), from_space->capacity());


2551 
2552   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2553 
2554   _cmsGen->gc_epilogue_work(full);
2555 
2556   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2557     // in case sampling was not already enabled, enable it
2558     _start_sampling = true;
2559   }
2560   // reset _eden_chunk_array so sampling starts afresh
2561   _eden_chunk_index = 0;
2562 
2563   size_t cms_used   = _cmsGen->cmsSpace()->used();
2564 
2565   // update performance counters - this uses a special version of
2566   // update_counters() that allows the utilization to be passed as a
2567   // parameter, avoiding multiple calls to used().
2568   //
2569   _cmsGen->update_counters(cms_used);
2570 




2571   bitMapLock()->unlock();
2572   releaseFreelistLocks();
2573 
2574   if (!CleanChunkPoolAsync) {
2575     Chunk::clean_chunk_pool();
2576   }
2577 
2578   set_did_compact(false);
2579   _between_prologue_and_epilogue = false;  // ready for next cycle
2580 }
2581 
2582 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2583   collector()->gc_epilogue(full);
2584 
2585   // Also reset promotion tracking in par gc thread states.
2586   if (CollectedHeap::use_parallel_gc_threads()) {
2587     for (uint i = 0; i < ParallelGCThreads; i++) {
2588       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2589     }
2590   }


4038     } else if (terminator()->offer_termination(&_term_term)) {
4039       assert(work_q->size() == 0, "Impossible!");
4040       break;
4041     } else if (yielding() || should_yield()) {
4042       yield();
4043     }
4044   }
4045 }
4046 
4047 // This is run by the CMS (coordinator) thread.
4048 void CMSConcMarkingTask::coordinator_yield() {
4049   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4050          "CMS thread should hold CMS token");
4051   // First give up the locks, then yield, then re-lock
4052   // We should probably use a constructor/destructor idiom to
4053   // do this unlock/lock or modify the MutexUnlocker class to
4054   // serve our purpose. XXX
4055   assert_lock_strong(_bit_map_lock);
4056   _bit_map_lock->unlock();
4057   ConcurrentMarkSweepThread::desynchronize(true);

4058   _collector->stopTimer();
4059   if (PrintCMSStatistics != 0) {
4060     _collector->incrementYields();
4061   }

4062 
4063   // It is possible for whichever thread initiated the yield request
4064   // not to get a chance to wake up and take the bitmap lock between
4065   // this thread releasing it and reacquiring it. So, while the
4066   // should_yield() flag is on, let's sleep for a bit to give the
4067   // other thread a chance to wake up. The limit imposed on the number
4068 // of iterations is defensive, to avoid any unforeseen circumstances
4069 // putting us into an infinite loop. Since it's always been this
4070 // (coordinator_yield()) method that was observed to cause the
4071 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4072 // which is by default non-zero. For the other seven methods that
4073 // also perform the yield operation, we are using a different
4074 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4075   // can enable the sleeping for those methods too, if necessary.
4076   // See 6442774.
4077   //
4078   // We really need to reconsider the synchronization between the GC
4079   // thread and the yield-requesting threads in the future and we
4080   // should really use wait/notify, which is the recommended
4081   // way of doing this type of interaction. Additionally, we should
4082 // consolidate the eight methods that do the yield operation, which
4083 // are almost identical, into one for better maintainability and
4084   // readability. See 6445193.
4085   //
4086   // Tony 2006.06.29
4087   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4088                    ConcurrentMarkSweepThread::should_yield() &&
4089                    !CMSCollector::foregroundGCIsActive(); ++i) {
4090     os::sleep(Thread::current(), 1, false);

4091   }
4092 
4093   ConcurrentMarkSweepThread::synchronize(true);
4094   _bit_map_lock->lock_without_safepoint_check();
4095   _collector->startTimer();
4096 }
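// A sketch of the consolidated helper suggested by 6445193 -- one routine
// parameterized by the locks to drop and the sleep count (hypothetical
// names; the eight existing yield methods differ only in these inputs):
//
//   void yield_and_sleep(Mutex** locks, int n, unsigned sleep_count) {
//     for (int i = n - 1; i >= 0; i--) locks[i]->unlock();
//     ConcurrentMarkSweepThread::desynchronize(true);
//     for (unsigned i = 0; i < sleep_count &&
//                          ConcurrentMarkSweepThread::should_yield() &&
//                          !CMSCollector::foregroundGCIsActive(); ++i) {
//       os::sleep(Thread::current(), 1, false);
//     }
//     ConcurrentMarkSweepThread::synchronize(true);
//     for (int i = 0; i < n; i++) locks[i]->lock_without_safepoint_check();
//   }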
4097 
4098 bool CMSCollector::do_marking_mt(bool asynch) {
4099   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4100   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4101                                        conc_workers()->total_workers(),
4102                                        conc_workers()->active_workers(),
4103                                        Threads::number_of_non_daemon_threads());
4104   conc_workers()->set_active_workers(num_workers);
4105 
4106   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4107 
4108   CMSConcMarkingTask tsk(this,
4109                          cms_space,
4110                          asynch,


5001     // Check if we have modified any oops in the Klass during the concurrent marking.
5002     if (k->has_accumulated_modified_oops()) {
5003       k->clear_accumulated_modified_oops();
5004 
5005       // We could have transferred the current modified marks to the accumulated marks,
5006       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5007     } else if (k->has_modified_oops()) {
5008       // Don't clear anything, this info is needed by the next young collection.
5009     } else {
5010       // No modified oops in the Klass.
5011       return;
5012     }
5013 
5014     // The klass has modified fields, need to scan the klass.
5015     _cm_klass_closure.do_klass(k);
5016   }
5017 };
5018 
5019 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5020   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5021   ContiguousSpace* eden_space = dng->eden();
5022   ContiguousSpace* from_space = dng->from();
5023   ContiguousSpace* to_space   = dng->to();
5024 
5025   HeapWord** eca = _collector->_eden_chunk_array;
5026   size_t     ect = _collector->_eden_chunk_index;
5027   HeapWord** sca = _collector->_survivor_chunk_array;
5028   size_t     sct = _collector->_survivor_chunk_index;
5029 
5030   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5031   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5032 
5033   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5034   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5035   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5036 }
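// The sampled chunk boundaries let do_young_space_rescan() hand each worker
// a bounded slice of the space rather than the whole of it. Illustrative
// striding over a sampled array (hypothetical names, not from this file):
//
//   for (size_t i = worker_id; i < n_chunks; i += n_workers) {
//     HeapWord* lo = chunk_array[i];
//     HeapWord* hi = (i + 1 < n_chunks) ? chunk_array[i + 1] : space_top;
//     rescan_objects_in(MemRegion(lo, hi));  // apply cl to this slice only
//   }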
5037 
5038 // work_queue(i) is passed to the closure
5039 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5040 // is also passed to do_dirty_card_rescan_tasks() and to
5041 // do_work_steal() to select the i-th task_queue.


6237     }
6238 
6239     // Clear the mark bitmap (no grey objects to start with)
6240     // for the next cycle.
6241     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6242     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6243 
6244     HeapWord* curAddr = _markBitMap.startWord();
6245     while (curAddr < _markBitMap.endWord()) {
6246       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6247       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6248       _markBitMap.clear_large_range(chunk);
6249       if (ConcurrentMarkSweepThread::should_yield() &&
6250           !foregroundGCIsActive() &&
6251           CMSYield) {
6252         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6253                "CMS thread should hold CMS token");
6254         assert_lock_strong(bitMapLock());
6255         bitMapLock()->unlock();
6256         ConcurrentMarkSweepThread::desynchronize(true);

6257         stopTimer();
6258         if (PrintCMSStatistics != 0) {
6259           incrementYields();
6260         }

6261 
6262         // See the comment in coordinator_yield()
6263         for (unsigned i = 0; i < CMSYieldSleepCount &&
6264                          ConcurrentMarkSweepThread::should_yield() &&
6265                          !CMSCollector::foregroundGCIsActive(); ++i) {
6266           os::sleep(Thread::current(), 1, false);

6267         }
6268 
6269         ConcurrentMarkSweepThread::synchronize(true);
6270         bitMapLock()->lock_without_safepoint_check();
6271         startTimer();
6272       }
6273       curAddr = chunk.end();
6274     }
6275     // A successful mostly concurrent collection has been done.
6276     // Because only the full (i.e., concurrent mode failure) collections
6277     // are being measured for gc overhead limits, clean the "near" flag
6278     // and count.
6279     size_policy()->reset_gc_overhead_limit_count();
6280     _collectorState = Idling;
6281   } else {
6282     // already have the lock
6283     assert(_collectorState == Resetting, "just checking");
6284     assert_lock_strong(bitMapLock());
6285     _markBitMap.clear_all();
6286     _collectorState = Idling;
6287   }
6288 




6289   NOT_PRODUCT(
6290     if (RotateCMSCollectionTypes) {
6291       _cmsGen->rotate_debug_collection_type();
6292     }
6293   )
6294 
6295   register_gc_end();
6296 }
6297 
6298 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6299   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6300   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6301   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6302   TraceCollectorStats tcs(counters());
6303 
6304   switch (op) {
6305     case CMS_op_checkpointRootsInitial: {
6306       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6307       checkpointRootsInitial(true);       // asynch
6308       if (PrintGC) {


6720       _collector->restore_preserved_marks_if_any();
6721       assert(_collector->no_preserved_marks(), "No preserved marks");
6722     }
6723     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6724            "All preserved marks should have been restored above");
6725   }
6726 }
6727 
6728 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6729 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6730 
6731 void MarkRefsIntoAndScanClosure::do_yield_work() {
6732   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6733          "CMS thread should hold CMS token");
6734   assert_lock_strong(_freelistLock);
6735   assert_lock_strong(_bit_map->lock());
6736   // relinquish the free_list_lock and bitMapLock()
6737   _bit_map->lock()->unlock();
6738   _freelistLock->unlock();
6739   ConcurrentMarkSweepThread::desynchronize(true);

6740   _collector->stopTimer();
6741   if (PrintCMSStatistics != 0) {
6742     _collector->incrementYields();
6743   }

6744 
6745   // See the comment in coordinator_yield()
6746   for (unsigned i = 0;
6747        i < CMSYieldSleepCount &&
6748        ConcurrentMarkSweepThread::should_yield() &&
6749        !CMSCollector::foregroundGCIsActive();
6750        ++i) {
6751     os::sleep(Thread::current(), 1, false);

6752   }
6753 
6754   ConcurrentMarkSweepThread::synchronize(true);
6755   _freelistLock->lock_without_safepoint_check();
6756   _bit_map->lock()->lock_without_safepoint_check();
6757   _collector->startTimer();
6758 }
6759 
6760 ///////////////////////////////////////////////////////////
6761 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6762 //                                 MarkRefsIntoAndScanClosure
6763 ///////////////////////////////////////////////////////////
6764 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6765   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6766   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6767   _span(span),
6768   _bit_map(bit_map),
6769   _work_queue(work_queue),
6770   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6771                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),


6877     } else {
6878       // An object not (yet) reached by marking: we merely need to
6879       // compute its size so as to go look at the next block.
6880       assert(p->is_oop(true), "should be an oop");
6881       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6882     }
6883   }
6884   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6885   return size;
6886 }
6887 
6888 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6889   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6890          "CMS thread should hold CMS token");
6891   assert_lock_strong(_freelistLock);
6892   assert_lock_strong(_bitMap->lock());
6893   // relinquish the free_list_lock and bitMapLock()
6894   _bitMap->lock()->unlock();
6895   _freelistLock->unlock();
6896   ConcurrentMarkSweepThread::desynchronize(true);

6897   _collector->stopTimer();
6898   if (PrintCMSStatistics != 0) {
6899     _collector->incrementYields();
6900   }

6901 
6902   // See the comment in coordinator_yield()
6903   for (unsigned i = 0; i < CMSYieldSleepCount &&
6904                    ConcurrentMarkSweepThread::should_yield() &&
6905                    !CMSCollector::foregroundGCIsActive(); ++i) {
6906     os::sleep(Thread::current(), 1, false);

6907   }
6908 
6909   ConcurrentMarkSweepThread::synchronize(true);
6910   _freelistLock->lock_without_safepoint_check();
6911   _bitMap->lock()->lock_without_safepoint_check();
6912   _collector->startTimer();
6913 }
6914 
6915 
6916 //////////////////////////////////////////////////////////////////
6917 // SurvivorSpacePrecleanClosure
6918 //////////////////////////////////////////////////////////////////
6919 // This (single-threaded) closure is used to preclean the oops in
6920 // the survivor spaces.
6921 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6922 
6923   HeapWord* addr = (HeapWord*)p;
6924   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6925   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6926   assert(p->klass_or_null() != NULL, "object should be initialized");


6946     // iterate over the oops in this oop, marking and pushing
6947     // the ones in CMS heap (i.e. in _span).
6948     new_oop->oop_iterate(_scanning_closure);
6949     // check if it's time to yield
6950     do_yield_check();
6951   }
6952   unsigned int after_count =
6953     GenCollectedHeap::heap()->total_collections();
6954   bool abort = (_before_count != after_count) ||
6955                _collector->should_abort_preclean();
6956   return abort ? 0 : size;
6957 }
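// The before/after counter comparison above is how this closure notices
// that a scavenge ran while it had yielded: any change in
// total_collections() means survivor-space addresses may be stale, so the
// preclean pass aborts by returning 0. Illustrative guard (hypothetical
// name, not from this file):
//
//   bool scavenge_since(unsigned int before_count) {
//     return GenCollectedHeap::heap()->total_collections() != before_count;
//   }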
6958 
6959 void SurvivorSpacePrecleanClosure::do_yield_work() {
6960   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6961          "CMS thread should hold CMS token");
6962   assert_lock_strong(_bit_map->lock());
6963   // Relinquish the bit map lock
6964   _bit_map->lock()->unlock();
6965   ConcurrentMarkSweepThread::desynchronize(true);

6966   _collector->stopTimer();
6967   if (PrintCMSStatistics != 0) {
6968     _collector->incrementYields();
6969   }

6970 
6971   // See the comment in coordinator_yield()
6972   for (unsigned i = 0; i < CMSYieldSleepCount &&
6973                        ConcurrentMarkSweepThread::should_yield() &&
6974                        !CMSCollector::foregroundGCIsActive(); ++i) {
6975     os::sleep(Thread::current(), 1, false);

6976   }
6977 
6978   ConcurrentMarkSweepThread::synchronize(true);
6979   _bit_map->lock()->lock_without_safepoint_check();
6980   _collector->startTimer();
6981 }
6982 
6983 // This closure is used to rescan the marked objects on the dirty cards
6984 // in the mod union table and the card table proper. In the parallel
6985 // case, although the bitMap is shared, we do a single read so the
6986 // isMarked() query is "safe".
6987 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6988   // Ignore mark word because we are running concurrently with mutators
6989   assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6990   HeapWord* addr = (HeapWord*)p;
6991   assert(_span.contains(addr), "we are scanning the CMS generation");
6992   bool is_obj_array = false;
6993   #ifdef ASSERT
6994     if (!_parallel) {
6995       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");


7101       DEBUG_ONLY(})
7102       return true;
7103     }
7104   }
7105   scanOopsInOop(addr);
7106   return true;
7107 }
7108 
7109 // We take a break if we've been at this for a while,
7110 // so as to avoid monopolizing the locks involved.
7111 void MarkFromRootsClosure::do_yield_work() {
7112   // First give up the locks, then yield, then re-lock
7113   // We should probably use a constructor/destructor idiom to
7114   // do this unlock/lock or modify the MutexUnlocker class to
7115   // serve our purpose. XXX
7116   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7117          "CMS thread should hold CMS token");
7118   assert_lock_strong(_bitMap->lock());
7119   _bitMap->lock()->unlock();
7120   ConcurrentMarkSweepThread::desynchronize(true);

7121   _collector->stopTimer();
7122   if (PrintCMSStatistics != 0) {
7123     _collector->incrementYields();
7124   }

7125 
7126   // See the comment in coordinator_yield()
7127   for (unsigned i = 0; i < CMSYieldSleepCount &&
7128                        ConcurrentMarkSweepThread::should_yield() &&
7129                        !CMSCollector::foregroundGCIsActive(); ++i) {
7130     os::sleep(Thread::current(), 1, false);

7131   }
7132 
7133   ConcurrentMarkSweepThread::synchronize(true);
7134   _bitMap->lock()->lock_without_safepoint_check();
7135   _collector->startTimer();
7136 }
7137 
7138 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7139   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7140   assert(_markStack->isEmpty(),
7141          "should drain stack to limit stack usage");
7142   // convert ptr to an oop preparatory to scanning
7143   oop obj = oop(ptr);
7144   // Ignore mark word in verification below, since we
7145   // may be running concurrently with mutators.
7146   assert(obj->is_oop(true), "should be an oop");
7147   assert(_finger <= ptr, "_finger runneth ahead");
7148   // advance the finger to right end of this object
7149   _finger = ptr + obj->size();
7150   assert(_finger > ptr, "we just incremented it above");
7151   // On large heaps, it may take us some time to get through
7152   // the marking phase. During
7153   // this time it's possible that a lot of mutations have
7154   // accumulated in the card table and the mod union table --
7155   // these mutation records are redundant until we have
7156   // actually traced into the corresponding card.
7157   // Here, we check whether advancing the finger would make
7158   // us cross into a new card, and if so clear corresponding
7159   // cards in the MUT (preclean them in the card-table in the
7160   // future).
7161 
7162   DEBUG_ONLY(if (!_verifying) {)
7163     // The clean-on-enter optimization is disabled by default,
7164     // until we fix 6178663.
7165     if (CMSCleanOnEnter && (_finger > _threshold)) {
7166       // [_threshold, _finger) represents the interval
7167       // of cards to be cleared in MUT (or precleaned in card table).
7168       // The set of cards to be cleared is all those that overlap
7169       // with the interval [_threshold, _finger); note that
7170       // _threshold is always kept card-aligned but _finger isn't
7171       // always card-aligned.
7172       HeapWord* old_threshold = _threshold;


7249   scan_oops_in_oop(addr);
7250   return true;
7251 }
7252 
7253 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7254   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7255   // Should we assert that our work queue is empty or
7256   // below some drain limit?
7257   assert(_work_queue->size() == 0,
7258          "should drain stack to limit stack usage");
7259   // convert ptr to an oop preparatory to scanning
7260   oop obj = oop(ptr);
7261   // Ignore mark word in verification below, since we
7262   // may be running concurrently with mutators.
7263   assert(obj->is_oop(true), "should be an oop");
7264   assert(_finger <= ptr, "_finger runneth ahead");
7265   // advance the finger to right end of this object
7266   _finger = ptr + obj->size();
7267   assert(_finger > ptr, "we just incremented it above");
7268   // On large heaps, it may take us some time to get through
7269   // the marking phase. During
7270   // this time it's possible that a lot of mutations have
7271   // accumulated in the card table and the mod union table --
7272   // these mutation records are redundant until we have
7273   // actually traced into the corresponding card.
7274   // Here, we check whether advancing the finger would make
7275   // us cross into a new card, and if so clear corresponding
7276   // cards in the MUT (preclean them in the card-table in the
7277   // future).
7278 
7279   // The clean-on-enter optimization is disabled by default,
7280   // until we fix 6178663.
7281   if (CMSCleanOnEnter && (_finger > _threshold)) {
7282     // [_threshold, _finger) represents the interval
7283     // of cards to be cleared in MUT (or precleaned in card table).
7284     // The set of cards to be cleared is all those that overlap
7285     // with the interval [_threshold, _finger); note that
7286     // _threshold is always kept card-aligned but _finger isn't
7287     // always card-aligned.
7288     HeapWord* old_threshold = _threshold;
7289     assert(old_threshold == (HeapWord*)round_to(


7738       if (simulate_overflow || !_work_queue->push(obj)) {
7739         _collector->par_push_on_overflow_list(obj);
7740         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7741       }
7742     } // Else, some other thread got there first
7743   }
7744 }
7745 
7746 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7747 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7748 
7749 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7750   Mutex* bml = _collector->bitMapLock();
7751   assert_lock_strong(bml);
7752   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7753          "CMS thread should hold CMS token");
7754 
7755   bml->unlock();
7756   ConcurrentMarkSweepThread::desynchronize(true);
7757 


7758   _collector->stopTimer();
7759   if (PrintCMSStatistics != 0) {
7760     _collector->incrementYields();
7761   }

7762 
7763   // See the comment in coordinator_yield()
7764   for (unsigned i = 0; i < CMSYieldSleepCount &&
7765                        ConcurrentMarkSweepThread::should_yield() &&
7766                        !CMSCollector::foregroundGCIsActive(); ++i) {
7767     os::sleep(Thread::current(), 1, false);

7768   }
7769 
7770   ConcurrentMarkSweepThread::synchronize(true);
7771   bml->lock();
7772 
7773   _collector->startTimer();
7774 }
7775 
7776 bool CMSPrecleanRefsYieldClosure::should_return() {
7777   if (ConcurrentMarkSweepThread::should_yield()) {
7778     do_yield_work();
7779   }
7780   return _collector->foregroundGCIsActive();
7781 }
7782 
7783 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7784   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7785          "mr should be aligned to start at a card boundary");
7786   // We'd like to assert:
7787   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,


8415   // to the appropriate freelist.  After yielding, the next
8416   // free block encountered will start a coalescing range of
8417   // free blocks.  If the next free block is adjacent to the
8418   // chunk just flushed, they will need to wait for the next
8419   // sweep to be coalesced.
8420   if (inFreeRange()) {
8421     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8422   }
8423 
8424   // First give up the locks, then yield, then re-lock.
8425   // We should probably use a constructor/destructor idiom to
8426   // do this unlock/lock or modify the MutexUnlocker class to
8427   // serve our purpose. XXX
8428   assert_lock_strong(_bitMap->lock());
8429   assert_lock_strong(_freelistLock);
8430   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8431          "CMS thread should hold CMS token");
8432   _bitMap->lock()->unlock();
8433   _freelistLock->unlock();
8434   ConcurrentMarkSweepThread::desynchronize(true);

8435   _collector->stopTimer();
8436   if (PrintCMSStatistics != 0) {
8437     _collector->incrementYields();
8438   }

8439 
8440   // See the comment in coordinator_yield()
8441   for (unsigned i = 0; i < CMSYieldSleepCount &&
8442                        ConcurrentMarkSweepThread::should_yield() &&
8443                        !CMSCollector::foregroundGCIsActive(); ++i) {
8444     os::sleep(Thread::current(), 1, false);

8445   }
8446 
8447   ConcurrentMarkSweepThread::synchronize(true);
8448   _freelistLock->lock();
8449   _bitMap->lock()->lock_without_safepoint_check();
8450   _collector->startTimer();
8451 }
8452 
8453 #ifndef PRODUCT
8454 // This is actually very useful in a product build if it can
8455 // be called from the debugger.  Compile it into the product
8456 // as needed.
8457 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8458   return debug_cms_space->verify_chunk_in_free_list(fc);
8459 }
8460 #endif
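// For example, from a debugger attached to the VM one can evaluate
// (the chunk address is purely illustrative):
//
//   (gdb) call debug_verify_chunk_in_free_list((FreeChunk*)0x7f30a4000000)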
8461 
8462 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8463   if (CMSTraceSweeper) {
8464     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",

