
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7209 : [mq]: inccms
rev 7210 : [mq]: icms-reviews


 150 };
 151 
 152 // Convenience class that does a CMSTokenSync, and then acquires
 153 // up to three locks.
 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
 155  private:
 156   // Note: locks are acquired in textual declaration order
 157   // and released in the opposite order
 158   MutexLockerEx _locker1, _locker2, _locker3;
 159  public:
 160   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 161                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 162     CMSTokenSync(is_cms_thread),
 163     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 164     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 165     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 166   { }
 167 };
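
A hedged usage sketch (illustrative only; freelistLock() and bitMapLock() are the lock accessors used elsewhere in this file):

    {
      CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
                               freelistLock(), bitMapLock());
      // ... work done while holding the CMS token and both locks ...
    }  // locks released in reverse order, then the token sync ends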
 168 
 169 
 170 // Wrapper class to temporarily disable icms during a foreground cms collection.
 171 class ICMSDisabler: public StackObj {
 172  public:
 173   // The ctor disables icms and wakes up the thread so it notices the change;
 174   // the dtor re-enables icms.  Note that the CMSCollector methods will check
 175   // CMSIncrementalMode.
 176   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
 177   ~ICMSDisabler() { CMSCollector::enable_icms(); }
 178 };
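
For reference, the disabler is used as a stack object in acquire_control_and_collect() later in this file, so icms is re-enabled automatically when the foreground collection scope exits:

    // Disable incremental mode during a foreground collection.
    ICMSDisabler icms_disabler;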
 179 
 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This struct contains per-thread things necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(


 346   _saved_alpha = alpha;
 347 
 348   // Initialize the alphas to the bootstrap value of 100.
 349   _gc0_alpha = _cms_alpha = 100;
 350 
 351   _cms_begin_time.update();
 352   _cms_end_time.update();
 353 
 354   _gc0_duration = 0.0;
 355   _gc0_period = 0.0;
 356   _gc0_promoted = 0;
 357 
 358   _cms_duration = 0.0;
 359   _cms_period = 0.0;
 360   _cms_allocated = 0;
 361 
 362   _cms_used_at_gc0_begin = 0;
 363   _cms_used_at_gc0_end = 0;
 364   _allow_duty_cycle_reduction = false;
 365   _valid_bits = 0;
 366   _icms_duty_cycle = CMSIncrementalDutyCycle;
 367 }
 368 
 369 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 370   // TBD: CR 6909490
 371   return 1.0;
 372 }
 373 
 374 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 375 }
 376 
 377 // If promotion failure handling is on, use
 378 // the padded average size of the promotion for each
 379 // young generation collection.
 380 double CMSStats::time_until_cms_gen_full() const {
 381   size_t cms_free = _cms_gen->cmsSpace()->free();
 382   GenCollectedHeap* gch = GenCollectedHeap::heap();
 383   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 384                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 385   if (cms_free > expected_promotion) {
 386     // Start a cms collection if there isn't enough space to promote


 425   // account for that much possible delay
 426   // in the query so as to avoid concurrent mode failures
 427   // due to starting the collection just a wee bit too
 428   // late.
 429   double work = cms_duration() + gc0_period();
 430   double deadline = time_until_cms_gen_full();
 431   // If a concurrent mode failure occurred recently, we want to be
 432   // more conservative and halve our expected time_until_cms_gen_full()
 433   if (work > deadline) {
 434     if (Verbose && PrintGCDetails) {
 435       gclog_or_tty->print(
 436         " CMSCollector: collect because of anticipated promotion "
 437         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 438         gc0_period(), time_until_cms_gen_full());
 439     }
 440     return 0.0;
 441   }
 442   return work - deadline;
 443 }
 444 
 445 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
 446 // amount of change to prevent wild oscillation.
 447 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
 448                                               unsigned int new_duty_cycle) {
 449   assert(old_duty_cycle <= 100, "bad input value");
 450   assert(new_duty_cycle <= 100, "bad input value");
 451 
 452   // Note:  use subtraction with caution since it may underflow (values are
 453   // unsigned).  Addition is safe since we're in the range 0-100.
 454   unsigned int damped_duty_cycle = new_duty_cycle;
 455   if (new_duty_cycle < old_duty_cycle) {
 456     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
 457     if (new_duty_cycle + largest_delta < old_duty_cycle) {
 458       damped_duty_cycle = old_duty_cycle - largest_delta;
 459     }
 460   } else if (new_duty_cycle > old_duty_cycle) {
 461     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
 462     if (new_duty_cycle > old_duty_cycle + largest_delta) {
 463       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
 464     }
 465   }
 466   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
 467 
 468   if (CMSTraceIncrementalPacing) {
 469     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
 470                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
 471   }
 472   return damped_duty_cycle;
 473 }
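
A worked example of the damping above (values chosen for illustration only):

    // Decrease: old_duty_cycle = 40, new_duty_cycle = 10
    //   largest_delta = MAX2(40 / 4, 5U) = 10
    //   10 + 10 < 40, so damped = 40 - 10 = 30 (rather than dropping to 10)
    // Increase: old_duty_cycle = 40, new_duty_cycle = 80
    //   largest_delta = MAX2(40 / 4, 15U) = 15
    //   80 > 40 + 15, so damped = MIN2(40 + 15, 100U) = 55 (rather than 80)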
 474 
 475 unsigned int CMSStats::icms_update_duty_cycle_impl() {
 476   assert(CMSIncrementalPacing && valid(),
 477          "should be handled in icms_update_duty_cycle()");
 478 
 479   double cms_time_so_far = cms_timer().seconds();
 480   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
 481   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
 482 
 483   // Avoid division by 0.
 484   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
 485   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
 486 
 487   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
 488   if (new_duty_cycle > _icms_duty_cycle) {
 489     // Avoid very small duty cycles (1 or 2); 0 is allowed.
 490     if (new_duty_cycle > 2) {
 491       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
 492                                                 new_duty_cycle);
 493     }
 494   } else if (_allow_duty_cycle_reduction) {
 495     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
 496     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
 497     // Respect the minimum duty cycle.
 498     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
 499     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
 500   }
 501 
 502   if (PrintGCDetails || CMSTraceIncrementalPacing) {
 503     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
 504   }
 505 
 506   _allow_duty_cycle_reduction = false;
 507   return _icms_duty_cycle;
 508 }
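
To make the pacing arithmetic concrete, a hedged numeric example (all values invented; cms_duration_per_mb() is seconds of CMS work per MB of used CMS space):

    // cms_duration_per_mb() = 0.01, _cms_used_at_gc0_end = 400 * M
    // scaled_duration           = 0.01 * 400        = 4.0 s
    // cms_time_so_far           = 1.0 s
    // scaled_duration_remaining = |4.0 - 1.0|       = 3.0 s
    // time_until_full           = 6.0 s
    // duty_cycle_dbl            = 100.0 * 3.0 / 6.0 = 50.0, new_duty_cycle = 50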
 509 
 510 #ifndef PRODUCT
 511 void CMSStats::print_on(outputStream *st) const {
 512   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 513   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 514                gc0_duration(), gc0_period(), gc0_promoted());
 515   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 516             cms_duration(), cms_duration_per_mb(),
 517             cms_period(), cms_allocated());
 518   st->print(",cms_since_beg=%g,cms_since_end=%g",
 519             cms_time_since_begin(), cms_time_since_end());
 520   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 521             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 522   if (CMSIncrementalMode) {
 523     st->print(",dc=%d", icms_duty_cycle());
 524   }
 525 
 526   if (valid()) {
 527     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 528               promotion_rate(), cms_allocation_rate());
 529     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 530               cms_consumption_rate(), time_until_cms_gen_full());
 531   }
 532   st->print(" ");
 533 }
 534 #endif // #ifndef PRODUCT
 535 
 536 CMSCollector::CollectorState CMSCollector::_collectorState =
 537                              CMSCollector::Idling;
 538 bool CMSCollector::_foregroundGCIsActive = false;
 539 bool CMSCollector::_foregroundGCShouldWait = false;
 540 
 541 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 542                            CardTableRS*                   ct,
 543                            ConcurrentMarkSweepPolicy*     cp):
 544   _cmsGen(cmsGen),


 562   _stats(cmsGen),
 563   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 564   _eden_chunk_array(NULL),     // may be set in ctor body
 565   _eden_chunk_capacity(0),     // -- ditto --
 566   _eden_chunk_index(0),        // -- ditto --
 567   _survivor_plab_array(NULL),  // -- ditto --
 568   _survivor_chunk_array(NULL), // -- ditto --
 569   _survivor_chunk_capacity(0), // -- ditto --
 570   _survivor_chunk_index(0),    // -- ditto --
 571   _ser_pmc_preclean_ovflw(0),
 572   _ser_kac_preclean_ovflw(0),
 573   _ser_pmc_remark_ovflw(0),
 574   _par_pmc_remark_ovflw(0),
 575   _ser_kac_ovflw(0),
 576   _par_kac_ovflw(0),
 577 #ifndef PRODUCT
 578   _num_par_pushes(0),
 579 #endif
 580   _collection_count_start(0),
 581   _verifying(false),
 582   _icms_start_limit(NULL),
 583   _icms_stop_limit(NULL),
 584   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 585   _completed_initialization(false),
 586   _collector_policy(cp),
 587   _should_unload_classes(CMSClassUnloadingEnabled),
 588   _concurrent_cycles_since_last_unload(0),
 589   _roots_scanning_options(SharedHeap::SO_None),
 590   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 591   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 592   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 593   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 594   _cms_start_registered(false)
 595 {
 596   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 597     ExplicitGCInvokesConcurrent = true;
 598   }
 599   // Now expand the span and allocate the collection support structures
 600   // (MUT, marking bit map etc.) to cover both generations subject to
 601   // collection.
 602 
 603   // For use by dirty card to oop closures.


1101         // card size.
1102         MemRegion mr(start,
1103                      (HeapWord*)round_to((intptr_t)(start + obj_size),
1104                         CardTableModRefBS::card_size /* bytes */));
1105         if (par) {
1106           _modUnionTable.par_mark_range(mr);
1107         } else {
1108           _modUnionTable.mark_range(mr);
1109         }
1110       } else {  // not an obj array; we can just mark the head
1111         if (par) {
1112           _modUnionTable.par_mark(start);
1113         } else {
1114           _modUnionTable.mark(start);
1115         }
1116       }
1117     }
1118   }
1119 }
1120 
1121 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1122 {
1123   size_t delta = pointer_delta(addr, space->bottom());
1124   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1125 }
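
Note the units: space->capacity() is in bytes, so dividing by HeapWordSize yields words, matching the word-sized pointer_delta(). A quick check with made-up numbers:

    // capacity = 4096 bytes, HeapWordSize = 8  =>  512 words
    // addr 128 words above bottom()            =>  128 * 100.0 / 512 = 25(%)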
1126 
1127 void CMSCollector::icms_update_allocation_limits()
1128 {
1129   Generation* young = GenCollectedHeap::heap()->get_gen(0);
1130   EdenSpace* eden = young->as_DefNewGeneration()->eden();
1131 
1132   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1133   if (CMSTraceIncrementalPacing) {
1134     stats().print();
1135   }
1136 
1137   assert(duty_cycle <= 100, "invalid duty cycle");
1138   if (duty_cycle != 0) {
1139     // The duty_cycle is a percentage between 0 and 100; convert to words and
1140     // then compute the offset from the endpoints of the space.
1141     size_t free_words = eden->free() / HeapWordSize;
1142     double free_words_dbl = (double)free_words;
1143     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1144     size_t offset_words = (free_words - duty_cycle_words) / 2;
1145 
1146     _icms_start_limit = eden->top() + offset_words;
1147     _icms_stop_limit = eden->end() - offset_words;
1148 
1149     // The limits may be adjusted (shifted to the right) by
1150     // CMSIncrementalOffset, to allow the application more mutator time after a
1151     // young gen gc (when all mutators were stopped) and before CMS starts and
1152     // takes away one or more cpus.
1153     if (CMSIncrementalOffset != 0) {
1154       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1155       size_t adjustment = (size_t)adjustment_dbl;
1156       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1157       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1158         _icms_start_limit += adjustment;
1159         _icms_stop_limit = tmp_stop;
1160       }
1161     }
1162   }
1163   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1164     _icms_start_limit = _icms_stop_limit = eden->end();
1165   }
1166 
1167   // Install the new start limit.
1168   eden->set_soft_end(_icms_start_limit);
1169 
1170   if (CMSTraceIncrementalMode) {
1171     gclog_or_tty->print(" icms alloc limits:  "
1172                            PTR_FORMAT "," PTR_FORMAT
1173                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1174                            p2i(_icms_start_limit), p2i(_icms_stop_limit),
1175                            percent_of_space(eden, _icms_start_limit),
1176                            percent_of_space(eden, _icms_stop_limit));
1177     if (Verbose) {
1178       gclog_or_tty->print("eden:  ");
1179       eden->print_on(gclog_or_tty);
1180     }
1181   }
1182 }
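
A worked example of the limit placement (illustrative numbers only):

    // free_words = 1000, duty_cycle = 40
    // duty_cycle_words  = 1000 * 40 / 100  = 400
    // offset_words      = (1000 - 400) / 2 = 300
    // _icms_start_limit = eden->top() + 300
    // _icms_stop_limit  = eden->end() - 300
    // i.e. iCMS runs while allocation crosses the middle 400 words (40%)
    // of eden's free space; CMSIncrementalOffset shifts this window
    // toward end().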
1183 
1184 // Any changes here should try to maintain the invariant
1185 // that if this method is called with _icms_start_limit
1186 // and _icms_stop_limit both NULL, then it should return NULL
1187 // and not notify the icms thread.
1188 HeapWord*
1189 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1190                                        size_t word_size)
1191 {
1192   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1193   // nop.
1194   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1195     if (top <= _icms_start_limit) {
1196       if (CMSTraceIncrementalMode) {
1197         space->print_on(gclog_or_tty);
1198         gclog_or_tty->stamp();
1199         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1200                                ", new limit=" PTR_FORMAT
1201                                " (" SIZE_FORMAT "%%)",
1202                                p2i(top), p2i(_icms_stop_limit),
1203                                percent_of_space(space, _icms_stop_limit));
1204       }
1205       ConcurrentMarkSweepThread::start_icms();
1206       assert(top < _icms_stop_limit, "Tautology");
1207       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1208         return _icms_stop_limit;
1209       }
1210 
1211       // The allocation will cross both the _start and _stop limits, so do the
1212       // stop notification also and return end().
1213       if (CMSTraceIncrementalMode) {
1214         space->print_on(gclog_or_tty);
1215         gclog_or_tty->stamp();
1216         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1217                                ", new limit=" PTR_FORMAT
1218                                " (" SIZE_FORMAT "%%)",
1219                                p2i(top), p2i(space->end()),
1220                                percent_of_space(space, space->end()));
1221       }
1222       ConcurrentMarkSweepThread::stop_icms();
1223       return space->end();
1224     }
1225 
1226     if (top <= _icms_stop_limit) {
1227       if (CMSTraceIncrementalMode) {
1228         space->print_on(gclog_or_tty);
1229         gclog_or_tty->stamp();
1230         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1231                                ", new limit=" PTR_FORMAT
1232                                " (" SIZE_FORMAT "%%)",
 1233                                p2i(top), p2i(space->end()),
1234                                percent_of_space(space, space->end()));
1235       }
1236       ConcurrentMarkSweepThread::stop_icms();
1237       return space->end();
1238     }
1239 
1240     if (CMSTraceIncrementalMode) {
1241       space->print_on(gclog_or_tty);
1242       gclog_or_tty->stamp();
1243       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1244                              ", new limit=" PTR_FORMAT,
 1245                              p2i(top), p2i(NULL));
1246     }
1247   }
1248 
1249   return NULL;
1250 }
1251 
1252 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1253   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1254   // allocate, copy and if necessary update promoinfo --
1255   // delegate to underlying space.
1256   assert_lock_strong(freelistLock());
1257 
1258 #ifndef PRODUCT
1259   if (Universe::heap()->promotion_should_fail()) {
1260     return NULL;
1261   }
1262 #endif  // #ifndef PRODUCT
1263 
1264   oop res = _cmsSpace->promote(obj, obj_size);
1265   if (res == NULL) {
1266     // expand and retry
1267     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1268     expand(s*HeapWordSize, MinHeapDeltaBytes,
1269       CMSExpansionCause::_satisfy_promotion);
1270     // Since there's currently no next generation, we don't try to promote
1271     // into a more senior generation.


1274                                "promotion to next generation");
1275     res = _cmsSpace->promote(obj, obj_size);
1276   }
1277   if (res != NULL) {
1278     // See comment in allocate() about when objects should
1279     // be allocated live.
1280     assert(obj->is_oop(), "Will dereference klass pointer below");
1281     collector()->promoted(false,           // Not parallel
1282                           (HeapWord*)res, obj->is_objArray(), obj_size);
1283     // promotion counters
1284     NOT_PRODUCT(
1285       _numObjectsPromoted++;
1286       _numWordsPromoted +=
1287         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1288     )
1289   }
1290   return res;
1291 }
1292 
1293 
1294 HeapWord*
1295 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1296                                              HeapWord* top,
1297                                              size_t word_sz)
1298 {
1299   return collector()->allocation_limit_reached(space, top, word_sz);
1300 }
1301 
1302 // IMPORTANT: Notes on object size recognition in CMS.
1303 // ---------------------------------------------------
1304 // A block of storage in the CMS generation is always in
1305 // one of three states. A free block (FREE), an allocated
1306 // object (OBJECT) whose size() method reports the correct size,
1307 // and an intermediate state (TRANSIENT) in which its size cannot
1308 // be accurately determined.
1309 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1310 // -----------------------------------------------------
1311 // FREE:      klass_word & 1 == 1; mark_word holds block size
1312 //
1313 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1314 //            obj->size() computes correct size
1315 //
1316 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1317 //
1318 // STATE IDENTIFICATION: (64 bit+COOPS)
1319 // ------------------------------------
1320 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1321 //


1794 //
1795 
1796 void CMSCollector::acquire_control_and_collect(bool full,
1797         bool clear_all_soft_refs) {
1798   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1799   assert(!Thread::current()->is_ConcurrentGC_thread(),
1800          "shouldn't try to acquire control from self!");
1801 
1802   // Start the protocol for acquiring control of the
1803   // collection from the background collector (aka CMS thread).
1804   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1805          "VM thread should have CMS token");
1806   // Remember the possibly interrupted state of an ongoing
1807   // concurrent collection
1808   CollectorState first_state = _collectorState;
1809 
1810   // Signal to a possibly ongoing concurrent collection that
1811   // we want to do a foreground collection.
1812   _foregroundGCIsActive = true;
1813 
1814   // Disable incremental mode during a foreground collection.
1815   ICMSDisabler icms_disabler;
1816 
1817   // release locks and wait for a notify from the background collector
 1818   // releasing the locks is only necessary for phases which
1819   // do yields to improve the granularity of the collection.
1820   assert_lock_strong(bitMapLock());
1821   // We need to lock the Free list lock for the space that we are
1822   // currently collecting.
1823   assert(haveFreelistLocks(), "Must be holding free list locks");
1824   bitMapLock()->unlock();
1825   releaseFreelistLocks();
1826   {
1827     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1828     if (_foregroundGCShouldWait) {
1829       // We are going to be waiting for action for the CMS thread;
1830       // it had better not be gone (for instance at shutdown)!
1831       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1832              "CMS thread must be running");
1833       // Wait here until the background collector gives us the go-ahead
1834       ConcurrentMarkSweepThread::clear_CMS_flag(
1835         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1836       // Get a possibly blocked CMS thread going:


2120       }
2121       // If first_state was not Idling, then a background GC
2122       // was in progress and has now finished.  No need to do it
2123       // again.  Leave the state as Idling.
2124       break;
2125     case Precleaning:
2126       // In the foreground case don't do the precleaning since
2127       // it is not done concurrently and there is extra work
2128       // required.
2129       _collectorState = FinalMarking;
2130   }
2131   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2132 
2133   // For a mark-sweep, compute_new_size() will be called
2134   // in the heap's do_collection() method.
2135 }
2136 
2137 
2138 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2139   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2140   EdenSpace* eden_space = dng->eden();
2141   ContiguousSpace* from_space = dng->from();
2142   ContiguousSpace* to_space   = dng->to();
2143   // Eden
2144   if (_eden_chunk_array != NULL) {
2145     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2146                            eden_space->bottom(), eden_space->top(),
2147                            eden_space->end(), eden_space->capacity());
2148     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2149                            "_eden_chunk_capacity=" SIZE_FORMAT,
2150                            _eden_chunk_index, _eden_chunk_capacity);
2151     for (size_t i = 0; i < _eden_chunk_index; i++) {
2152       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2153                              i, _eden_chunk_array[i]);
2154     }
2155   }
2156   // Survivor
2157   if (_survivor_chunk_array != NULL) {
2158     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2159                            from_space->bottom(), from_space->top(),
2160                            from_space->end(), from_space->capacity());


2768 
2769   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2770 
2771   _cmsGen->gc_epilogue_work(full);
2772 
2773   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2774     // in case sampling was not already enabled, enable it
2775     _start_sampling = true;
2776   }
2777   // reset _eden_chunk_array so sampling starts afresh
2778   _eden_chunk_index = 0;
2779 
2780   size_t cms_used   = _cmsGen->cmsSpace()->used();
2781 
2782   // update performance counters - this uses a special version of
2783   // update_counters() that allows the utilization to be passed as a
2784   // parameter, avoiding multiple calls to used().
2785   //
2786   _cmsGen->update_counters(cms_used);
2787 
2788   if (CMSIncrementalMode) {
2789     icms_update_allocation_limits();
2790   }
2791 
2792   bitMapLock()->unlock();
2793   releaseFreelistLocks();
2794 
2795   if (!CleanChunkPoolAsync) {
2796     Chunk::clean_chunk_pool();
2797   }
2798 
2799   set_did_compact(false);
2800   _between_prologue_and_epilogue = false;  // ready for next cycle
2801 }
2802 
2803 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2804   collector()->gc_epilogue(full);
2805 
2806   // Also reset promotion tracking in par gc thread states.
2807   if (CollectedHeap::use_parallel_gc_threads()) {
2808     for (uint i = 0; i < ParallelGCThreads; i++) {
2809       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2810     }
2811   }


4259     } else if (terminator()->offer_termination(&_term_term)) {
4260       assert(work_q->size() == 0, "Impossible!");
4261       break;
4262     } else if (yielding() || should_yield()) {
4263       yield();
4264     }
4265   }
4266 }
4267 
4268 // This is run by the CMS (coordinator) thread.
4269 void CMSConcMarkingTask::coordinator_yield() {
4270   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4271          "CMS thread should hold CMS token");
4272   // First give up the locks, then yield, then re-lock
4273   // We should probably use a constructor/destructor idiom to
4274   // do this unlock/lock or modify the MutexUnlocker class to
4275   // serve our purpose. XXX
4276   assert_lock_strong(_bit_map_lock);
4277   _bit_map_lock->unlock();
4278   ConcurrentMarkSweepThread::desynchronize(true);
4279   ConcurrentMarkSweepThread::acknowledge_yield_request();
4280   _collector->stopTimer();
4281   if (PrintCMSStatistics != 0) {
4282     _collector->incrementYields();
4283   }
4284   _collector->icms_wait();
4285 
4286   // It is possible for whichever thread initiated the yield request
4287   // not to get a chance to wake up and take the bitmap lock between
4288   // this thread releasing it and reacquiring it. So, while the
4289   // should_yield() flag is on, let's sleep for a bit to give the
4290   // other thread a chance to wake up. The limit imposed on the number
 4291   // of iterations is defensive, to avoid any unforeseen circumstances
4292   // putting us into an infinite loop. Since it's always been this
4293   // (coordinator_yield()) method that was observed to cause the
4294   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4295   // which is by default non-zero. For the other seven methods that
 4296   // also perform the yield operation, we are using a different
4297   // parameter (CMSYieldSleepCount) which is by default zero. This way we
4298   // can enable the sleeping for those methods too, if necessary.
4299   // See 6442774.
4300   //
4301   // We really need to reconsider the synchronization between the GC
4302   // thread and the yield-requesting threads in the future and we
4303   // should really use wait/notify, which is the recommended
4304   // way of doing this type of interaction. Additionally, we should
4305   // consolidate the eight methods that do the yield operation and they
4306   // are almost identical into one for better maintainability and
4307   // readability. See 6445193.
4308   //
4309   // Tony 2006.06.29
4310   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4311                    ConcurrentMarkSweepThread::should_yield() &&
4312                    !CMSCollector::foregroundGCIsActive(); ++i) {
4313     os::sleep(Thread::current(), 1, false);
4314     ConcurrentMarkSweepThread::acknowledge_yield_request();
4315   }
4316 
4317   ConcurrentMarkSweepThread::synchronize(true);
4318   _bit_map_lock->lock_without_safepoint_check();
4319   _collector->startTimer();
4320 }
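
As the XXX comments above note, this unlock/yield/re-lock sequence is duplicated across eight nearly identical methods in this file. A hedged sketch of the suggested constructor/destructor idiom (a hypothetical helper, not part of this change; the defensive sleep loop is omitted for brevity), built only from calls these methods already make:

    class CMSScopedYield : public StackObj {
      CMSCollector* _collector;
      Mutex*        _lock;  // lock held by the caller, e.g. the bitmap lock
     public:
      CMSScopedYield(CMSCollector* collector, Mutex* lock) :
        _collector(collector), _lock(lock) {
        _lock->unlock();
        ConcurrentMarkSweepThread::desynchronize(true);
        ConcurrentMarkSweepThread::acknowledge_yield_request();
        _collector->stopTimer();
        _collector->icms_wait();
      }
      ~CMSScopedYield() {
        ConcurrentMarkSweepThread::synchronize(true);
        _lock->lock_without_safepoint_check();
        _collector->startTimer();
      }
    };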
4321 
4322 bool CMSCollector::do_marking_mt(bool asynch) {
4323   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4324   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4325                                        conc_workers()->total_workers(),
4326                                        conc_workers()->active_workers(),
4327                                        Threads::number_of_non_daemon_threads());
4328   conc_workers()->set_active_workers(num_workers);
4329 
4330   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4331 
4332   CMSConcMarkingTask tsk(this,
4333                          cms_space,
4334                          asynch,


5225     // Check if we have modified any oops in the Klass during the concurrent marking.
5226     if (k->has_accumulated_modified_oops()) {
5227       k->clear_accumulated_modified_oops();
5228 
5229       // We could have transfered the current modified marks to the accumulated marks,
5230       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5231     } else if (k->has_modified_oops()) {
5232       // Don't clear anything, this info is needed by the next young collection.
5233     } else {
5234       // No modified oops in the Klass.
5235       return;
5236     }
5237 
5238     // The klass has modified fields, need to scan the klass.
5239     _cm_klass_closure.do_klass(k);
5240   }
5241 };
5242 
5243 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5244   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5245   EdenSpace* eden_space = dng->eden();
5246   ContiguousSpace* from_space = dng->from();
5247   ContiguousSpace* to_space   = dng->to();
5248 
5249   HeapWord** eca = _collector->_eden_chunk_array;
5250   size_t     ect = _collector->_eden_chunk_index;
5251   HeapWord** sca = _collector->_survivor_chunk_array;
5252   size_t     sct = _collector->_survivor_chunk_index;
5253 
5254   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5255   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5256 
5257   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5258   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5259   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5260 }
5261 
5262 // work_queue(i) is passed to the closure
5263 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5264 // also is passed to do_dirty_card_rescan_tasks() and to
5265 // do_work_steal() to select the i-th task_queue.


5397   // Until all tasks completed:
5398   // . claim an unclaimed task
5399   // . compute region boundaries corresponding to task claimed
5400   //   using chunk_array
5401   // . par_oop_iterate(cl) over that region
5402 
5403   ResourceMark rm;
5404   HandleMark   hm;
5405 
5406   SequentialSubTasksDone* pst = space->par_seq_tasks();
5407 
5408   uint nth_task = 0;
5409   uint n_tasks  = pst->n_tasks();
5410 
5411   if (n_tasks > 0) {
5412     assert(pst->valid(), "Uninitialized use?");
5413     HeapWord *start, *end;
5414     while (!pst->is_task_claimed(/* reference */ nth_task)) {
5415       // We claimed task # nth_task; compute its boundaries.
5416       if (chunk_top == 0) {  // no samples were taken
5417         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5418         start = space->bottom();
5419         end   = space->top();
5420       } else if (nth_task == 0) {
5421         start = space->bottom();
5422         end   = chunk_array[nth_task];
5423       } else if (nth_task < (uint)chunk_top) {
5424         assert(nth_task >= 1, "Control point invariant");
5425         start = chunk_array[nth_task - 1];
5426         end   = chunk_array[nth_task];
5427       } else {
5428         assert(nth_task == (uint)chunk_top, "Control point invariant");
5429         start = chunk_array[chunk_top - 1];
5430         end   = space->top();
5431       }
5432       MemRegion mr(start, end);
5433       // Verify that mr is in space
5434       assert(mr.is_empty() || space->used_region().contains(mr),
5435              "Should be in space");
5436       // Verify that "start" is an object boundary
5437       assert(mr.is_empty() || oop(mr.start())->is_oop(),


5775     workers->set_active_workers(n_workers);
5776   }
5777   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5778 
5779   CMSParRemarkTask tsk(this,
5780     cms_space,
5781     n_workers, workers, task_queues());
5782 
5783   // Set up for parallel process_roots work.
5784   gch->set_par_threads(n_workers);
5785   // We won't be iterating over the cards in the card table updating
5786   // the younger_gen cards, so we shouldn't call the following else
5787   // the verification code as well as subsequent younger_refs_iterate
5788   // code would get confused. XXX
5789   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5790 
5791   // The young gen rescan work will not be done as part of
5792   // process_roots (which currently doesn't know how to
5793   // parallelize such a scan), but rather will be broken up into
5794   // a set of parallel tasks (via the sampling that the [abortable]
5795   // preclean phase did of EdenSpace, plus the [two] tasks of
 5796   // scanning the [two] survivor spaces). Further fine-grain
5797   // parallelization of the scanning of the survivor spaces
5798   // themselves, and of precleaning of the younger gen itself
5799   // is deferred to the future.
5800   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5801 
5802   // The dirty card rescan work is broken up into a "sequence"
5803   // of parallel tasks (per constituent space) that are dynamically
5804   // claimed by the parallel threads.
5805   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5806 
5807   // It turns out that even when we're using 1 thread, doing the work in a
5808   // separate thread causes wide variance in run times.  We can't help this
5809   // in the multi-threaded case, but we special-case n=1 here to get
5810   // repeatable measurements of the 1-thread overhead of the parallel code.
5811   if (n_workers > 1) {
5812     // Make refs discovery MT-safe, if it isn't already: it may not
5813     // necessarily be so, since it's possible that we are doing
5814     // ST marking.
5815     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);


6461     }
6462 
6463     // Clear the mark bitmap (no grey objects to start with)
6464     // for the next cycle.
6465     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6466     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6467 
6468     HeapWord* curAddr = _markBitMap.startWord();
6469     while (curAddr < _markBitMap.endWord()) {
6470       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6471       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6472       _markBitMap.clear_large_range(chunk);
6473       if (ConcurrentMarkSweepThread::should_yield() &&
6474           !foregroundGCIsActive() &&
6475           CMSYield) {
6476         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6477                "CMS thread should hold CMS token");
6478         assert_lock_strong(bitMapLock());
6479         bitMapLock()->unlock();
6480         ConcurrentMarkSweepThread::desynchronize(true);
6481         ConcurrentMarkSweepThread::acknowledge_yield_request();
6482         stopTimer();
6483         if (PrintCMSStatistics != 0) {
6484           incrementYields();
6485         }
6486         icms_wait();
6487 
6488         // See the comment in coordinator_yield()
6489         for (unsigned i = 0; i < CMSYieldSleepCount &&
6490                          ConcurrentMarkSweepThread::should_yield() &&
6491                          !CMSCollector::foregroundGCIsActive(); ++i) {
6492           os::sleep(Thread::current(), 1, false);
6493           ConcurrentMarkSweepThread::acknowledge_yield_request();
6494         }
6495 
6496         ConcurrentMarkSweepThread::synchronize(true);
6497         bitMapLock()->lock_without_safepoint_check();
6498         startTimer();
6499       }
6500       curAddr = chunk.end();
6501     }
6502     // A successful mostly concurrent collection has been done.
6503     // Because only the full (i.e., concurrent mode failure) collections
6504     // are being measured for gc overhead limits, clean the "near" flag
6505     // and count.
6506     size_policy()->reset_gc_overhead_limit_count();
6507     _collectorState = Idling;
6508   } else {
6509     // already have the lock
6510     assert(_collectorState == Resetting, "just checking");
6511     assert_lock_strong(bitMapLock());
6512     _markBitMap.clear_all();
6513     _collectorState = Idling;
6514   }
6515 
6516   // Stop incremental mode after a cycle completes, so that any future cycles
6517   // are triggered by allocation.
6518   stop_icms();
6519 
6520   NOT_PRODUCT(
6521     if (RotateCMSCollectionTypes) {
6522       _cmsGen->rotate_debug_collection_type();
6523     }
6524   )
6525 
6526   register_gc_end();
6527 }
6528 
6529 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6530   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6531   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6532   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6533   TraceCollectorStats tcs(counters());
6534 
6535   switch (op) {
6536     case CMS_op_checkpointRootsInitial: {
6537       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6538       checkpointRootsInitial(true);       // asynch
6539       if (PrintGC) {


6951       _collector->restore_preserved_marks_if_any();
6952       assert(_collector->no_preserved_marks(), "No preserved marks");
6953     }
6954     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6955            "All preserved marks should have been restored above");
6956   }
6957 }
6958 
6959 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6960 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6961 
6962 void MarkRefsIntoAndScanClosure::do_yield_work() {
6963   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6964          "CMS thread should hold CMS token");
6965   assert_lock_strong(_freelistLock);
6966   assert_lock_strong(_bit_map->lock());
6967   // relinquish the free_list_lock and bitMaplock()
6968   _bit_map->lock()->unlock();
6969   _freelistLock->unlock();
6970   ConcurrentMarkSweepThread::desynchronize(true);
6971   ConcurrentMarkSweepThread::acknowledge_yield_request();
6972   _collector->stopTimer();
6973   if (PrintCMSStatistics != 0) {
6974     _collector->incrementYields();
6975   }
6976   _collector->icms_wait();
6977 
6978   // See the comment in coordinator_yield()
6979   for (unsigned i = 0;
6980        i < CMSYieldSleepCount &&
6981        ConcurrentMarkSweepThread::should_yield() &&
6982        !CMSCollector::foregroundGCIsActive();
6983        ++i) {
6984     os::sleep(Thread::current(), 1, false);
6985     ConcurrentMarkSweepThread::acknowledge_yield_request();
6986   }
6987 
6988   ConcurrentMarkSweepThread::synchronize(true);
6989   _freelistLock->lock_without_safepoint_check();
6990   _bit_map->lock()->lock_without_safepoint_check();
6991   _collector->startTimer();
6992 }
6993 
6994 ///////////////////////////////////////////////////////////
6995 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6996 //                                 MarkRefsIntoAndScanClosure
6997 ///////////////////////////////////////////////////////////
6998 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6999   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7000   CMSBitMap* bit_map, OopTaskQueue* work_queue):
7001   _span(span),
7002   _bit_map(bit_map),
7003   _work_queue(work_queue),
7004   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7005                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),


7111     } else {
7112       // An object not (yet) reached by marking: we merely need to
7113       // compute its size so as to go look at the next block.
7114       assert(p->is_oop(true), "should be an oop");
7115       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7116     }
7117   }
7118   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7119   return size;
7120 }
7121 
7122 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7123   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7124          "CMS thread should hold CMS token");
7125   assert_lock_strong(_freelistLock);
7126   assert_lock_strong(_bitMap->lock());
7127   // relinquish the free_list_lock and bitMaplock()
7128   _bitMap->lock()->unlock();
7129   _freelistLock->unlock();
7130   ConcurrentMarkSweepThread::desynchronize(true);
7131   ConcurrentMarkSweepThread::acknowledge_yield_request();
7132   _collector->stopTimer();
7133   if (PrintCMSStatistics != 0) {
7134     _collector->incrementYields();
7135   }
7136   _collector->icms_wait();
7137 
7138   // See the comment in coordinator_yield()
7139   for (unsigned i = 0; i < CMSYieldSleepCount &&
7140                    ConcurrentMarkSweepThread::should_yield() &&
7141                    !CMSCollector::foregroundGCIsActive(); ++i) {
7142     os::sleep(Thread::current(), 1, false);
7143     ConcurrentMarkSweepThread::acknowledge_yield_request();
7144   }
7145 
7146   ConcurrentMarkSweepThread::synchronize(true);
7147   _freelistLock->lock_without_safepoint_check();
7148   _bitMap->lock()->lock_without_safepoint_check();
7149   _collector->startTimer();
7150 }
7151 
7152 
7153 //////////////////////////////////////////////////////////////////
7154 // SurvivorSpacePrecleanClosure
7155 //////////////////////////////////////////////////////////////////
7156 // This (single-threaded) closure is used to preclean the oops in
7157 // the survivor spaces.
7158 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7159 
7160   HeapWord* addr = (HeapWord*)p;
7161   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7162   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7163   assert(p->klass_or_null() != NULL, "object should be initialized");


7183     // iterate over the oops in this oop, marking and pushing
7184     // the ones in CMS heap (i.e. in _span).
7185     new_oop->oop_iterate(_scanning_closure);
7186     // check if it's time to yield
7187     do_yield_check();
7188   }
7189   unsigned int after_count =
7190     GenCollectedHeap::heap()->total_collections();
7191   bool abort = (_before_count != after_count) ||
7192                _collector->should_abort_preclean();
7193   return abort ? 0 : size;
7194 }
7195 
7196 void SurvivorSpacePrecleanClosure::do_yield_work() {
7197   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7198          "CMS thread should hold CMS token");
7199   assert_lock_strong(_bit_map->lock());
7200   // Relinquish the bit map lock
7201   _bit_map->lock()->unlock();
7202   ConcurrentMarkSweepThread::desynchronize(true);
7203   ConcurrentMarkSweepThread::acknowledge_yield_request();
7204   _collector->stopTimer();
7205   if (PrintCMSStatistics != 0) {
7206     _collector->incrementYields();
7207   }
7208   _collector->icms_wait();
7209 
7210   // See the comment in coordinator_yield()
7211   for (unsigned i = 0; i < CMSYieldSleepCount &&
7212                        ConcurrentMarkSweepThread::should_yield() &&
7213                        !CMSCollector::foregroundGCIsActive(); ++i) {
7214     os::sleep(Thread::current(), 1, false);
7215     ConcurrentMarkSweepThread::acknowledge_yield_request();
7216   }
7217 
7218   ConcurrentMarkSweepThread::synchronize(true);
7219   _bit_map->lock()->lock_without_safepoint_check();
7220   _collector->startTimer();
7221 }
7222 
7223 // This closure is used to rescan the marked objects on the dirty cards
7224 // in the mod union table and the card table proper. In the parallel
7225 // case, although the bitMap is shared, we do a single read so the
7226 // isMarked() query is "safe".
7227 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7228   // Ignore mark word because we are running concurrent with mutators
7229   assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
7230   HeapWord* addr = (HeapWord*)p;
7231   assert(_span.contains(addr), "we are scanning the CMS generation");
7232   bool is_obj_array = false;
7233   #ifdef ASSERT
7234     if (!_parallel) {
7235       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");


7341       DEBUG_ONLY(})
7342       return true;
7343     }
7344   }
7345   scanOopsInOop(addr);
7346   return true;
7347 }
7348 
7349 // We take a break if we've been at this for a while,
7350 // so as to avoid monopolizing the locks involved.
7351 void MarkFromRootsClosure::do_yield_work() {
7352   // First give up the locks, then yield, then re-lock
7353   // We should probably use a constructor/destructor idiom to
7354   // do this unlock/lock or modify the MutexUnlocker class to
7355   // serve our purpose. XXX
7356   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7357          "CMS thread should hold CMS token");
7358   assert_lock_strong(_bitMap->lock());
7359   _bitMap->lock()->unlock();
7360   ConcurrentMarkSweepThread::desynchronize(true);
7361   ConcurrentMarkSweepThread::acknowledge_yield_request();
7362   _collector->stopTimer();
7363   if (PrintCMSStatistics != 0) {
7364     _collector->incrementYields();
7365   }
7366   _collector->icms_wait();
7367 
7368   // See the comment in coordinator_yield()
7369   for (unsigned i = 0; i < CMSYieldSleepCount &&
7370                        ConcurrentMarkSweepThread::should_yield() &&
7371                        !CMSCollector::foregroundGCIsActive(); ++i) {
7372     os::sleep(Thread::current(), 1, false);
7373     ConcurrentMarkSweepThread::acknowledge_yield_request();
7374   }
7375 
7376   ConcurrentMarkSweepThread::synchronize(true);
7377   _bitMap->lock()->lock_without_safepoint_check();
7378   _collector->startTimer();
7379 }
7380 
7381 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7382   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7383   assert(_markStack->isEmpty(),
7384          "should drain stack to limit stack usage");
7385   // convert ptr to an oop preparatory to scanning
7386   oop obj = oop(ptr);
7387   // Ignore mark word in verification below, since we
7388   // may be running concurrent with mutators.
7389   assert(obj->is_oop(true), "should be an oop");
7390   assert(_finger <= ptr, "_finger runneth ahead");
7391   // advance the finger to right end of this object
7392   _finger = ptr + obj->size();
7393   assert(_finger > ptr, "we just incremented it above");
7394   // On large heaps, it may take us some time to get through
7395   // the marking phase (especially if running iCMS). During
7396   // this time it's possible that a lot of mutations have
7397   // accumulated in the card table and the mod union table --
7398   // these mutation records are redundant until we have
7399   // actually traced into the corresponding card.
7400   // Here, we check whether advancing the finger would make
7401   // us cross into a new card, and if so clear corresponding
7402   // cards in the MUT (preclean them in the card-table in the
7403   // future).
7404 
7405   DEBUG_ONLY(if (!_verifying) {)
7406     // The clean-on-enter optimization is disabled by default,
7407     // until we fix 6178663.
7408     if (CMSCleanOnEnter && (_finger > _threshold)) {
7409       // [_threshold, _finger) represents the interval
7410       // of cards to be cleared  in MUT (or precleaned in card table).
7411       // The set of cards to be cleared is all those that overlap
7412       // with the interval [_threshold, _finger); note that
7413       // _threshold is always kept card-aligned but _finger isn't
7414       // always card-aligned.
7415       HeapWord* old_threshold = _threshold;


7492   scan_oops_in_oop(addr);
7493   return true;
7494 }
7495 
7496 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7497   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7498   // Should we assert that our work queue is empty or
7499   // below some drain limit?
7500   assert(_work_queue->size() == 0,
7501          "should drain stack to limit stack usage");
7502   // convert ptr to an oop preparatory to scanning
7503   oop obj = oop(ptr);
7504   // Ignore mark word in verification below, since we
7505   // may be running concurrent with mutators.
7506   assert(obj->is_oop(true), "should be an oop");
7507   assert(_finger <= ptr, "_finger runneth ahead");
7508   // advance the finger to right end of this object
7509   _finger = ptr + obj->size();
7510   assert(_finger > ptr, "we just incremented it above");
7511   // On large heaps, it may take us some time to get through
7512   // the marking phase (especially if running iCMS). During
7513   // this time it's possible that a lot of mutations have
7514   // accumulated in the card table and the mod union table --
7515   // these mutation records are redundant until we have
7516   // actually traced into the corresponding card.
7517   // Here, we check whether advancing the finger would make
7518   // us cross into a new card, and if so clear corresponding
7519   // cards in the MUT (preclean them in the card-table in the
7520   // future).
7521 
7522   // The clean-on-enter optimization is disabled by default,
7523   // until we fix 6178663.
7524   if (CMSCleanOnEnter && (_finger > _threshold)) {
7525     // [_threshold, _finger) represents the interval
7526     // of cards to be cleared  in MUT (or precleaned in card table).
7527     // The set of cards to be cleared is all those that overlap
7528     // with the interval [_threshold, _finger); note that
7529     // _threshold is always kept card-aligned but _finger isn't
7530     // always card-aligned.
7531     HeapWord* old_threshold = _threshold;
7532     assert(old_threshold == (HeapWord*)round_to(


7981       if (simulate_overflow || !_work_queue->push(obj)) {
7982         _collector->par_push_on_overflow_list(obj);
7983         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7984       }
7985     } // Else, some other thread got there first
7986   }
7987 }
7988 
7989 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7990 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7991 
7992 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7993   Mutex* bml = _collector->bitMapLock();
7994   assert_lock_strong(bml);
7995   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7996          "CMS thread should hold CMS token");
7997 
7998   bml->unlock();
7999   ConcurrentMarkSweepThread::desynchronize(true);
8000 
8001   ConcurrentMarkSweepThread::acknowledge_yield_request();
8002 
8003   _collector->stopTimer();
8004   if (PrintCMSStatistics != 0) {
8005     _collector->incrementYields();
8006   }
8007   _collector->icms_wait();
8008 
8009   // See the comment in coordinator_yield()
8010   for (unsigned i = 0; i < CMSYieldSleepCount &&
8011                        ConcurrentMarkSweepThread::should_yield() &&
8012                        !CMSCollector::foregroundGCIsActive(); ++i) {
8013     os::sleep(Thread::current(), 1, false);
8014     ConcurrentMarkSweepThread::acknowledge_yield_request();
8015   }
8016 
8017   ConcurrentMarkSweepThread::synchronize(true);
8018   bml->lock();
8019 
8020   _collector->startTimer();
8021 }
8022 
8023 bool CMSPrecleanRefsYieldClosure::should_return() {
8024   if (ConcurrentMarkSweepThread::should_yield()) {
8025     do_yield_work();
8026   }
8027   return _collector->foregroundGCIsActive();
8028 }
8029 
8030 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8031   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8032          "mr should be aligned to start at a card boundary");
8033   // We'd like to assert:
8034   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,


8662   // to the appropriate freelist.  After yielding, the next
8663   // free block encountered will start a coalescing range of
8664   // free blocks.  If the next free block is adjacent to the
8665   // chunk just flushed, they will need to wait for the next
8666   // sweep to be coalesced.
8667   if (inFreeRange()) {
8668     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8669   }
8670 
8671   // First give up the locks, then yield, then re-lock.
8672   // We should probably use a constructor/destructor idiom to
8673   // do this unlock/lock or modify the MutexUnlocker class to
8674   // serve our purpose. XXX
8675   assert_lock_strong(_bitMap->lock());
8676   assert_lock_strong(_freelistLock);
8677   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8678          "CMS thread should hold CMS token");
8679   _bitMap->lock()->unlock();
8680   _freelistLock->unlock();
8681   ConcurrentMarkSweepThread::desynchronize(true);
8682   ConcurrentMarkSweepThread::acknowledge_yield_request();
8683   _collector->stopTimer();
8684   if (PrintCMSStatistics != 0) {
8685     _collector->incrementYields();
8686   }
8687   _collector->icms_wait();
8688 
8689   // See the comment in coordinator_yield()
8690   for (unsigned i = 0; i < CMSYieldSleepCount &&
8691                        ConcurrentMarkSweepThread::should_yield() &&
8692                        !CMSCollector::foregroundGCIsActive(); ++i) {
8693     os::sleep(Thread::current(), 1, false);
8694     ConcurrentMarkSweepThread::acknowledge_yield_request();
8695   }
8696 
8697   ConcurrentMarkSweepThread::synchronize(true);
8698   _freelistLock->lock();
8699   _bitMap->lock()->lock_without_safepoint_check();
8700   _collector->startTimer();
8701 }
8702 
8703 #ifndef PRODUCT
8704 // This is actually very useful in a product build if it can
8705 // be called from the debugger.  Compile it into the product
8706 // as needed.
8707 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8708   return debug_cms_space->verify_chunk_in_free_list(fc);
8709 }
8710 #endif
8711 
8712 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8713   if (CMSTraceSweeper) {
8714     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",




 150 };
 151 
 152 // Convenience class that does a CMSTokenSync, and then acquires
 153 // up to three locks.
 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
 155  private:
 156   // Note: locks are acquired in textual declaration order
 157   // and released in the opposite order
 158   MutexLockerEx _locker1, _locker2, _locker3;
 159  public:
 160   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 161                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 162     CMSTokenSync(is_cms_thread),
 163     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 164     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 165     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 166   { }
 167 };
 168 
 169 
 170 //////////////////////////////////////////////////////////////////
 171 //  Concurrent Mark-Sweep Generation /////////////////////////////
 172 //////////////////////////////////////////////////////////////////
 173 
 174 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 175 
 176 // This struct contains per-thread things necessary to support parallel
 177 // young-gen collection.
 178 class CMSParGCThreadState: public CHeapObj<mtGC> {
 179  public:
 180   CFLS_LAB lab;
 181   PromotionInfo promo;
 182 
 183   // Constructor.
 184   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 185     promo.setSpace(cfls);
 186   }
 187 };
 188 
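A sketch of how one of these states per parallel worker could be set up, all sharing the CMS free-list space (illustrative; `states` is a local name invented here):

    CMSParGCThreadState** states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
    for (uint i = 0; i < ParallelGCThreads; i++) {
      // Each worker gets its own CFLS_LAB and PromotionInfo, bound to
      // the shared CompactibleFreeListSpace.
      states[i] = new CMSParGCThreadState(cmsSpace());
    }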
 189 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(


 336   _saved_alpha = alpha;
 337 
 338   // Initialize the alphas to the bootstrap value of 100.
 339   _gc0_alpha = _cms_alpha = 100;
 340 
 341   _cms_begin_time.update();
 342   _cms_end_time.update();
 343 
 344   _gc0_duration = 0.0;
 345   _gc0_period = 0.0;
 346   _gc0_promoted = 0;
 347 
 348   _cms_duration = 0.0;
 349   _cms_period = 0.0;
 350   _cms_allocated = 0;
 351 
 352   _cms_used_at_gc0_begin = 0;
 353   _cms_used_at_gc0_end = 0;
 354   _allow_duty_cycle_reduction = false;
 355   _valid_bits = 0;

 356 }
 357 
 358 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 359   // TBD: CR 6909490
 360   return 1.0;
 361 }
 362 
 363 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 364 }
 365 
 366 // If promotion failure handling is on, use
 367 // the padded average size of the promotion for each
 368 // young generation collection.
 369 double CMSStats::time_until_cms_gen_full() const {
 370   size_t cms_free = _cms_gen->cmsSpace()->free();
 371   GenCollectedHeap* gch = GenCollectedHeap::heap();
 372   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 373                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 374   if (cms_free > expected_promotion) {
 375     // Start a cms collection if there isn't enough space to promote


 414   // account for that much possible delay
 415   // in the query so as to avoid concurrent mode failures
 416   // due to starting the collection just a wee bit too
 417   // late.
 418   double work = cms_duration() + gc0_period();
 419   double deadline = time_until_cms_gen_full();
 420   // If a concurrent mode failure occurred recently, we want to be
 421   // more conservative and halve our expected time_until_cms_gen_full()
 422   if (work > deadline) {
 423     if (Verbose && PrintGCDetails) {
 424       gclog_or_tty->print(
 425         " CMSCollector: collect because of anticipated promotion "
 426         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 427         gc0_period(), time_until_cms_gen_full());
 428     }
 429     return 0.0;
 430   }
 431   return deadline - work;
 432 }
 433 
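To make the arithmetic above concrete (numbers illustrative): if cms_duration() is 2.0s and gc0_period() is 0.5s, then work = 2.5s; with time_until_cms_gen_full() = 10.0s there are 7.5s of headroom before a collection must start, while work exceeding the deadline makes the method return 0.0, i.e. "start now".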

































































 434 #ifndef PRODUCT
 435 void CMSStats::print_on(outputStream *st) const {
 436   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 437   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 438                gc0_duration(), gc0_period(), gc0_promoted());
 439   st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 440             cms_duration(), cms_period(), cms_allocated());

 441   st->print(",cms_since_beg=%g,cms_since_end=%g",
 442             cms_time_since_begin(), cms_time_since_end());
 443   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 444             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);



 445 
 446   if (valid()) {
 447     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 448               promotion_rate(), cms_allocation_rate());
 449     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 450               cms_consumption_rate(), time_until_cms_gen_full());
 451   }
 452   st->print(" ");
 453 }
 454 #endif // #ifndef PRODUCT
 455 
 456 CMSCollector::CollectorState CMSCollector::_collectorState =
 457                              CMSCollector::Idling;
 458 bool CMSCollector::_foregroundGCIsActive = false;
 459 bool CMSCollector::_foregroundGCShouldWait = false;
 460 
 461 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 462                            CardTableRS*                   ct,
 463                            ConcurrentMarkSweepPolicy*     cp):
 464   _cmsGen(cmsGen),


 482   _stats(cmsGen),
 483   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 484   _eden_chunk_array(NULL),     // may be set in ctor body
 485   _eden_chunk_capacity(0),     // -- ditto --
 486   _eden_chunk_index(0),        // -- ditto --
 487   _survivor_plab_array(NULL),  // -- ditto --
 488   _survivor_chunk_array(NULL), // -- ditto --
 489   _survivor_chunk_capacity(0), // -- ditto --
 490   _survivor_chunk_index(0),    // -- ditto --
 491   _ser_pmc_preclean_ovflw(0),
 492   _ser_kac_preclean_ovflw(0),
 493   _ser_pmc_remark_ovflw(0),
 494   _par_pmc_remark_ovflw(0),
 495   _ser_kac_ovflw(0),
 496   _par_kac_ovflw(0),
 497 #ifndef PRODUCT
 498   _num_par_pushes(0),
 499 #endif
 500   _collection_count_start(0),
 501   _verifying(false),


 502   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 503   _completed_initialization(false),
 504   _collector_policy(cp),
 505   _should_unload_classes(CMSClassUnloadingEnabled),
 506   _concurrent_cycles_since_last_unload(0),
 507   _roots_scanning_options(SharedHeap::SO_None),
 508   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 509   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 510   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 511   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 512   _cms_start_registered(false)
 513 {
 514   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 515     ExplicitGCInvokesConcurrent = true;
 516   }
 517   // Now expand the span and allocate the collection support structures
 518   // (MUT, marking bit map etc.) to cover both generations subject to
 519   // collection.
 520 
 521   // For use by dirty card to oop closures.


1019         // card size.
1020         MemRegion mr(start,
1021                      (HeapWord*)round_to((intptr_t)(start + obj_size),
1022                         CardTableModRefBS::card_size /* bytes */));
1023         if (par) {
1024           _modUnionTable.par_mark_range(mr);
1025         } else {
1026           _modUnionTable.mark_range(mr);
1027         }
1028       } else {  // not an obj array; we can just mark the head
1029         if (par) {
1030           _modUnionTable.par_mark(start);
1031         } else {
1032           _modUnionTable.mark(start);
1033         }
1034       }
1035     }
1036   }
1037 }
1038 



































































































































1039 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1040   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1041   // allocate, copy and if necessary update promoinfo --
1042   // delegate to underlying space.
1043   assert_lock_strong(freelistLock());
1044 
1045 #ifndef PRODUCT
1046   if (Universe::heap()->promotion_should_fail()) {
1047     return NULL;
1048   }
1049 #endif  // #ifndef PRODUCT
1050 
1051   oop res = _cmsSpace->promote(obj, obj_size);
1052   if (res == NULL) {
1053     // expand and retry
1054     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1055     expand(s*HeapWordSize, MinHeapDeltaBytes,
1056       CMSExpansionCause::_satisfy_promotion);
1057     // Since there's currently no next generation, we don't try to promote
1058     // into a more senior generation.


1061                                "promotion to next generation");
1062     res = _cmsSpace->promote(obj, obj_size);
1063   }
1064   if (res != NULL) {
1065     // See comment in allocate() about when objects should
1066     // be allocated live.
1067     assert(obj->is_oop(), "Will dereference klass pointer below");
1068     collector()->promoted(false,           // Not parallel
1069                           (HeapWord*)res, obj->is_objArray(), obj_size);
1070     // promotion counters
1071     NOT_PRODUCT(
1072       _numObjectsPromoted++;
1073       _numWordsPromoted +=
1074         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1075     )
1076   }
1077   return res;
1078 }
1079 
1080 
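A hedged sketch of the calling convention: promote() must run under the free-list lock, and a NULL result means even the expand-and-retry path failed (the names in the snippet are illustrative):

    MutexLockerEx fl(cms_gen->freelistLock(), Mutex::_no_safepoint_check_flag);
    oop copy = cms_gen->promote(obj, obj->size());
    if (copy == NULL) {
      // Promotion failed; the caller must take the promotion-failure path.
    }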








1081 // IMPORTANT: Notes on object size recognition in CMS.
1082 // ---------------------------------------------------
1083 // A block of storage in the CMS generation is always in
1084 // one of three states: a free block (FREE), an allocated
1085 // object (OBJECT) whose size() method reports the correct size,
1086 // or an intermediate state (TRANSIENT) in which its size cannot
1087 // be accurately determined.
1088 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1089 // -----------------------------------------------------
1090 // FREE:      klass_word & 1 == 1; mark_word holds block size
1091 //
1092 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1093 //            obj->size() computes correct size
1094 //
1095 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1096 //
1097 // STATE IDENTIFICATION: (64 bit+COOPS)
1098 // ------------------------------------
1099 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1100 //
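Transcribing the non-COOPS encodings above into code (helper names invented; the real checks live in FreeChunk and the block-size machinery):

    bool is_free_block(intptr_t klass_word) {
      return (klass_word & 1) == 1; // FREE: low bit set; mark word holds size
    }
    bool is_transient_block(intptr_t klass_word) {
      return klass_word == 0;       // TRANSIENT: klass word not yet installed
    }
    // Anything else is an OBJECT whose obj->size() is reliable.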


1573 //
1574 
1575 void CMSCollector::acquire_control_and_collect(bool full,
1576         bool clear_all_soft_refs) {
1577   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1578   assert(!Thread::current()->is_ConcurrentGC_thread(),
1579          "shouldn't try to acquire control from self!");
1580 
1581   // Start the protocol for acquiring control of the
1582   // collection from the background collector (aka CMS thread).
1583   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1584          "VM thread should have CMS token");
1585   // Remember the possibly interrupted state of an ongoing
1586   // concurrent collection
1587   CollectorState first_state = _collectorState;
1588 
1589   // Signal to a possibly ongoing concurrent collection that
1590   // we want to do a foreground collection.
1591   _foregroundGCIsActive = true;
1592 



1593   // release locks and wait for a notify from the background collector
1594   // releasing the locks is only necessary for phases which
1595   // do yields to improve the granularity of the collection.
1596   assert_lock_strong(bitMapLock());
1597   // We need to lock the Free list lock for the space that we are
1598   // currently collecting.
1599   assert(haveFreelistLocks(), "Must be holding free list locks");
1600   bitMapLock()->unlock();
1601   releaseFreelistLocks();
1602   {
1603     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1604     if (_foregroundGCShouldWait) {
1605       // We are going to be waiting for action for the CMS thread;
1606       // it had better not be gone (for instance at shutdown)!
1607       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1608              "CMS thread must be running");
1609       // Wait here until the background collector gives us the go-ahead
1610       ConcurrentMarkSweepThread::clear_CMS_flag(
1611         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1612       // Get a possibly blocked CMS thread going:


1896       }
1897       // If first_state was not Idling, then a background GC
1898       // was in progress and has now finished.  No need to do it
1899       // again.  Leave the state as Idling.
1900       break;
1901     case Precleaning:
1902       // In the foreground case don't do the precleaning since
1903       // it is not done concurrently and there is extra work
1904       // required.
1905       _collectorState = FinalMarking;
1906   }
1907   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
1908 
1909   // For a mark-sweep, compute_new_size() will be called
1910   // in the heap's do_collection() method.
1911 }
1912 
1913 
1914 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1915   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
1916   ContiguousSpace* eden_space = dng->eden();
1917   ContiguousSpace* from_space = dng->from();
1918   ContiguousSpace* to_space   = dng->to();
1919   // Eden
1920   if (_eden_chunk_array != NULL) {
1921     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1922                            eden_space->bottom(), eden_space->top(),
1923                            eden_space->end(), eden_space->capacity());
1924     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1925                            "_eden_chunk_capacity=" SIZE_FORMAT,
1926                            _eden_chunk_index, _eden_chunk_capacity);
1927     for (size_t i = 0; i < _eden_chunk_index; i++) {
1928       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1929                              i, _eden_chunk_array[i]);
1930     }
1931   }
1932   // Survivor
1933   if (_survivor_chunk_array != NULL) {
1934     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1935                            from_space->bottom(), from_space->top(),
1936                            from_space->end(), from_space->capacity());


2544 
2545   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2546 
2547   _cmsGen->gc_epilogue_work(full);
2548 
2549   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2550     // in case sampling was not already enabled, enable it
2551     _start_sampling = true;
2552   }
2553   // reset _eden_chunk_array so sampling starts afresh
2554   _eden_chunk_index = 0;
2555 
2556   size_t cms_used   = _cmsGen->cmsSpace()->used();
2557 
2558   // update performance counters - this uses a special version of
2559   // update_counters() that allows the utilization to be passed as a
2560   // parameter, avoiding multiple calls to used().
2561   //
2562   _cmsGen->update_counters(cms_used);
2563 




2564   bitMapLock()->unlock();
2565   releaseFreelistLocks();
2566 
2567   if (!CleanChunkPoolAsync) {
2568     Chunk::clean_chunk_pool();
2569   }
2570 
2571   set_did_compact(false);
2572   _between_prologue_and_epilogue = false;  // ready for next cycle
2573 }
2574 
2575 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2576   collector()->gc_epilogue(full);
2577 
2578   // Also reset promotion tracking in par gc thread states.
2579   if (CollectedHeap::use_parallel_gc_threads()) {
2580     for (uint i = 0; i < ParallelGCThreads; i++) {
2581       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2582     }
2583   }


4031     } else if (terminator()->offer_termination(&_term_term)) {
4032       assert(work_q->size() == 0, "Impossible!");
4033       break;
4034     } else if (yielding() || should_yield()) {
4035       yield();
4036     }
4037   }
4038 }
4039 
4040 // This is run by the CMS (coordinator) thread.
4041 void CMSConcMarkingTask::coordinator_yield() {
4042   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4043          "CMS thread should hold CMS token");
4044   // First give up the locks, then yield, then re-lock
4045   // We should probably use a constructor/destructor idiom to
4046   // do this unlock/lock or modify the MutexUnlocker class to
4047   // serve our purpose. XXX
4048   assert_lock_strong(_bit_map_lock);
4049   _bit_map_lock->unlock();
4050   ConcurrentMarkSweepThread::desynchronize(true);

4051   _collector->stopTimer();
4052   if (PrintCMSStatistics != 0) {
4053     _collector->incrementYields();
4054   }

4055 
4056   // It is possible for whichever thread initiated the yield request
4057   // not to get a chance to wake up and take the bitmap lock between
4058   // this thread releasing it and reacquiring it. So, while the
4059   // should_yield() flag is on, let's sleep for a bit to give the
4060   // other thread a chance to wake up. The limit imposed on the number
4061   // of iterations is defensive, to avoid any unforeseen circumstances
4062   // putting us into an infinite loop. Since it's always been this
4063   // (coordinator_yield()) method that was observed to cause the
4064   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4065   // which is by default non-zero. The other seven methods that
4066   // also perform the yield operation use a different
4067   // parameter (CMSYieldSleepCount), which is by default zero. This way we
4068   // can enable the sleeping for those methods too, if necessary.
4069   // See 6442774.
4070   //
4071   // We really need to reconsider the synchronization between the GC
4072   // thread and the yield-requesting threads in the future and we
4073   // should really use wait/notify, which is the recommended
4074   // way of doing this type of interaction. Additionally, we should
4075   // consolidate the eight methods that do the yield operation, which
4076   // are almost identical, into one for better maintainability and
4077   // readability. See 6445193.
4078   //
4079   // Tony 2006.06.29
4080   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4081                    ConcurrentMarkSweepThread::should_yield() &&
4082                    !CMSCollector::foregroundGCIsActive(); ++i) {
4083     os::sleep(Thread::current(), 1, false);

4084   }
4085 
4086   ConcurrentMarkSweepThread::synchronize(true);
4087   _bit_map_lock->lock_without_safepoint_check();
4088   _collector->startTimer();
4089 }
4090 
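A minimal sketch of the wait/notify alternative suggested above (6445193), assuming a dedicated Monitor, here called yield_monitor, which does not exist in this code:

    {
      MutexLockerEx x(yield_monitor, Mutex::_no_safepoint_check_flag);
      while (ConcurrentMarkSweepThread::should_yield() &&
             !CMSCollector::foregroundGCIsActive()) {
        // Block until notified (timeout as a defensive backstop) instead
        // of polling with 1ms sleeps.
        yield_monitor->wait(Mutex::_no_safepoint_check_flag, 1 /* ms */);
      }
    }
    // The yield-requesting thread, once done, would hold the same
    // monitor and call yield_monitor->notify_all().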
4091 bool CMSCollector::do_marking_mt(bool asynch) {
4092   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4093   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4094                                        conc_workers()->total_workers(),
4095                                        conc_workers()->active_workers(),
4096                                        Threads::number_of_non_daemon_threads());
4097   conc_workers()->set_active_workers(num_workers);
4098 
4099   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4100 
4101   CMSConcMarkingTask tsk(this,
4102                          cms_space,
4103                          asynch,


4994     // Check if we have modified any oops in the Klass during the concurrent marking.
4995     if (k->has_accumulated_modified_oops()) {
4996       k->clear_accumulated_modified_oops();
4997 
4998       // We could have transferred the current modified marks to the accumulated marks,
4999       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5000     } else if (k->has_modified_oops()) {
5001       // Don't clear anything, this info is needed by the next young collection.
5002     } else {
5003       // No modified oops in the Klass.
5004       return;
5005     }
5006 
5007     // The klass has modified fields, need to scan the klass.
5008     _cm_klass_closure.do_klass(k);
5009   }
5010 };
5011 
5012 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5013   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5014   ContiguousSpace* eden_space = dng->eden();
5015   ContiguousSpace* from_space = dng->from();
5016   ContiguousSpace* to_space   = dng->to();
5017 
5018   HeapWord** eca = _collector->_eden_chunk_array;
5019   size_t     ect = _collector->_eden_chunk_index;
5020   HeapWord** sca = _collector->_survivor_chunk_array;
5021   size_t     sct = _collector->_survivor_chunk_index;
5022 
5023   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5024   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5025 
5026   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5027   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5028   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5029 }
5030 
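Note that the to-space call passes NULL/0 for the chunk array: no samples are taken in to-space, so do_young_space_rescan (below) treats it as a single task spanning [bottom, top).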
5031 // work_queue(i) is passed to the closure
5032 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5033 // also is passed to do_dirty_card_rescan_tasks() and to
5034 // do_work_steal() to select the i-th task_queue.


5166   // Until all tasks completed:
5167   // . claim an unclaimed task
5168   // . compute region boundaries corresponding to task claimed
5169   //   using chunk_array
5170   // . par_oop_iterate(cl) over that region
5171 
5172   ResourceMark rm;
5173   HandleMark   hm;
5174 
5175   SequentialSubTasksDone* pst = space->par_seq_tasks();
5176 
5177   uint nth_task = 0;
5178   uint n_tasks  = pst->n_tasks();
5179 
5180   if (n_tasks > 0) {
5181     assert(pst->valid(), "Uninitialized use?");
5182     HeapWord *start, *end;
5183     while (!pst->is_task_claimed(/* reference */ nth_task)) {
5184       // We claimed task # nth_task; compute its boundaries.
5185       if (chunk_top == 0) {  // no samples were taken
5186         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
5187         start = space->bottom();
5188         end   = space->top();
5189       } else if (nth_task == 0) {
5190         start = space->bottom();
5191         end   = chunk_array[nth_task];
5192       } else if (nth_task < (uint)chunk_top) {
5193         assert(nth_task >= 1, "Control point invariant");
5194         start = chunk_array[nth_task - 1];
5195         end   = chunk_array[nth_task];
5196       } else {
5197         assert(nth_task == (uint)chunk_top, "Control point invariant");
5198         start = chunk_array[chunk_top - 1];
5199         end   = space->top();
5200       }
5201       MemRegion mr(start, end);
5202       // Verify that mr is in space
5203       assert(mr.is_empty() || space->used_region().contains(mr),
5204              "Should be in space");
5205       // Verify that "start" is an object boundary
5206       assert(mr.is_empty() || oop(mr.start())->is_oop(),

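A concrete reading of the boundary computation above (addresses illustrative): with chunk_array = {0x1000, 0x2000, 0x3000} and chunk_top = 3, four tasks are formed: [bottom, 0x1000), [0x1000, 0x2000), [0x2000, 0x3000) and [0x3000, top); with chunk_top == 0 the whole of [bottom, top) is a single task.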

5544     workers->set_active_workers(n_workers);
5545   }
5546   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5547 
5548   CMSParRemarkTask tsk(this,
5549     cms_space,
5550     n_workers, workers, task_queues());
5551 
5552   // Set up for parallel process_roots work.
5553   gch->set_par_threads(n_workers);
5554   // We won't be iterating over the cards in the card table updating
5555   // the younger_gen cards, so we shouldn't call the following; otherwise
5556   // the verification code, as well as subsequent younger_refs_iterate
5557   // code, would get confused. XXX
5558   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5559 
5560   // The young gen rescan work will not be done as part of
5561   // process_roots (which currently doesn't know how to
5562   // parallelize such a scan), but rather will be broken up into
5563   // a set of parallel tasks (via the sampling that the [abortable]
5564   // preclean phase did of eden, plus the [two] tasks of
5565   // scanning the [two] survivor spaces). Further fine-grain
5566   // parallelization of the scanning of the survivor spaces
5567   // themselves, and of precleaning of the younger gen itself
5568   // is deferred to the future.
5569   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5570 
5571   // The dirty card rescan work is broken up into a "sequence"
5572   // of parallel tasks (per constituent space) that are dynamically
5573   // claimed by the parallel threads.
5574   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5575 
5576   // It turns out that even when we're using 1 thread, doing the work in a
5577   // separate thread causes wide variance in run times.  We can't help this
5578   // in the multi-threaded case, but we special-case n=1 here to get
5579   // repeatable measurements of the 1-thread overhead of the parallel code.
5580   if (n_workers > 1) {
5581     // Make refs discovery MT-safe, if it isn't already: it may not
5582     // necessarily be so, since it's possible that we are doing
5583     // ST marking.
5584     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);


6230     }
6231 
6232     // Clear the mark bitmap (no grey objects to start with)
6233     // for the next cycle.
6234     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6235     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6236 
6237     HeapWord* curAddr = _markBitMap.startWord();
6238     while (curAddr < _markBitMap.endWord()) {
6239       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6240       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6241       _markBitMap.clear_large_range(chunk);
6242       if (ConcurrentMarkSweepThread::should_yield() &&
6243           !foregroundGCIsActive() &&
6244           CMSYield) {
6245         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6246                "CMS thread should hold CMS token");
6247         assert_lock_strong(bitMapLock());
6248         bitMapLock()->unlock();
6249         ConcurrentMarkSweepThread::desynchronize(true);

6250         stopTimer();
6251         if (PrintCMSStatistics != 0) {
6252           incrementYields();
6253         }

6254 
6255         // See the comment in coordinator_yield()
6256         for (unsigned i = 0; i < CMSYieldSleepCount &&
6257                          ConcurrentMarkSweepThread::should_yield() &&
6258                          !CMSCollector::foregroundGCIsActive(); ++i) {
6259           os::sleep(Thread::current(), 1, false);

6260         }
6261 
6262         ConcurrentMarkSweepThread::synchronize(true);
6263         bitMapLock()->lock_without_safepoint_check();
6264         startTimer();
6265       }
6266       curAddr = chunk.end();
6267     }
6268     // A successful mostly concurrent collection has been done.
6269     // Because only the full (i.e., concurrent mode failure) collections
6270     // are being measured for gc overhead limits, clean the "near" flag
6271     // and count.
6272     size_policy()->reset_gc_overhead_limit_count();
6273     _collectorState = Idling;
6274   } else {
6275     // already have the lock
6276     assert(_collectorState == Resetting, "just checking");
6277     assert_lock_strong(bitMapLock());
6278     _markBitMap.clear_all();
6279     _collectorState = Idling;
6280   }
6281 




6282   NOT_PRODUCT(
6283     if (RotateCMSCollectionTypes) {
6284       _cmsGen->rotate_debug_collection_type();
6285     }
6286   )
6287 
6288   register_gc_end();
6289 }
6290 
6291 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6292   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6293   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6294   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6295   TraceCollectorStats tcs(counters());
6296 
6297   switch (op) {
6298     case CMS_op_checkpointRootsInitial: {
6299       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6300       checkpointRootsInitial(true);       // asynch
6301       if (PrintGC) {


6713       _collector->restore_preserved_marks_if_any();
6714       assert(_collector->no_preserved_marks(), "No preserved marks");
6715     }
6716     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6717            "All preserved marks should have been restored above");
6718   }
6719 }
6720 
6721 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6722 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6723 
6724 void MarkRefsIntoAndScanClosure::do_yield_work() {
6725   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6726          "CMS thread should hold CMS token");
6727   assert_lock_strong(_freelistLock);
6728   assert_lock_strong(_bit_map->lock());
6729   // relinquish the free_list_lock and bitMapLock()
6730   _bit_map->lock()->unlock();
6731   _freelistLock->unlock();
6732   ConcurrentMarkSweepThread::desynchronize(true);

6733   _collector->stopTimer();
6734   if (PrintCMSStatistics != 0) {
6735     _collector->incrementYields();
6736   }

6737 
6738   // See the comment in coordinator_yield()
6739   for (unsigned i = 0;
6740        i < CMSYieldSleepCount &&
6741        ConcurrentMarkSweepThread::should_yield() &&
6742        !CMSCollector::foregroundGCIsActive();
6743        ++i) {
6744     os::sleep(Thread::current(), 1, false);

6745   }
6746 
6747   ConcurrentMarkSweepThread::synchronize(true);
6748   _freelistLock->lock_without_safepoint_check();
6749   _bit_map->lock()->lock_without_safepoint_check();
6750   _collector->startTimer();
6751 }
6752 
6753 ///////////////////////////////////////////////////////////
6754 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6755 //                                 MarkRefsIntoAndScanClosure
6756 ///////////////////////////////////////////////////////////
6757 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6758   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6759   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6760   _span(span),
6761   _bit_map(bit_map),
6762   _work_queue(work_queue),
6763   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6764                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),


6870     } else {
6871       // An object not (yet) reached by marking: we merely need to
6872       // compute its size so as to go look at the next block.
6873       assert(p->is_oop(true), "should be an oop");
6874       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6875     }
6876   }
6877   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6878   return size;
6879 }
6880 
6881 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6882   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6883          "CMS thread should hold CMS token");
6884   assert_lock_strong(_freelistLock);
6885   assert_lock_strong(_bitMap->lock());
6886   // relinquish the free_list_lock and bitMapLock()
6887   _bitMap->lock()->unlock();
6888   _freelistLock->unlock();
6889   ConcurrentMarkSweepThread::desynchronize(true);

6890   _collector->stopTimer();
6891   if (PrintCMSStatistics != 0) {
6892     _collector->incrementYields();
6893   }

6894 
6895   // See the comment in coordinator_yield()
6896   for (unsigned i = 0; i < CMSYieldSleepCount &&
6897                    ConcurrentMarkSweepThread::should_yield() &&
6898                    !CMSCollector::foregroundGCIsActive(); ++i) {
6899     os::sleep(Thread::current(), 1, false);

6900   }
6901 
6902   ConcurrentMarkSweepThread::synchronize(true);
6903   _freelistLock->lock_without_safepoint_check();
6904   _bitMap->lock()->lock_without_safepoint_check();
6905   _collector->startTimer();
6906 }
6907 
6908 
6909 //////////////////////////////////////////////////////////////////
6910 // SurvivorSpacePrecleanClosure
6911 //////////////////////////////////////////////////////////////////
6912 // This (single-threaded) closure is used to preclean the oops in
6913 // the survivor spaces.
6914 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6915 
6916   HeapWord* addr = (HeapWord*)p;
6917   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6918   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6919   assert(p->klass_or_null() != NULL, "object should be initialized");


6939     // iterate over the oops in this oop, marking and pushing
6940     // the ones in CMS heap (i.e. in _span).
6941     new_oop->oop_iterate(_scanning_closure);
6942     // check if it's time to yield
6943     do_yield_check();
6944   }
6945   unsigned int after_count =
6946     GenCollectedHeap::heap()->total_collections();
6947   bool abort = (_before_count != after_count) ||
6948                _collector->should_abort_preclean();
6949   return abort ? 0 : size;
6950 }
6951 
6952 void SurvivorSpacePrecleanClosure::do_yield_work() {
6953   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6954          "CMS thread should hold CMS token");
6955   assert_lock_strong(_bit_map->lock());
6956   // Relinquish the bit map lock
6957   _bit_map->lock()->unlock();
6958   ConcurrentMarkSweepThread::desynchronize(true);

6959   _collector->stopTimer();
6960   if (PrintCMSStatistics != 0) {
6961     _collector->incrementYields();
6962   }

6963 
6964   // See the comment in coordinator_yield()
6965   for (unsigned i = 0; i < CMSYieldSleepCount &&
6966                        ConcurrentMarkSweepThread::should_yield() &&
6967                        !CMSCollector::foregroundGCIsActive(); ++i) {
6968     os::sleep(Thread::current(), 1, false);

6969   }
6970 
6971   ConcurrentMarkSweepThread::synchronize(true);
6972   _bit_map->lock()->lock_without_safepoint_check();
6973   _collector->startTimer();
6974 }
6975 
6976 // This closure is used to rescan the marked objects on the dirty cards
6977 // in the mod union table and the card table proper. In the parallel
6978 // case, although the bitMap is shared, we do a single read so the
6979 // isMarked() query is "safe".
6980 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6981   // Ignore mark word because we are running concurrent with mutators
6982   assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6983   HeapWord* addr = (HeapWord*)p;
6984   assert(_span.contains(addr), "we are scanning the CMS generation");
6985   bool is_obj_array = false;
6986   #ifdef ASSERT
6987     if (!_parallel) {
6988       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");


7094       DEBUG_ONLY(})
7095       return true;
7096     }
7097   }
7098   scanOopsInOop(addr);
7099   return true;
7100 }
7101 
7102 // We take a break if we've been at this for a while,
7103 // so as to avoid monopolizing the locks involved.
7104 void MarkFromRootsClosure::do_yield_work() {
7105   // First give up the locks, then yield, then re-lock
7106   // We should probably use a constructor/destructor idiom to
7107   // do this unlock/lock or modify the MutexUnlocker class to
7108   // serve our purpose. XXX
7109   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7110          "CMS thread should hold CMS token");
7111   assert_lock_strong(_bitMap->lock());
7112   _bitMap->lock()->unlock();
7113   ConcurrentMarkSweepThread::desynchronize(true);

7114   _collector->stopTimer();
7115   if (PrintCMSStatistics != 0) {
7116     _collector->incrementYields();
7117   }

7118 
7119   // See the comment in coordinator_yield()
7120   for (unsigned i = 0; i < CMSYieldSleepCount &&
7121                        ConcurrentMarkSweepThread::should_yield() &&
7122                        !CMSCollector::foregroundGCIsActive(); ++i) {
7123     os::sleep(Thread::current(), 1, false);

7124   }
7125 
7126   ConcurrentMarkSweepThread::synchronize(true);
7127   _bitMap->lock()->lock_without_safepoint_check();
7128   _collector->startTimer();
7129 }
7130 
7131 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7132   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7133   assert(_markStack->isEmpty(),
7134          "should drain stack to limit stack usage");
7135   // convert ptr to an oop preparatory to scanning
7136   oop obj = oop(ptr);
7137   // Ignore mark word in verification below, since we
7138   // may be running concurrent with mutators.
7139   assert(obj->is_oop(true), "should be an oop");
7140   assert(_finger <= ptr, "_finger runneth ahead");
7141   // advance the finger to right end of this object
7142   _finger = ptr + obj->size();
7143   assert(_finger > ptr, "we just incremented it above");
7144   // On large heaps, it may take us some time to get through
7145   // the marking phase. During
7146   // this time it's possible that a lot of mutations have
7147   // accumulated in the card table and the mod union table --
7148   // these mutation records are redundant until we have
7149   // actually traced into the corresponding card.
7150   // Here, we check whether advancing the finger would make
7151   // us cross into a new card, and if so clear corresponding
7152   // cards in the MUT (preclean them in the card-table in the
7153   // future).
7154 
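  // Worked example (numbers illustrative): with 512-byte cards, if
  // _threshold is card-aligned at 0x10000 and this object advances
  // _finger to 0x10230, then, were CMSCleanOnEnter on, the code below
  // would clear the cards overlapping [0x10000, 0x10400) in the MUT
  // and advance _threshold to the card-aligned 0x10400.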
7155   DEBUG_ONLY(if (!_verifying) {)
7156     // The clean-on-enter optimization is disabled by default,
7157     // until we fix 6178663.
7158     if (CMSCleanOnEnter && (_finger > _threshold)) {
7159       // [_threshold, _finger) represents the interval
7160       // of cards to be cleared  in MUT (or precleaned in card table).
7161       // The set of cards to be cleared is all those that overlap
7162       // with the interval [_threshold, _finger); note that
7163       // _threshold is always kept card-aligned but _finger isn't
7164       // always card-aligned.
7165       HeapWord* old_threshold = _threshold;


7242   scan_oops_in_oop(addr);
7243   return true;
7244 }
7245 
7246 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7247   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7248   // Should we assert that our work queue is empty or
7249   // below some drain limit?
7250   assert(_work_queue->size() == 0,
7251          "should drain stack to limit stack usage");
7252   // convert ptr to an oop preparatory to scanning
7253   oop obj = oop(ptr);
7254   // Ignore mark word in verification below, since we
7255   // may be running concurrent with mutators.
7256   assert(obj->is_oop(true), "should be an oop");
7257   assert(_finger <= ptr, "_finger runneth ahead");
7258   // advance the finger to right end of this object
7259   _finger = ptr + obj->size();
7260   assert(_finger > ptr, "we just incremented it above");
7261   // On large heaps, it may take us some time to get through
7262   // the marking phase. During
7263   // this time it's possible that a lot of mutations have
7264   // accumulated in the card table and the mod union table --
7265   // these mutation records are redundant until we have
7266   // actually traced into the corresponding card.
7267   // Here, we check whether advancing the finger would make
7268   // us cross into a new card, and if so clear corresponding
7269   // cards in the MUT (preclean them in the card-table in the
7270   // future).
7271 
7272   // The clean-on-enter optimization is disabled by default,
7273   // until we fix 6178663.
7274   if (CMSCleanOnEnter && (_finger > _threshold)) {
7275     // [_threshold, _finger) represents the interval
7276     // of cards to be cleared  in MUT (or precleaned in card table).
7277     // The set of cards to be cleared is all those that overlap
7278     // with the interval [_threshold, _finger); note that
7279     // _threshold is always kept card-aligned but _finger isn't
7280     // always card-aligned.
7281     HeapWord* old_threshold = _threshold;
7282     assert(old_threshold == (HeapWord*)round_to(


7731       if (simulate_overflow || !_work_queue->push(obj)) {
7732         _collector->par_push_on_overflow_list(obj);
7733         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7734       }
7735     } // Else, some other thread got there first
7736   }
7737 }
7738 
7739 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7740 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7741 
7742 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7743   Mutex* bml = _collector->bitMapLock();
7744   assert_lock_strong(bml);
7745   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7746          "CMS thread should hold CMS token");
7747 
7748   bml->unlock();
7749   ConcurrentMarkSweepThread::desynchronize(true);
7750 


7751   _collector->stopTimer();
7752   if (PrintCMSStatistics != 0) {
7753     _collector->incrementYields();
7754   }

7755 
7756   // See the comment in coordinator_yield()
7757   for (unsigned i = 0; i < CMSYieldSleepCount &&
7758                        ConcurrentMarkSweepThread::should_yield() &&
7759                        !CMSCollector::foregroundGCIsActive(); ++i) {
7760     os::sleep(Thread::current(), 1, false);

7761   }
7762 
7763   ConcurrentMarkSweepThread::synchronize(true);
7764   bml->lock();
7765 
7766   _collector->startTimer();
7767 }
7768 
7769 bool CMSPrecleanRefsYieldClosure::should_return() {
7770   if (ConcurrentMarkSweepThread::should_yield()) {
7771     do_yield_work();
7772   }
7773   return _collector->foregroundGCIsActive();
7774 }
7775 
7776 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7777   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7778          "mr should be aligned to start at a card boundary");
7779   // We'd like to assert:
7780   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,


8408   // to the appropriate freelist.  After yielding, the next
8409   // free block encountered will start a coalescing range of
8410   // free blocks.  If the next free block is adjacent to the
8411   // chunk just flushed, they will need to wait for the next
8412   // sweep to be coalesced.
8413   if (inFreeRange()) {
8414     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8415   }
8416 
8417   // First give up the locks, then yield, then re-lock.
8418   // We should probably use a constructor/destructor idiom to
8419   // do this unlock/lock or modify the MutexUnlocker class to
8420   // serve our purpose. XXX
8421   assert_lock_strong(_bitMap->lock());
8422   assert_lock_strong(_freelistLock);
8423   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8424          "CMS thread should hold CMS token");
8425   _bitMap->lock()->unlock();
8426   _freelistLock->unlock();
8427   ConcurrentMarkSweepThread::desynchronize(true);

8428   _collector->stopTimer();
8429   if (PrintCMSStatistics != 0) {
8430     _collector->incrementYields();
8431   }

8432 
8433   // See the comment in coordinator_yield()
8434   for (unsigned i = 0; i < CMSYieldSleepCount &&
8435                        ConcurrentMarkSweepThread::should_yield() &&
8436                        !CMSCollector::foregroundGCIsActive(); ++i) {
8437     os::sleep(Thread::current(), 1, false);

8438   }
8439 
8440   ConcurrentMarkSweepThread::synchronize(true);
8441   _freelistLock->lock();
8442   _bitMap->lock()->lock_without_safepoint_check();
8443   _collector->startTimer();
8444 }
8445 
8446 #ifndef PRODUCT
8447 // This is actually very useful in a product build if it can
8448 // be called from the debugger.  Compile it into the product
8449 // as needed.
8450 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8451   return debug_cms_space->verify_chunk_in_free_list(fc);
8452 }
8453 #endif
8454 
8455 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8456   if (CMSTraceSweeper) {
8457     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",

