#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)concurrentMarkSweepGeneration.inline.hpp     1.47 07/05/17 15:52:12 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

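// Conversions between heap word addresses and bit offsets in the map:
// an address is translated to a bit index by taking its word distance
// from _bmStartWord and shifting right by _shifter (and back again for
// the reverse mapping), so one bit covers 2^_shifter heap words.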
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

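// Atomic variant of mark() for use by parallel workers: returns true if
// this thread set the bit (it was previously clear), false if the bit
// had already been set.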
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

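// The range operations below pass a size hint to the underlying BitMap:
// BitMap::small_range for ranges expected to span only a few bits, and
// BitMap::large_range for ranges wide enough that word-at-a-time updates
// pay off.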
inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximal contiguous marked ("1") region
// strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
          isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}


inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

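// Incremental CMS (iCMS) control: the helpers below delegate to the CMS
// thread only when -XX:+CMSIncrementalMode is enabled; otherwise they are
// no-ops.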
inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

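// Record the current end of allocated space in the CMS and perm generations;
// the concurrent sweep uses these saved limits to decide where to stop.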
inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
  _permGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr))
         ||
         (_permGen->cmsSpace()->is_in_reserved(addr)
          && _permGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail());
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

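// Statistics for young-generation ("gc0") collections.  Period, duration,
// promoted bytes and direct CMS allocation are maintained as exponentially
// weighted moving averages (weight _gc0_alpha).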
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                 cur_duration / cms_used_mb,
                                 _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

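// The rates below are in bytes per second: averaged bytes promoted into, or
// allocated directly in, the CMS generation per young-gen collection, divided
// by the average time (in seconds) between young-gen collections.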
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

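// Yield checks for the concurrent phases.  A closure yields (so that other
// GC work, such as a young-gen scavenge or a foreground collection, can
// proceed) only if the CMS thread has been asked to yield, no foreground
// (stop-the-world) collection is already active, and yielding is enabled
// for this closure (_yield).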
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


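// These closures are applied to dirty ranges found in the card table; they
// record ("union in") the corresponding ranges in the bitmap _t (the mod
// union table) so that precleaning and remark can rescan them later.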
inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}