/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

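// Conversions between heap addresses and bit positions: each bit in the map
// covers (1 << _shifter) heap words, starting at _bmStartWord.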
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

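// Single-bit marking; assert_locked() checks that the caller holds the
// bitmap lock.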
inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

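// Atomic variant for use by parallel marking threads; the return value
// indicates whether this call actually changed the bit.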
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximal contiguous marked ("1") region
// strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

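// Apply the given closure to each set bit in [left, right), after clipping
// the interval to the region covered by this bitmap.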
inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

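// Incremental-mode (iCMS) controls: these are no-ops unless CMSIncrementalMode
// is enabled, in which case they delegate to the CMS thread.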
inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
  _permGen->save_sweep_limit();
}

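// An object in the CMS or perm generation is considered dead only when this
// cycle is unloading classes, we have reached the sweeping phase, and the
// object is not marked in the mark bitmap.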
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr))
         ||
         (_permGen->cmsSpace()->is_in_reserved(addr)
          && _permGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail());
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

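// Record the start of a young (gen-0) collection: fold the time since the
// previous gen-0 start into the exponentially weighted period average and
// snapshot CMS-gen occupancy for later promotion accounting.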
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

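// Record the end of a young collection: update the averages for gen-0
// duration, bytes promoted into the CMS generation, and bytes allocated
// directly in the CMS generation.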
inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                 cur_duration / cms_used_mb,
                                 _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

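// The rates below are in bytes per second, derived from the exponentially
// weighted averages maintained above; they are meaningful only once valid().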
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

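// Simple accessors that delegate to the underlying CompactibleFreeListSpace.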
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

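// Yield checks for the concurrent phases: yield only when the CMS thread has
// been asked to yield, no foreground collection is in progress, and this
// closure permits yielding (_yield).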
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP