/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

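// Note: each bit in the map covers 2^_shifter HeapWords, so the two
// conversions below are simple shifts.  (Illustrative example: with
// _shifter == 0 the bit for an address 100 words past _bmStartWord is at
// offset 100; with _shifter == 2 it would be at offset 25.)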
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

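// Atomic variant of mark(); the return value of BitMap::par_at_put
// indicates whether this call actually changed the bit (false if it was
// already set), so parallel markers can use it to claim an address once.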
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

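// The BitMap::small_range / BitMap::large_range arguments passed below are
// only hints about the expected size of the range, letting BitMap pick an
// appropriate strategy; they do not change which bits are set or cleared.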
inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// whose addresses are strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// at or after "addr" (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit,
// starting at start_addr (inclusive) but strictly less than end_addr;
// returns end_addr if no such bit exists in the range.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit
// at or after "addr" (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr;
// returns end_addr if no such bit exists in the range.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
          isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // a scavenge is done or a foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

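// The CMSStats averages below are exponentially weighted: as defined in
// gcUtil.hpp, exp_avg(avg, sample, alpha) blends the previous average with
// the new sample using alpha as a percentage weight, roughly
//   new_avg = (100 - alpha)/100 * avg + alpha/100 * sample.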
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}

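// Note: record_cms_end() below restarts _cms_timer without resetting it,
// and the timer is reset only in record_cms_begin(), so the value read
// there is the elapsed time since the previous cycle began, i.e. the
// begin-to-begin CMS period.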
inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                 cur_duration / cms_used_mb,
                                 _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

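// The rates below are in bytes per second: gc0_promoted() and
// cms_allocated() are averages in bytes, while gc0_period() is an average
// measured in seconds (see record_gc0_begin()).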
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

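// The do_yield_check() variants below let the concurrent closures give up
// the CPU voluntarily: they yield only when the CMS thread has been asked
// to yield, no foreground collection has taken over, and the closure was
// constructed with yielding enabled (_yield).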
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void Par_PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// A return value of "true" indicates that the ongoing preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered so that the cheapest test, _yield, comes
  // first; during the remarking phase _yield is false, so the remaining
  // checks are short-circuited.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


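// The mod union closures below record dirty ranges in the mod union table
// (the CMSBitMap pointed to by _t).  The end of each region is rounded up
// to a card boundary; CardTableModRefBS::card_size is expressed in bytes.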
inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP