/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

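// Note on the address <-> bit mapping used by the helpers below: bit i of the
// underlying BitMap covers the 2^_shifter HeapWords starting at
// _bmStartWord + (i << _shifter).  For the CMS mark bitmap the shifter is
// typically 0 (one bit per HeapWord), while the mod union table uses a
// coarser, card-sized granularity; see the CMSCollector constructor for the
// actual values.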
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

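// Unlike mark(), par_mark() below goes through the atomic BitMap::par_at_put()
// and returns whether this call actually flipped the bit, so concurrent
// marking threads can tell which one of them claimed a given object.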
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}
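
// The BitMap::small_range/large_range arguments used above are only hints
// about the expected range size: small_range favors a simple per-bit loop,
// while large_range lets the bitmap operate on whole words of the map at once.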

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximal contiguous marked ("1") region
// strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}
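
// Illustrative example (assuming a shifter of 0): if the only marked bits in
// [start_addr, end_addr) are three consecutive bits starting at heap word W,
// the call returns the region [W, W + 3) and clears those three bits; if no
// bit is marked in the range, the returned region is empty and nothing is
// cleared.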

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                            HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}
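
// Note: is_dead_obj() can only answer true while sweeping with class
// unloading enabled; at that point an unmarked block that parses as an object
// is garbage left over from the just-completed marking cycle.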

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

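// The statistics below are smoothed with AdaptiveWeightedAverage::exp_avg()
// from gcUtil.hpp which, assuming the usual definition, blends the old
// average and the new sample as
//   new_avg = ((100 - alpha) * old_avg + alpha * sample) / 100
// with the alpha weights expressed as percentages.  For example, alpha == 50,
// an old average of 2.0s and a new sample of 4.0s give a new average of 3.0s.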
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}

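// The CMS timer deliberately keeps running between cycles: record_cms_end()
// restarts it after recording the cycle duration, so that record_cms_begin()
// can fold the time elapsed since the previous cycle ended into _cms_period
// before resetting the timer to time the new cycle.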
inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

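// The rates below are in bytes per second: gc0_promoted() and cms_allocated()
// are smoothed byte counts per young-generation period and gc0_period() is
// the smoothed period length in seconds.  They feed the heuristics that
// estimate when the CMS generation will fill up.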
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

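// The do_yield_check() helpers below let the concurrent marking, precleaning
// and sweeping closures yield voluntarily when the CMS thread has been asked
// to yield (for example so that a scavenge or safepoint can proceed), but
// only while no foreground collection is active and, where applicable, only
// if the closure was constructed with yielding enabled (_yield).  The actual
// lock juggling happens in each closure's do_yield_work().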
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive()) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void Par_PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered so that the cheap _yield test comes first;
  // during the remarking phase _yield is false and short-circuits the rest.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}

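
// The mod union closures record dirtied ranges in the mod union table (a
// CMSBitMap kept at card granularity), so the end of each region is first
// rounded up to a card boundary; CardTableModRefBS::card_size is normally
// 512 bytes.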
inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP