/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

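// Conversions between heap addresses and bit offsets in the underlying
// BitMap: each bit covers 2^_shifter heap words starting at _bmStartWord.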
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

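// Atomically set the bit for addr. Per BitMap::par_at_put, the return value
// is true only if this call changed the bit (i.e. it was not already marked,
// e.g. by a competing thread).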
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// that lies strictly below end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

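// Apply cl to the offset of each marked ("1") bit in [left, right),
// after clipping the range to the bitmap's coverage; see BitMap::iterate.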
inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

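// An object in the CMS generation is considered dead if class unloading is
// enabled, the collector is in the Sweeping state, and the object is not
// marked in the mark bitmap.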
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

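// Current eden occupancy and capacity in the young generation, in bytes.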
inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

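// Statistics are valid only after all component valid bits have been set;
// see record_gc0_begin() and record_cms_end() below.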
inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

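// At the start of a young (gc0) collection: fold the time since the previous
// young collection into the exponentially weighted average period, and record
// the CMS generation occupancy so the amount promoted can be computed in
// record_gc0_end().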
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

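// At the end of a young (gc0) collection: update the averages for young
// collection duration, bytes promoted into the CMS generation, and bytes
// directly allocated in the CMS generation since the previous young GC.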
inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}

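// At the start of a CMS cycle: fold the value of the CMS timer (which has
// been running since the end of the previous CMS cycle) into the average
// _cms_period, then reset and restart the timer for this cycle.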
inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

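// At the end of a CMS cycle: fold this cycle's elapsed time into the average
// CMS duration, mark the CMS statistics valid, and restart the timer so the
// interval until the next cycle can be measured.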
inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

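// The rates below are expressed in bytes per second, averaged over the
// young-collection (gc0) period.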
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

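// do_yield_check() for the concurrent closures below: yield only when the
// CMS thread has been asked to yield, no foreground collection is active,
// and (where the closure has a _yield flag) yielding is enabled.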
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive()) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void Par_PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP